[llvm] [X86] X86DAGToDAGISel - attempt to merge XMM/YMM loads with YMM/ZMM loads of the same ptr (PR #73126)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Thu Nov 23 03:02:12 PST 2023


https://github.com/RKSimon updated https://github.com/llvm/llvm-project/pull/73126

From e8a34a63f6a251956bc31e62ad22feedc6fe8676 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Wed, 22 Nov 2023 14:38:18 +0000
Subject: [PATCH 1/2] [X86] X86DAGToDAGISel - attempt to merge XMM/YMM loads
 with YMM/ZMM loads of the same ptr

If we are loading the same ptr at different vector widths, then reuse the larger load and just extract the low subvector.

This is mainly useful for better constant sharing.

Unlike the equivalent VBROADCAST_LOAD/SUBV_BROADCAST_LOAD folds, which can occur in the DAG, we have to wait until DAGISel; otherwise we can hit infinite loops if constant folding recreates the original constant value.
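
As a rough sketch of the idea (hypothetical SelectionDAG nodes, not taken verbatim from any of the test diffs below): given two simple loads of the same pointer

  t3: v8i32,ch = load<(load (s256) from constant-pool)> t0, t1, undef:i64
  t4: v4i32,ch = load<(load (s128) from constant-pool)> t0, t1, undef:i64

PreprocessISelDAG now rewrites the narrower load as

  t5: v4i32 = extract_subvector t3, Constant:i64<0>

and replaces all uses of t4 (both its value and its chain result) with t5 and t3's chain, so only the wider constant pool entry is materialized.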
---
 llvm/lib/Target/X86/X86ISelDAGToDAG.cpp       |   40 +
 llvm/test/CodeGen/X86/avx512-regcall-Mask.ll  |    4 +-
 llvm/test/CodeGen/X86/bfloat.ll               |    6 +-
 .../X86/bitcast-int-to-vector-bool-sext.ll    |   58 +-
 .../X86/bitcast-int-to-vector-bool-zext.ll    |   66 +-
 .../X86/broadcast-elm-cross-splat-vec.ll      |  204 +-
 .../test/CodeGen/X86/constant-pool-sharing.ll |   16 +-
 .../X86/insert-into-constant-vector.ll        |   42 +-
 llvm/test/CodeGen/X86/midpoint-int-vec-512.ll |   40 +-
 llvm/test/CodeGen/X86/pr57340.ll              |   63 +-
 llvm/test/CodeGen/X86/splat-for-size.ll       |    6 +-
 llvm/test/CodeGen/X86/subvector-broadcast.ll  |   42 +-
 llvm/test/CodeGen/X86/vec_fabs.ll             |   88 +-
 llvm/test/CodeGen/X86/vec_int_to_fp.ll        |  215 +-
 llvm/test/CodeGen/X86/vector-fshl-256.ll      |   32 +-
 llvm/test/CodeGen/X86/vector-fshl-512.ll      |    8 +-
 llvm/test/CodeGen/X86/vector-fshl-rot-256.ll  |   39 +-
 llvm/test/CodeGen/X86/vector-fshr-256.ll      |  334 +-
 llvm/test/CodeGen/X86/vector-fshr-512.ll      |  188 +-
 llvm/test/CodeGen/X86/vector-fshr-rot-256.ll  |   39 +-
 .../vector-interleaved-load-i16-stride-7.ll   |   50 +-
 .../vector-interleaved-load-i8-stride-4.ll    |  233 +-
 .../vector-interleaved-load-i8-stride-5.ll    |  210 +-
 .../vector-interleaved-load-i8-stride-6.ll    |  202 +-
 .../vector-interleaved-store-i16-stride-3.ll  |  271 +-
 .../vector-interleaved-store-i16-stride-5.ll  | 1145 ++--
 .../vector-interleaved-store-i16-stride-7.ll  | 5502 +++++++++--------
 .../vector-interleaved-store-i8-stride-5.ll   |  578 +-
 .../vector-interleaved-store-i8-stride-6.ll   | 2398 ++++---
 .../vector-interleaved-store-i8-stride-7.ll   | 1417 ++---
 llvm/test/CodeGen/X86/vector-sext.ll          |   10 +-
 .../X86/vector-shuffle-combining-avx.ll       |   24 +-
 llvm/test/CodeGen/X86/viabs.ll                |    8 +-
 llvm/test/CodeGen/X86/vselect-avx.ll          |    9 +-
 .../CodeGen/X86/x86-interleaved-access.ll     |  127 +-
 .../zero_extend_vector_inreg_of_broadcast.ll  |   12 +-
 ...d_vector_inreg_of_broadcast_from_memory.ll |   21 +-
 37 files changed, 6889 insertions(+), 6858 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 93e184eca9bc515..8b9393580dc45bf 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -1036,6 +1036,46 @@ void X86DAGToDAGISel::PreprocessISelDAG() {
 
       break;
     }
+    case ISD::LOAD: {
+      // If this is an XMM/YMM load of the same lower bits as another YMM/ZMM
+      // load, then just extract the lower subvector and avoid the second load.
+      auto *Ld = cast<LoadSDNode>(N);
+      MVT VT = N->getSimpleValueType(0);
+      if (ISD::isNormalLoad(Ld) && Ld->isSimple() &&
+          (VT.is128BitVector() || VT.is256BitVector())) {
+        SDValue Ptr = Ld->getBasePtr();
+        SDValue Chain = Ld->getChain();
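+        // Look through the other users of the base pointer for another
+        // normal load of the same pointer and chain with a wider type.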
+        for (SDNode *User : Ptr->uses()) {
+          auto *UserLd = dyn_cast<LoadSDNode>(User);
+          MVT UserVT = User->getSimpleValueType(0);
+          if (User != N && UserLd && ISD::isNormalLoad(User) &&
+              UserLd->getBasePtr() == Ptr && UserLd->getChain() == Chain &&
+              !User->hasAnyUseOfValue(1) &&
+              (UserVT.is256BitVector() || UserVT.is512BitVector()) &&
+              UserVT.getSizeInBits() > VT.getSizeInBits()) {
+            SDLoc dl(N);
+            unsigned NumSubElts =
+                VT.getSizeInBits() / UserVT.getScalarSizeInBits();
+            MVT SubVT = MVT::getVectorVT(UserVT.getScalarType(), NumSubElts);
+            SDValue Extract = CurDAG->getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT,
+                                              SDValue(User, 0),
+                                              CurDAG->getIntPtrConstant(0, dl));
+            SDValue Res = CurDAG->getBitcast(VT, Extract);
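+            // Replace both results of the narrow load: its value with the
+            // extracted subvector and its chain with the wider load's chain.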
+            --I;
+            SDValue To[] = {Res, SDValue(UserLd, 1)};
+            CurDAG->ReplaceAllUsesWith(N, To);
+            ++I;
+            MadeChange = true;
+            continue;
+          }
+        }
+      }
+      break;
+    }
     case ISD::VSELECT: {
       // Replace VSELECT with non-mask conditions with BLENDV/VPTERNLOG.
       EVT EleVT = N->getOperand(0).getValueType().getVectorElementType();
diff --git a/llvm/test/CodeGen/X86/avx512-regcall-Mask.ll b/llvm/test/CodeGen/X86/avx512-regcall-Mask.ll
index 34a205a7baa8641..b3a0c7dffae117c 100644
--- a/llvm/test/CodeGen/X86/avx512-regcall-Mask.ll
+++ b/llvm/test/CodeGen/X86/avx512-regcall-Mask.ll
@@ -98,10 +98,8 @@ define dso_local i64 @caller_argv64i1() #0 {
 ; X32:       # %bb.0: # %entry
 ; X32-NEXT:    pushl %edi
 ; X32-NEXT:    subl $88, %esp
-; X32-NEXT:    vmovddup {{.*#+}} xmm0 = [2,1,2,1]
-; X32-NEXT:    # xmm0 = mem[0,0]
-; X32-NEXT:    vmovups %xmm0, {{[0-9]+}}(%esp)
 ; X32-NEXT:    vbroadcastsd {{.*#+}} zmm0 = [2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1]
+; X32-NEXT:    vmovups %xmm0, {{[0-9]+}}(%esp)
 ; X32-NEXT:    vmovups %zmm0, (%esp)
 ; X32-NEXT:    movl $1, {{[0-9]+}}(%esp)
 ; X32-NEXT:    movl $2, {{[0-9]+}}(%esp)
diff --git a/llvm/test/CodeGen/X86/bfloat.ll b/llvm/test/CodeGen/X86/bfloat.ll
index 7a82515ad24b72c..4792e8343d7589f 100644
--- a/llvm/test/CodeGen/X86/bfloat.ll
+++ b/llvm/test/CodeGen/X86/bfloat.ll
@@ -1401,9 +1401,9 @@ define <32 x bfloat> @pr63017_2() nounwind {
 ; AVXNC-NEXT:    jne .LBB12_2
 ; AVXNC-NEXT:  # %bb.1: # %cond.load
 ; AVXNC-NEXT:    vpbroadcastw {{.*#+}} ymm1 = [49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024]
-; AVXNC-NEXT:    vpbroadcastw {{.*#+}} xmm0 = [49024,49024,49024,49024,49024,49024,49024,49024]
-; AVXNC-NEXT:    vpinsrw $0, (%rax), %xmm0, %xmm0
-; AVXNC-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVXNC-NEXT:    vpbroadcastw {{.*#+}} ymm0 = [49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024]
+; AVXNC-NEXT:    vpinsrw $0, (%rax), %xmm0, %xmm2
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
 ; AVXNC-NEXT:  .LBB12_2: # %else
 ; AVXNC-NEXT:    xorl %eax, %eax
 ; AVXNC-NEXT:    testb %al, %al
diff --git a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll
index a995e93708456e7..adf7fa97b776527 100644
--- a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll
+++ b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll
@@ -209,8 +209,9 @@ define <4 x i64> @ext_i4_4i64(i4 %a0) {
 ; AVX1-NEXT:    vmovd %edi, %xmm0
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX1-NEXT:    vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT:    vmovaps {{.*#+}} ymm1 = [1,2,4,8]
+; AVX1-NEXT:    vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    vpcmpeqq %xmm1, %xmm0, %xmm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
@@ -255,8 +256,9 @@ define <8 x i32> @ext_i8_8i32(i8 %a0) {
 ; AVX1-NEXT:    vmovd %edi, %xmm0
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX1-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT:    vmovaps {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128]
+; AVX1-NEXT:    vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
@@ -303,8 +305,9 @@ define <16 x i16> @ext_i16_16i16(i16 %a0) {
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX1-NEXT:    vpcmpeqw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT:    vmovaps {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
+; AVX1-NEXT:    vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpcmpeqw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
@@ -421,13 +424,15 @@ define <8 x i64> @ext_i8_8i64(i8 %a0) {
 ; AVX1-NEXT:    vmovd %edi, %xmm0
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm1
-; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
-; AVX1-NEXT:    vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT:    vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
-; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX1-NEXT:    vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm2
+; AVX1-NEXT:    vmovaps {{.*#+}} ymm0 = [1,2,4,8]
+; AVX1-NEXT:    vandps %ymm0, %ymm1, %ymm2
+; AVX1-NEXT:    vpcmpeqq %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
+; AVX1-NEXT:    vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vmovaps {{.*#+}} ymm2 = [16,32,64,128]
+; AVX1-NEXT:    vandps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT:    vpcmpeqq %xmm2, %xmm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
 ; AVX1-NEXT:    vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
@@ -482,13 +487,15 @@ define <16 x i32> @ext_i16_16i32(i16 %a0) {
 ; AVX1-NEXT:    vmovd %edi, %xmm0
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm1
-; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
-; AVX1-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
-; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX1-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm2
+; AVX1-NEXT:    vmovaps {{.*#+}} ymm0 = [1,2,4,8,16,32,64,128]
+; AVX1-NEXT:    vandps %ymm0, %ymm1, %ymm2
+; AVX1-NEXT:    vpcmpeqd %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
+; AVX1-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vmovaps {{.*#+}} ymm2 = [256,512,1024,2048,4096,8192,16384,32768]
+; AVX1-NEXT:    vandps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
 ; AVX1-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
@@ -549,17 +556,16 @@ define <32 x i16> @ext_i32_32i16(i32 %a0) {
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [256,512,1024,2048,4096,8192,16384,32768]
 ; AVX1-NEXT:    vpcmpeqw %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [1,2,4,8,16,32,64,128]
-; AVX1-NEXT:    vpcmpeqw %xmm5, %xmm0, %xmm0
+; AVX1-NEXT:    vpcmpeqw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[1,1,1,1,4,5,6,7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm1
 ; AVX1-NEXT:    vandps %ymm2, %ymm1, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT:    vpcmpeqw %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vpcmpeqw %xmm5, %xmm1, %xmm1
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT:    vpcmpeqw %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpeqw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: ext_i32_32i16:
diff --git a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll
index 544d9b21eca7b9a..50c132b6c34de98 100644
--- a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll
+++ b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll
@@ -265,8 +265,9 @@ define <4 x i64> @ext_i4_4i64(i4 %a0) {
 ; AVX1-NEXT:    vmovd %edi, %xmm0
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX1-NEXT:    vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT:    vmovaps {{.*#+}} ymm1 = [1,2,4,8]
+; AVX1-NEXT:    vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    vpcmpeqq %xmm1, %xmm0, %xmm1
 ; AVX1-NEXT:    vpsrlq $63, %xmm1, %xmm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -324,8 +325,9 @@ define <8 x i32> @ext_i8_8i32(i8 %a0) {
 ; AVX1-NEXT:    vmovd %edi, %xmm0
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX1-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT:    vmovaps {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128]
+; AVX1-NEXT:    vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm1
 ; AVX1-NEXT:    vpsrld $31, %xmm1, %xmm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -385,8 +387,9 @@ define <16 x i16> @ext_i16_16i16(i16 %a0) {
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX1-NEXT:    vpcmpeqw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT:    vmovaps {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
+; AVX1-NEXT:    vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm1
 ; AVX1-NEXT:    vpsrlw $15, %xmm1, %xmm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpcmpeqw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -544,15 +547,17 @@ define <8 x i64> @ext_i8_8i64(i8 %a0) {
 ; AVX1-NEXT:    vmovd %edi, %xmm0
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm1
-; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
-; AVX1-NEXT:    vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
-; AVX1-NEXT:    vpsrlq $63, %xmm2, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT:    vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vmovaps {{.*#+}} ymm0 = [1,2,4,8]
+; AVX1-NEXT:    vandps %ymm0, %ymm1, %ymm2
+; AVX1-NEXT:    vpcmpeqq %xmm0, %xmm2, %xmm0
 ; AVX1-NEXT:    vpsrlq $63, %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
-; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX1-NEXT:    vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
+; AVX1-NEXT:    vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlq $63, %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vmovaps {{.*#+}} ymm2 = [16,32,64,128]
+; AVX1-NEXT:    vandps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT:    vpcmpeqq %xmm2, %xmm1, %xmm2
 ; AVX1-NEXT:    vpsrlq $63, %xmm2, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
 ; AVX1-NEXT:    vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
@@ -623,15 +628,17 @@ define <16 x i32> @ext_i16_16i32(i16 %a0) {
 ; AVX1-NEXT:    vmovd %edi, %xmm0
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm1
-; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
-; AVX1-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
-; AVX1-NEXT:    vpsrld $31, %xmm2, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vmovaps {{.*#+}} ymm0 = [1,2,4,8,16,32,64,128]
+; AVX1-NEXT:    vandps %ymm0, %ymm1, %ymm2
+; AVX1-NEXT:    vpcmpeqd %xmm0, %xmm2, %xmm0
 ; AVX1-NEXT:    vpsrld $31, %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
-; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX1-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
+; AVX1-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vpsrld $31, %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vmovaps {{.*#+}} ymm2 = [256,512,1024,2048,4096,8192,16384,32768]
+; AVX1-NEXT:    vandps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm1, %xmm2
 ; AVX1-NEXT:    vpsrld $31, %xmm2, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
 ; AVX1-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
@@ -705,22 +712,21 @@ define <32 x i16> @ext_i32_32i16(i32 %a0) {
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX1-NEXT:    vmovaps {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
 ; AVX1-NEXT:    vandps %ymm2, %ymm0, %ymm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,2,4,8,16,32,64,128]
-; AVX1-NEXT:    vpcmpeqw %xmm3, %xmm0, %xmm4
-; AVX1-NEXT:    vpsrlw $15, %xmm4, %xmm4
+; AVX1-NEXT:    vpcmpeqw %xmm2, %xmm0, %xmm3
+; AVX1-NEXT:    vpsrlw $15, %xmm3, %xmm3
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [256,512,1024,2048,4096,8192,16384,32768]
-; AVX1-NEXT:    vpcmpeqw %xmm5, %xmm0, %xmm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [256,512,1024,2048,4096,8192,16384,32768]
+; AVX1-NEXT:    vpcmpeqw %xmm4, %xmm0, %xmm0
 ; AVX1-NEXT:    vpsrlw $15, %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm4, %ymm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm3, %ymm0
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[1,1,1,1,4,5,6,7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm1
 ; AVX1-NEXT:    vandps %ymm2, %ymm1, %ymm1
-; AVX1-NEXT:    vpcmpeqw %xmm3, %xmm1, %xmm2
+; AVX1-NEXT:    vpcmpeqw %xmm2, %xmm1, %xmm2
 ; AVX1-NEXT:    vpsrlw $15, %xmm2, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT:    vpcmpeqw %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vpcmpeqw %xmm4, %xmm1, %xmm1
 ; AVX1-NEXT:    vpsrlw $15, %xmm1, %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; AVX1-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll b/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
index b69d22e04d7deb7..13f1451bbc8b027 100644
--- a/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
+++ b/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
@@ -123,12 +123,12 @@ define <16 x i8> @f16xi8_i64(<16 x i8> %a) {
 define <32 x i8> @f32xi8_i16(<32 x i8> %a) {
 ; AVX-LABEL: f32xi8_i16:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX-NEXT:    vbroadcastss {{.*#+}} xmm2 = [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
-; AVX-NEXT:    vpaddb %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; AVX-NEXT:    vbroadcastss {{.*#+}} ymm1 = [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX-NEXT:    vpaddb %xmm1, %xmm2, %xmm2
+; AVX-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-NEXT:    vandps %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    retl
 ;
 ; ALL32-LABEL: f32xi8_i16:
@@ -140,12 +140,12 @@ define <32 x i8> @f32xi8_i16(<32 x i8> %a) {
 ;
 ; AVX-64-LABEL: f32xi8_i16:
 ; AVX-64:       # %bb.0:
-; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX-64-NEXT:    vbroadcastss {{.*#+}} xmm2 = [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
-; AVX-64-NEXT:    vpaddb %xmm2, %xmm1, %xmm1
-; AVX-64-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
-; AVX-64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-64-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX-64-NEXT:    vbroadcastss {{.*#+}} ymm1 = [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
+; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX-64-NEXT:    vpaddb %xmm1, %xmm2, %xmm2
+; AVX-64-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-64-NEXT:    vandps %ymm1, %ymm0, %ymm0
 ; AVX-64-NEXT:    retq
 ;
 ; ALL64-LABEL: f32xi8_i16:
@@ -245,12 +245,13 @@ define <32 x i8> @f32xi8_i64(<32 x i8> %a) {
 define <32 x i8> @f32xi8_i128(<32 x i8> %a) {
 ; AVX-LABEL: f32xi8_i128:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX-NEXT:    vpaddb %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX-NEXT:    vpaddb %xmm1, %xmm2, %xmm2
+; AVX-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-NEXT:    vandps %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    retl
 ;
 ; ALL32-LABEL: f32xi8_i128:
@@ -263,12 +264,13 @@ define <32 x i8> @f32xi8_i128(<32 x i8> %a) {
 ;
 ; AVX-64-LABEL: f32xi8_i128:
 ; AVX-64:       # %bb.0:
-; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX-64-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX-64-NEXT:    vpaddb %xmm2, %xmm1, %xmm1
-; AVX-64-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
-; AVX-64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-64-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX-64-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX-64-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX-64-NEXT:    vpaddb %xmm1, %xmm2, %xmm2
+; AVX-64-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-64-NEXT:    vandps %ymm1, %ymm0, %ymm0
 ; AVX-64-NEXT:    retq
 ;
 ; ALL64-LABEL: f32xi8_i128:
@@ -287,16 +289,15 @@ define <32 x i8> @f32xi8_i128(<32 x i8> %a) {
 define <64 x i8> @f64xi8_i16(<64 x i8> %a) {
 ; AVX-LABEL: f64xi8_i16:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX-NEXT:    vbroadcastss {{.*#+}} xmm3 = [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
-; AVX-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
-; AVX-NEXT:    vpaddb %xmm3, %xmm1, %xmm1
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
-; AVX-NEXT:    vpaddb %xmm3, %xmm0, %xmm0
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX-NEXT:    vbroadcastss {{.*#+}} ymm2 = [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
+; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX-NEXT:    vpaddb %xmm2, %xmm3, %xmm3
+; AVX-NEXT:    vpaddb %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX-NEXT:    vpaddb %xmm2, %xmm3, %xmm3
+; AVX-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
 ; AVX-NEXT:    vandps %ymm2, %ymm0, %ymm0
 ; AVX-NEXT:    vandps %ymm2, %ymm1, %ymm1
 ; AVX-NEXT:    retl
@@ -319,16 +320,15 @@ define <64 x i8> @f64xi8_i16(<64 x i8> %a) {
 ;
 ; AVX-64-LABEL: f64xi8_i16:
 ; AVX-64:       # %bb.0:
-; AVX-64-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX-64-NEXT:    vbroadcastss {{.*#+}} xmm3 = [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
-; AVX-64-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
-; AVX-64-NEXT:    vpaddb %xmm3, %xmm1, %xmm1
-; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX-64-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
-; AVX-64-NEXT:    vpaddb %xmm3, %xmm0, %xmm0
-; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX-64-NEXT:    vbroadcastss {{.*#+}} ymm2 = [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
+; AVX-64-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX-64-NEXT:    vpaddb %xmm2, %xmm3, %xmm3
+; AVX-64-NEXT:    vpaddb %xmm2, %xmm1, %xmm1
+; AVX-64-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX-64-NEXT:    vpaddb %xmm2, %xmm3, %xmm3
+; AVX-64-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
+; AVX-64-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
 ; AVX-64-NEXT:    vandps %ymm2, %ymm0, %ymm0
 ; AVX-64-NEXT:    vandps %ymm2, %ymm1, %ymm1
 ; AVX-64-NEXT:    retq
@@ -570,16 +570,15 @@ define <64 x i8> @f64xi8_i256(<64 x i8> %a) {
 ; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX-NEXT:    vmovdqa {{.*#+}} xmm3 = [16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
 ; AVX-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
-; AVX-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
 ; AVX-NEXT:    vpaddb %xmm4, %xmm1, %xmm1
 ; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
 ; AVX-NEXT:    vpaddb %xmm4, %xmm0, %xmm0
 ; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-NEXT:    vmovaps {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
-; AVX-NEXT:    vandps %ymm2, %ymm0, %ymm0
-; AVX-NEXT:    vandps %ymm2, %ymm1, %ymm1
+; AVX-NEXT:    vandps %ymm4, %ymm0, %ymm0
+; AVX-NEXT:    vandps %ymm4, %ymm1, %ymm1
 ; AVX-NEXT:    retl
 ;
 ; AVX2-LABEL: f64xi8_i256:
@@ -604,16 +603,15 @@ define <64 x i8> @f64xi8_i256(<64 x i8> %a) {
 ; AVX-64-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX-64-NEXT:    vmovdqa {{.*#+}} xmm3 = [16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
 ; AVX-64-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
-; AVX-64-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX-64-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
 ; AVX-64-NEXT:    vpaddb %xmm4, %xmm1, %xmm1
 ; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX-64-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
 ; AVX-64-NEXT:    vpaddb %xmm4, %xmm0, %xmm0
 ; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-64-NEXT:    vmovaps {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
-; AVX-64-NEXT:    vandps %ymm2, %ymm0, %ymm0
-; AVX-64-NEXT:    vandps %ymm2, %ymm1, %ymm1
+; AVX-64-NEXT:    vandps %ymm4, %ymm0, %ymm0
+; AVX-64-NEXT:    vandps %ymm4, %ymm1, %ymm1
 ; AVX-64-NEXT:    retq
 ;
 ; AVX2-64-LABEL: f64xi8_i256:
@@ -793,12 +791,13 @@ define <16 x i16> @f16xi16_i64(<16 x i16> %a) {
 define <16 x i16> @f16xi16_i128(<16 x i16> %a) {
 ; AVX-LABEL: f16xi16_i128:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7]
-; AVX-NEXT:    vpaddw %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vpaddw %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
+; AVX-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX-NEXT:    vpaddw %xmm1, %xmm2, %xmm2
+; AVX-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-NEXT:    vandps %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    retl
 ;
 ; ALL32-LABEL: f16xi16_i128:
@@ -811,12 +810,13 @@ define <16 x i16> @f16xi16_i128(<16 x i16> %a) {
 ;
 ; AVX-64-LABEL: f16xi16_i128:
 ; AVX-64:       # %bb.0:
-; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX-64-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7]
-; AVX-64-NEXT:    vpaddw %xmm2, %xmm1, %xmm1
-; AVX-64-NEXT:    vpaddw %xmm2, %xmm0, %xmm0
-; AVX-64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-64-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX-64-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
+; AVX-64-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX-64-NEXT:    vpaddw %xmm1, %xmm2, %xmm2
+; AVX-64-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
+; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-64-NEXT:    vandps %ymm1, %ymm0, %ymm0
 ; AVX-64-NEXT:    retq
 ;
 ; ALL64-LABEL: f16xi16_i128:
@@ -1048,16 +1048,15 @@ define <32 x i16> @f32xi16_i256(<32 x i16> %a) {
 ; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX-NEXT:    vmovdqa {{.*#+}} xmm3 = [8,9,10,11,12,13,14,15]
 ; AVX-NEXT:    vpaddw %xmm3, %xmm2, %xmm2
-; AVX-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,2,3,4,5,6,7]
+; AVX-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX-NEXT:    vpaddw %xmm4, %xmm1, %xmm1
 ; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX-NEXT:    vpaddw %xmm3, %xmm2, %xmm2
 ; AVX-NEXT:    vpaddw %xmm4, %xmm0, %xmm0
 ; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-NEXT:    vmovaps {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX-NEXT:    vandps %ymm2, %ymm0, %ymm0
-; AVX-NEXT:    vandps %ymm2, %ymm1, %ymm1
+; AVX-NEXT:    vandps %ymm4, %ymm0, %ymm0
+; AVX-NEXT:    vandps %ymm4, %ymm1, %ymm1
 ; AVX-NEXT:    retl
 ;
 ; AVX2-LABEL: f32xi16_i256:
@@ -1082,16 +1081,15 @@ define <32 x i16> @f32xi16_i256(<32 x i16> %a) {
 ; AVX-64-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX-64-NEXT:    vmovdqa {{.*#+}} xmm3 = [8,9,10,11,12,13,14,15]
 ; AVX-64-NEXT:    vpaddw %xmm3, %xmm2, %xmm2
-; AVX-64-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,2,3,4,5,6,7]
+; AVX-64-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX-64-NEXT:    vpaddw %xmm4, %xmm1, %xmm1
 ; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX-64-NEXT:    vpaddw %xmm3, %xmm2, %xmm2
 ; AVX-64-NEXT:    vpaddw %xmm4, %xmm0, %xmm0
 ; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-64-NEXT:    vmovaps {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX-64-NEXT:    vandps %ymm2, %ymm0, %ymm0
-; AVX-64-NEXT:    vandps %ymm2, %ymm1, %ymm1
+; AVX-64-NEXT:    vandps %ymm4, %ymm0, %ymm0
+; AVX-64-NEXT:    vandps %ymm4, %ymm1, %ymm1
 ; AVX-64-NEXT:    retq
 ;
 ; AVX2-64-LABEL: f32xi16_i256:
@@ -1197,12 +1195,13 @@ define <8 x i32> @f8xi32_i64(<8 x i32> %a) {
 define <8 x i32> @f8xi32_i128(<8 x i32> %a) {
 ; AVX-LABEL: f8xi32_i128:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3]
-; AVX-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = [0,1,2,3,0,1,2,3]
+; AVX-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX-NEXT:    vpaddd %xmm1, %xmm2, %xmm2
+; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-NEXT:    vandps %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    retl
 ;
 ; ALL32-LABEL: f8xi32_i128:
@@ -1215,12 +1214,13 @@ define <8 x i32> @f8xi32_i128(<8 x i32> %a) {
 ;
 ; AVX-64-LABEL: f8xi32_i128:
 ; AVX-64:       # %bb.0:
-; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX-64-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3]
-; AVX-64-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; AVX-64-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
-; AVX-64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-64-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX-64-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = [0,1,2,3,0,1,2,3]
+; AVX-64-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX-64-NEXT:    vpaddd %xmm1, %xmm2, %xmm2
+; AVX-64-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-64-NEXT:    vandps %ymm1, %ymm0, %ymm0
 ; AVX-64-NEXT:    retq
 ;
 ; ALL64-LABEL: f8xi32_i128:
@@ -1381,12 +1381,13 @@ define <16 x i32> @f16xi32_i128(<16 x i32> %a) {
 define <4 x i64> @f4xi64_i128(<4 x i64> %a) {
 ; AVX-LABEL: f4xi64_i128:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,0,1,0]
-; AVX-NEXT:    vpaddq %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = [0,0,1,0,0,0,1,0]
+; AVX-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX-NEXT:    vpaddq %xmm1, %xmm2, %xmm2
+; AVX-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-NEXT:    vandps %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    retl
 ;
 ; ALL32-LABEL: f4xi64_i128:
@@ -1399,12 +1400,13 @@ define <4 x i64> @f4xi64_i128(<4 x i64> %a) {
 ;
 ; AVX-64-LABEL: f4xi64_i128:
 ; AVX-64:       # %bb.0:
-; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX-64-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1]
-; AVX-64-NEXT:    vpaddq %xmm2, %xmm1, %xmm1
-; AVX-64-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
-; AVX-64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-64-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX-64-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = [0,1,0,1]
+; AVX-64-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX-64-NEXT:    vpaddq %xmm1, %xmm2, %xmm2
+; AVX-64-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-64-NEXT:    vandps %ymm1, %ymm0, %ymm0
 ; AVX-64-NEXT:    retq
 ;
 ; ALL64-LABEL: f4xi64_i128:
@@ -1501,16 +1503,15 @@ define <8 x i64> @f8xi64_i256(<8 x i64> %a) {
 ; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX-NEXT:    vmovdqa {{.*#+}} xmm3 = [2,0,3,0]
 ; AVX-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
-; AVX-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,0,1,0]
+; AVX-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,0,1,0,2,0,3,0]
 ; AVX-NEXT:    vpaddq %xmm4, %xmm1, %xmm1
 ; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
 ; AVX-NEXT:    vpaddq %xmm4, %xmm0, %xmm0
 ; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-NEXT:    vmovaps {{.*#+}} ymm2 = [0,0,1,0,2,0,3,0]
-; AVX-NEXT:    vandps %ymm2, %ymm0, %ymm0
-; AVX-NEXT:    vandps %ymm2, %ymm1, %ymm1
+; AVX-NEXT:    vandps %ymm4, %ymm0, %ymm0
+; AVX-NEXT:    vandps %ymm4, %ymm1, %ymm1
 ; AVX-NEXT:    retl
 ;
 ; AVX2-LABEL: f8xi64_i256:
@@ -1535,16 +1536,15 @@ define <8 x i64> @f8xi64_i256(<8 x i64> %a) {
 ; AVX-64-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX-64-NEXT:    vmovdqa {{.*#+}} xmm3 = [2,3]
 ; AVX-64-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
-; AVX-64-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1]
+; AVX-64-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,1,2,3]
 ; AVX-64-NEXT:    vpaddq %xmm4, %xmm1, %xmm1
 ; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX-64-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
 ; AVX-64-NEXT:    vpaddq %xmm4, %xmm0, %xmm0
 ; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-64-NEXT:    vmovaps {{.*#+}} ymm2 = [0,1,2,3]
-; AVX-64-NEXT:    vandps %ymm2, %ymm0, %ymm0
-; AVX-64-NEXT:    vandps %ymm2, %ymm1, %ymm1
+; AVX-64-NEXT:    vandps %ymm4, %ymm0, %ymm0
+; AVX-64-NEXT:    vandps %ymm4, %ymm1, %ymm1
 ; AVX-64-NEXT:    retq
 ;
 ; AVX2-64-LABEL: f8xi64_i256:
diff --git a/llvm/test/CodeGen/X86/constant-pool-sharing.ll b/llvm/test/CodeGen/X86/constant-pool-sharing.ll
index 5118eaccb8db27d..db5a7810974c9da 100644
--- a/llvm/test/CodeGen/X86/constant-pool-sharing.ll
+++ b/llvm/test/CodeGen/X86/constant-pool-sharing.ll
@@ -108,11 +108,11 @@ define void @store_repeated_constants(ptr %lo, ptr %hi) {
 ; AVX-LINUX-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = [18446744073709551615,0,18446744073709551615,0]
 ; AVX-LINUX-NEXT:    # ymm0 = mem[0,1,0,1]
 ; AVX-LINUX-NEXT:    vmovaps %ymm0, (%rdi)
-; AVX-LINUX-NEXT:    vmovaps {{.*#+}} xmm0 = [18446744073709551615,0]
-; AVX-LINUX-NEXT:    vmovaps %ymm0, 32(%rdi)
-; AVX-LINUX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; AVX-LINUX-NEXT:    vmovaps %ymm0, 32(%rsi)
 ; AVX-LINUX-NEXT:    vmovaps {{.*#+}} ymm0 = [18446744073709551615,0,0,18446744073709551615]
+; AVX-LINUX-NEXT:    vmovaps %xmm0, %xmm1
+; AVX-LINUX-NEXT:    vmovaps %ymm1, 32(%rdi)
+; AVX-LINUX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX-LINUX-NEXT:    vmovaps %ymm1, 32(%rsi)
 ; AVX-LINUX-NEXT:    vmovaps %ymm0, (%rsi)
 ; AVX-LINUX-NEXT:    vzeroupper
 ; AVX-LINUX-NEXT:    retq
@@ -122,11 +122,11 @@ define void @store_repeated_constants(ptr %lo, ptr %hi) {
 ; AVX-MSVC-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = [18446744073709551615,0,18446744073709551615,0]
 ; AVX-MSVC-NEXT:    # ymm0 = mem[0,1,0,1]
 ; AVX-MSVC-NEXT:    vmovaps %ymm0, (%rcx)
-; AVX-MSVC-NEXT:    vmovaps {{.*#+}} xmm0 = [18446744073709551615,0]
-; AVX-MSVC-NEXT:    vmovaps %ymm0, 32(%rcx)
-; AVX-MSVC-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; AVX-MSVC-NEXT:    vmovaps %ymm0, 32(%rdx)
 ; AVX-MSVC-NEXT:    vmovaps {{.*#+}} ymm0 = [18446744073709551615,0,0,18446744073709551615]
+; AVX-MSVC-NEXT:    vmovaps %xmm0, %xmm1
+; AVX-MSVC-NEXT:    vmovaps %ymm1, 32(%rcx)
+; AVX-MSVC-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX-MSVC-NEXT:    vmovaps %ymm1, 32(%rdx)
 ; AVX-MSVC-NEXT:    vmovaps %ymm0, (%rdx)
 ; AVX-MSVC-NEXT:    vzeroupper
 ; AVX-MSVC-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/insert-into-constant-vector.ll b/llvm/test/CodeGen/X86/insert-into-constant-vector.ll
index 7ea4d71385c6bc1..07787f3851bd907 100644
--- a/llvm/test/CodeGen/X86/insert-into-constant-vector.ll
+++ b/llvm/test/CodeGen/X86/insert-into-constant-vector.ll
@@ -412,9 +412,9 @@ define <8 x i64> @elt5_v8i64(i64 %x) {
 ;
 ; X64-AVX1-LABEL: elt5_v8i64:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm0 = <4,u>
-; X64-AVX1-NEXT:    vpinsrq $1, %rdi, %xmm0, %xmm0
-; X64-AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],mem[4,5,6,7]
+; X64-AVX1-NEXT:    vmovdqa {{.*#+}} ymm0 = <4,u,6,7>
+; X64-AVX1-NEXT:    vpinsrq $1, %rdi, %xmm0, %xmm1
+; X64-AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; X64-AVX1-NEXT:    vmovaps {{.*#+}} ymm0 = [42,1,2,3]
 ; X64-AVX1-NEXT:    retq
 ;
@@ -429,9 +429,9 @@ define <8 x i64> @elt5_v8i64(i64 %x) {
 ;
 ; X64-AVX2-LABEL: elt5_v8i64:
 ; X64-AVX2:       # %bb.0:
-; X64-AVX2-NEXT:    vmovdqa {{.*#+}} xmm0 = <4,u>
-; X64-AVX2-NEXT:    vpinsrq $1, %rdi, %xmm0, %xmm0
-; X64-AVX2-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3],mem[4,5,6,7]
+; X64-AVX2-NEXT:    vmovdqa {{.*#+}} ymm0 = <4,u,6,7>
+; X64-AVX2-NEXT:    vpinsrq $1, %rdi, %xmm0, %xmm1
+; X64-AVX2-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; X64-AVX2-NEXT:    vmovaps {{.*#+}} ymm0 = [42,1,2,3]
 ; X64-AVX2-NEXT:    retq
 ;
@@ -478,49 +478,47 @@ define <8 x double> @elt1_v8f64(double %x) {
 ;
 ; X86-AVX1-LABEL: elt1_v8f64:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    vmovaps {{.*#+}} xmm0 = <4.2E+1,u>
-; X86-AVX1-NEXT:    vmovhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
-; X86-AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; X86-AVX1-NEXT:    vmovaps {{.*#+}} ymm0 = <4.2E+1,u,2.0E+0,3.0E+0>
+; X86-AVX1-NEXT:    vmovhps {{.*#+}} xmm1 = xmm0[0,1],mem[0,1]
+; X86-AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; X86-AVX1-NEXT:    vmovaps {{.*#+}} ymm1 = [4.0E+0,5.0E+0,6.0E+0,7.0E+0]
 ; X86-AVX1-NEXT:    retl
 ;
 ; X64-AVX1-LABEL: elt1_v8f64:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vmovaps {{.*#+}} xmm1 = <4.2E+1,u>
+; X64-AVX1-NEXT:    vmovaps {{.*#+}} ymm1 = <4.2E+1,u,2.0E+0,3.0E+0>
 ; X64-AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; X64-AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; X64-AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; X64-AVX1-NEXT:    vmovaps {{.*#+}} ymm1 = [4.0E+0,5.0E+0,6.0E+0,7.0E+0]
 ; X64-AVX1-NEXT:    retq
 ;
 ; X86-AVX2-LABEL: elt1_v8f64:
 ; X86-AVX2:       # %bb.0:
-; X86-AVX2-NEXT:    vmovaps {{.*#+}} xmm0 = <4.2E+1,u>
-; X86-AVX2-NEXT:    vmovhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
-; X86-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; X86-AVX2-NEXT:    vmovaps {{.*#+}} ymm0 = <4.2E+1,u,2.0E+0,3.0E+0>
+; X86-AVX2-NEXT:    vmovhps {{.*#+}} xmm1 = xmm0[0,1],mem[0,1]
+; X86-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; X86-AVX2-NEXT:    vmovaps {{.*#+}} ymm1 = [4.0E+0,5.0E+0,6.0E+0,7.0E+0]
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-AVX2-LABEL: elt1_v8f64:
 ; X64-AVX2:       # %bb.0:
-; X64-AVX2-NEXT:    vmovaps {{.*#+}} xmm1 = <4.2E+1,u>
+; X64-AVX2-NEXT:    vmovaps {{.*#+}} ymm1 = <4.2E+1,u,2.0E+0,3.0E+0>
 ; X64-AVX2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; X64-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; X64-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; X64-AVX2-NEXT:    vmovaps {{.*#+}} ymm1 = [4.0E+0,5.0E+0,6.0E+0,7.0E+0]
 ; X64-AVX2-NEXT:    retq
 ;
 ; X86-AVX512F-LABEL: elt1_v8f64:
 ; X86-AVX512F:       # %bb.0:
-; X86-AVX512F-NEXT:    vmovaps {{.*#+}} xmm0 = <4.2E+1,u>
-; X86-AVX512F-NEXT:    vmovhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
-; X86-AVX512F-NEXT:    vmovaps {{.*#+}} zmm1 = <4.2E+1,u,2.0E+0,3.0E+0,4.0E+0,5.0E+0,6.0E+0,7.0E+0>
-; X86-AVX512F-NEXT:    vinsertf32x4 $0, %xmm0, %zmm1, %zmm0
+; X86-AVX512F-NEXT:    vmovaps {{.*#+}} zmm0 = <4.2E+1,u,2.0E+0,3.0E+0,4.0E+0,5.0E+0,6.0E+0,7.0E+0>
+; X86-AVX512F-NEXT:    vmovhps {{.*#+}} xmm1 = xmm0[0,1],mem[0,1]
+; X86-AVX512F-NEXT:    vinsertf32x4 $0, %xmm1, %zmm0, %zmm0
 ; X86-AVX512F-NEXT:    retl
 ;
 ; X64-AVX512F-LABEL: elt1_v8f64:
 ; X64-AVX512F:       # %bb.0:
-; X64-AVX512F-NEXT:    vmovaps {{.*#+}} xmm1 = <4.2E+1,u>
-; X64-AVX512F-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
 ; X64-AVX512F-NEXT:    vmovaps {{.*#+}} zmm1 = <4.2E+1,u,2.0E+0,3.0E+0,4.0E+0,5.0E+0,6.0E+0,7.0E+0>
+; X64-AVX512F-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
 ; X64-AVX512F-NEXT:    vinsertf32x4 $0, %xmm0, %zmm1, %zmm0
 ; X64-AVX512F-NEXT:    retq
    %ins = insertelement <8 x double> <double 42.0, double 1.0, double 2.0, double 3.0, double 4.0, double 5.0, double 6.0, double 7.0>, double %x, i32 1
diff --git a/llvm/test/CodeGen/X86/midpoint-int-vec-512.ll b/llvm/test/CodeGen/X86/midpoint-int-vec-512.ll
index 0f107427546a3d9..2fdf6ef224ca960 100644
--- a/llvm/test/CodeGen/X86/midpoint-int-vec-512.ll
+++ b/llvm/test/CodeGen/X86/midpoint-int-vec-512.ll
@@ -693,8 +693,8 @@ define <64 x i8> @vec512_i8_signed_reg_reg(<64 x i8> %a1, <64 x i8> %a2) nounwin
 ; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
 ; AVX512F-NEXT:    vpsrlw $1, %ymm2, %ymm2
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm5
-; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm5
-; AVX512F-NEXT:    vpbroadcastb {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} zmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512F-NEXT:    vpandq %zmm6, %zmm5, %zmm5
 ; AVX512F-NEXT:    vpand %ymm6, %ymm2, %ymm2
 ; AVX512F-NEXT:    vpxor %xmm7, %xmm7, %xmm7
 ; AVX512F-NEXT:    vpsubb %ymm2, %ymm7, %ymm2
@@ -724,8 +724,8 @@ define <64 x i8> @vec512_i8_signed_reg_reg(<64 x i8> %a1, <64 x i8> %a2) nounwin
 ; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
 ; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm2, %ymm2
 ; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm5
-; AVX512VL-FALLBACK-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm5
-; AVX512VL-FALLBACK-NEXT:    vpbroadcastd {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512VL-FALLBACK-NEXT:    vpbroadcastd {{.*#+}} zmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512VL-FALLBACK-NEXT:    vpandq %zmm6, %zmm5, %zmm5
 ; AVX512VL-FALLBACK-NEXT:    vpand %ymm6, %ymm2, %ymm2
 ; AVX512VL-FALLBACK-NEXT:    vpxor %xmm7, %xmm7, %xmm7
 ; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm2, %ymm7, %ymm2
@@ -779,8 +779,8 @@ define <64 x i8> @vec512_i8_unsigned_reg_reg(<64 x i8> %a1, <64 x i8> %a2) nounw
 ; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
 ; AVX512F-NEXT:    vpsrlw $1, %ymm2, %ymm2
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm4
-; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm4, %zmm4
-; AVX512F-NEXT:    vpbroadcastb {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} zmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512F-NEXT:    vpandq %zmm6, %zmm4, %zmm4
 ; AVX512F-NEXT:    vpand %ymm6, %ymm2, %ymm2
 ; AVX512F-NEXT:    vpxor %xmm7, %xmm7, %xmm7
 ; AVX512F-NEXT:    vpsubb %ymm2, %ymm7, %ymm2
@@ -810,8 +810,8 @@ define <64 x i8> @vec512_i8_unsigned_reg_reg(<64 x i8> %a1, <64 x i8> %a2) nounw
 ; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
 ; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm2, %ymm2
 ; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm4
-; AVX512VL-FALLBACK-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm4, %zmm4
-; AVX512VL-FALLBACK-NEXT:    vpbroadcastd {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512VL-FALLBACK-NEXT:    vpbroadcastd {{.*#+}} zmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512VL-FALLBACK-NEXT:    vpandq %zmm6, %zmm4, %zmm4
 ; AVX512VL-FALLBACK-NEXT:    vpand %ymm6, %ymm2, %ymm2
 ; AVX512VL-FALLBACK-NEXT:    vpxor %xmm7, %xmm7, %xmm7
 ; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm2, %ymm7, %ymm2
@@ -868,8 +868,8 @@ define <64 x i8> @vec512_i8_signed_mem_reg(ptr %a1_addr, <64 x i8> %a2) nounwind
 ; AVX512F-NEXT:    vpsrlw $1, %ymm0, %ymm0
 ; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm5
-; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm5
-; AVX512F-NEXT:    vpbroadcastb {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} zmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512F-NEXT:    vpandq %zmm6, %zmm5, %zmm5
 ; AVX512F-NEXT:    vpand %ymm6, %ymm1, %ymm1
 ; AVX512F-NEXT:    vpxor %xmm7, %xmm7, %xmm7
 ; AVX512F-NEXT:    vpsubb %ymm1, %ymm7, %ymm1
@@ -900,8 +900,8 @@ define <64 x i8> @vec512_i8_signed_mem_reg(ptr %a1_addr, <64 x i8> %a2) nounwind
 ; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm0, %ymm0
 ; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
 ; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm5
-; AVX512VL-FALLBACK-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm5
-; AVX512VL-FALLBACK-NEXT:    vpbroadcastd {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512VL-FALLBACK-NEXT:    vpbroadcastd {{.*#+}} zmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512VL-FALLBACK-NEXT:    vpandq %zmm6, %zmm5, %zmm5
 ; AVX512VL-FALLBACK-NEXT:    vpand %ymm6, %ymm1, %ymm1
 ; AVX512VL-FALLBACK-NEXT:    vpxor %xmm7, %xmm7, %xmm7
 ; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm1, %ymm7, %ymm1
@@ -958,8 +958,8 @@ define <64 x i8> @vec512_i8_signed_reg_mem(<64 x i8> %a1, ptr %a2_addr) nounwind
 ; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
 ; AVX512F-NEXT:    vpsrlw $1, %ymm2, %ymm2
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm5
-; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm5
-; AVX512F-NEXT:    vpbroadcastb {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} zmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512F-NEXT:    vpandq %zmm6, %zmm5, %zmm5
 ; AVX512F-NEXT:    vpand %ymm6, %ymm2, %ymm2
 ; AVX512F-NEXT:    vpxor %xmm7, %xmm7, %xmm7
 ; AVX512F-NEXT:    vpsubb %ymm2, %ymm7, %ymm2
@@ -990,8 +990,8 @@ define <64 x i8> @vec512_i8_signed_reg_mem(<64 x i8> %a1, ptr %a2_addr) nounwind
 ; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
 ; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm2, %ymm2
 ; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm5
-; AVX512VL-FALLBACK-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm5
-; AVX512VL-FALLBACK-NEXT:    vpbroadcastd {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512VL-FALLBACK-NEXT:    vpbroadcastd {{.*#+}} zmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512VL-FALLBACK-NEXT:    vpandq %zmm6, %zmm5, %zmm5
 ; AVX512VL-FALLBACK-NEXT:    vpand %ymm6, %ymm2, %ymm2
 ; AVX512VL-FALLBACK-NEXT:    vpxor %xmm7, %xmm7, %xmm7
 ; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm2, %ymm7, %ymm2
@@ -1049,8 +1049,8 @@ define <64 x i8> @vec512_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
 ; AVX512F-NEXT:    vpsrlw $1, %ymm0, %ymm0
 ; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm5
-; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm5
-; AVX512F-NEXT:    vpbroadcastb {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} zmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512F-NEXT:    vpandq %zmm6, %zmm5, %zmm5
 ; AVX512F-NEXT:    vpand %ymm6, %ymm1, %ymm1
 ; AVX512F-NEXT:    vpxor %xmm7, %xmm7, %xmm7
 ; AVX512F-NEXT:    vpsubb %ymm1, %ymm7, %ymm1
@@ -1082,8 +1082,8 @@ define <64 x i8> @vec512_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
 ; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm0, %ymm0
 ; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
 ; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm5
-; AVX512VL-FALLBACK-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm5
-; AVX512VL-FALLBACK-NEXT:    vpbroadcastd {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512VL-FALLBACK-NEXT:    vpbroadcastd {{.*#+}} zmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512VL-FALLBACK-NEXT:    vpandq %zmm6, %zmm5, %zmm5
 ; AVX512VL-FALLBACK-NEXT:    vpand %ymm6, %ymm1, %ymm1
 ; AVX512VL-FALLBACK-NEXT:    vpxor %xmm7, %xmm7, %xmm7
 ; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm1, %ymm7, %ymm1
diff --git a/llvm/test/CodeGen/X86/pr57340.ll b/llvm/test/CodeGen/X86/pr57340.ll
index a6ae7ce5ccd15dd..f0bb0c7339f279e 100644
--- a/llvm/test/CodeGen/X86/pr57340.ll
+++ b/llvm/test/CodeGen/X86/pr57340.ll
@@ -5,16 +5,15 @@ define void @main.41() local_unnamed_addr #1 {
 ; CHECK-LABEL: main.41:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vpbroadcastw (%rax), %xmm0
-; CHECK-NEXT:    vmovdqu (%rax), %ymm2
-; CHECK-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm3
-; CHECK-NEXT:    vmovdqa {{.*#+}} ymm1 = [31,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
-; CHECK-NEXT:    vpermi2w %ymm3, %ymm2, %ymm1
+; CHECK-NEXT:    vmovdqu (%rax), %ymm1
+; CHECK-NEXT:    vmovdqa {{.*#+}} ymm4 = [31,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
+; CHECK-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm2
+; CHECK-NEXT:    vpermt2w %ymm2, %ymm4, %ymm1
 ; CHECK-NEXT:    vpextrw $0, %xmm0, %eax
 ; CHECK-NEXT:    movzwl %ax, %eax
 ; CHECK-NEXT:    vmovd %eax, %xmm0
 ; CHECK-NEXT:    vcvtph2ps %xmm0, %xmm0
-; CHECK-NEXT:    vmovdqu (%rax), %xmm5
-; CHECK-NEXT:    vpextrw $0, %xmm5, %eax
+; CHECK-NEXT:    vpextrw $0, %xmm4, %eax
 ; CHECK-NEXT:    movzwl %ax, %eax
 ; CHECK-NEXT:    vmovd %eax, %xmm2
 ; CHECK-NEXT:    vcvtph2ps %xmm2, %xmm2
@@ -26,14 +25,14 @@ define void @main.41() local_unnamed_addr #1 {
 ; CHECK-NEXT:    vpextrw $0, %xmm3, %eax
 ; CHECK-NEXT:    movzwl %ax, %eax
 ; CHECK-NEXT:    vmovd %eax, %xmm3
-; CHECK-NEXT:    vpsrld $16, %xmm5, %xmm4
-; CHECK-NEXT:    vpextrw $0, %xmm4, %eax
+; CHECK-NEXT:    vpsrld $16, %xmm4, %xmm5
+; CHECK-NEXT:    vpextrw $0, %xmm5, %eax
 ; CHECK-NEXT:    movzwl %ax, %eax
-; CHECK-NEXT:    vmovd %eax, %xmm4
+; CHECK-NEXT:    vmovd %eax, %xmm5
 ; CHECK-NEXT:    setne %al
 ; CHECK-NEXT:    andl $1, %eax
 ; CHECK-NEXT:    vcvtph2ps %xmm3, %xmm6
-; CHECK-NEXT:    vcvtph2ps %xmm4, %xmm3
+; CHECK-NEXT:    vcvtph2ps %xmm5, %xmm3
 ; CHECK-NEXT:    kmovw %eax, %k0
 ; CHECK-NEXT:    vucomiss %xmm6, %xmm3
 ; CHECK-NEXT:    setnp %al
@@ -47,12 +46,12 @@ define void @main.41() local_unnamed_addr #1 {
 ; CHECK-NEXT:    movw $-5, %ax
 ; CHECK-NEXT:    kmovd %eax, %k1
 ; CHECK-NEXT:    kandw %k1, %k0, %k0
-; CHECK-NEXT:    vprolq $32, %xmm1, %xmm4
-; CHECK-NEXT:    vpextrw $0, %xmm4, %eax
+; CHECK-NEXT:    vprolq $32, %xmm1, %xmm5
+; CHECK-NEXT:    vpextrw $0, %xmm5, %eax
 ; CHECK-NEXT:    movzwl %ax, %eax
-; CHECK-NEXT:    vmovd %eax, %xmm4
-; CHECK-NEXT:    vcvtph2ps %xmm4, %xmm4
-; CHECK-NEXT:    vucomiss %xmm4, %xmm0
+; CHECK-NEXT:    vmovd %eax, %xmm5
+; CHECK-NEXT:    vcvtph2ps %xmm5, %xmm5
+; CHECK-NEXT:    vucomiss %xmm5, %xmm0
 ; CHECK-NEXT:    setnp %al
 ; CHECK-NEXT:    sete %cl
 ; CHECK-NEXT:    testb %al, %cl
@@ -63,18 +62,18 @@ define void @main.41() local_unnamed_addr #1 {
 ; CHECK-NEXT:    korw %k1, %k0, %k0
 ; CHECK-NEXT:    movw $-9, %ax
 ; CHECK-NEXT:    kmovd %eax, %k1
-; CHECK-NEXT:    vpsrlq $48, %xmm1, %xmm4
-; CHECK-NEXT:    vpextrw $0, %xmm4, %eax
+; CHECK-NEXT:    vpsrlq $48, %xmm1, %xmm5
+; CHECK-NEXT:    vpextrw $0, %xmm5, %eax
 ; CHECK-NEXT:    kandw %k1, %k0, %k0
 ; CHECK-NEXT:    movzwl %ax, %eax
-; CHECK-NEXT:    vmovd %eax, %xmm4
-; CHECK-NEXT:    vcvtph2ps %xmm4, %xmm6
-; CHECK-NEXT:    vpsrlq $48, %xmm5, %xmm4
-; CHECK-NEXT:    vpextrw $0, %xmm4, %eax
+; CHECK-NEXT:    vmovd %eax, %xmm5
+; CHECK-NEXT:    vcvtph2ps %xmm5, %xmm6
+; CHECK-NEXT:    vpsrlq $48, %xmm4, %xmm5
+; CHECK-NEXT:    vpextrw $0, %xmm5, %eax
 ; CHECK-NEXT:    movzwl %ax, %eax
-; CHECK-NEXT:    vmovd %eax, %xmm4
-; CHECK-NEXT:    vcvtph2ps %xmm4, %xmm4
-; CHECK-NEXT:    vucomiss %xmm6, %xmm4
+; CHECK-NEXT:    vmovd %eax, %xmm5
+; CHECK-NEXT:    vcvtph2ps %xmm5, %xmm5
+; CHECK-NEXT:    vucomiss %xmm6, %xmm5
 ; CHECK-NEXT:    setnp %al
 ; CHECK-NEXT:    sete %cl
 ; CHECK-NEXT:    testb %al, %cl
@@ -107,7 +106,7 @@ define void @main.41() local_unnamed_addr #1 {
 ; CHECK-NEXT:    movzwl %ax, %eax
 ; CHECK-NEXT:    vmovd %eax, %xmm6
 ; CHECK-NEXT:    vcvtph2ps %xmm6, %xmm7
-; CHECK-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm5[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; CHECK-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm4[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; CHECK-NEXT:    vpextrw $0, %xmm6, %eax
 ; CHECK-NEXT:    kandw %k1, %k0, %k0
 ; CHECK-NEXT:    movzwl %ax, %eax
@@ -147,12 +146,12 @@ define void @main.41() local_unnamed_addr #1 {
 ; CHECK-NEXT:    movzwl %ax, %eax
 ; CHECK-NEXT:    vmovd %eax, %xmm7
 ; CHECK-NEXT:    vcvtph2ps %xmm7, %xmm7
-; CHECK-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm5[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; CHECK-NEXT:    vpextrw $0, %xmm5, %eax
+; CHECK-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm4[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; CHECK-NEXT:    vpextrw $0, %xmm4, %eax
 ; CHECK-NEXT:    movzwl %ax, %eax
-; CHECK-NEXT:    vmovd %eax, %xmm5
-; CHECK-NEXT:    vcvtph2ps %xmm5, %xmm5
-; CHECK-NEXT:    vucomiss %xmm7, %xmm5
+; CHECK-NEXT:    vmovd %eax, %xmm4
+; CHECK-NEXT:    vcvtph2ps %xmm4, %xmm4
+; CHECK-NEXT:    vucomiss %xmm7, %xmm4
 ; CHECK-NEXT:    setnp %al
 ; CHECK-NEXT:    sete %cl
 ; CHECK-NEXT:    testb %al, %cl
@@ -220,7 +219,7 @@ define void @main.41() local_unnamed_addr #1 {
 ; CHECK-NEXT:    movzwl %ax, %eax
 ; CHECK-NEXT:    vmovd %eax, %xmm2
 ; CHECK-NEXT:    vcvtph2ps %xmm2, %xmm2
-; CHECK-NEXT:    vucomiss %xmm2, %xmm4
+; CHECK-NEXT:    vucomiss %xmm2, %xmm5
 ; CHECK-NEXT:    setnp %al
 ; CHECK-NEXT:    sete %cl
 ; CHECK-NEXT:    testb %al, %cl
@@ -286,7 +285,7 @@ define void @main.41() local_unnamed_addr #1 {
 ; CHECK-NEXT:    vmovd %eax, %xmm0
 ; CHECK-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; CHECK-NEXT:    kshiftrw $1, %k0, %k0
-; CHECK-NEXT:    vucomiss %xmm0, %xmm5
+; CHECK-NEXT:    vucomiss %xmm0, %xmm4
 ; CHECK-NEXT:    setnp %al
 ; CHECK-NEXT:    sete %cl
 ; CHECK-NEXT:    testb %al, %cl
diff --git a/llvm/test/CodeGen/X86/splat-for-size.ll b/llvm/test/CodeGen/X86/splat-for-size.ll
index 5b54d941198d4e5..6325f083d59bd10 100644
--- a/llvm/test/CodeGen/X86/splat-for-size.ll
+++ b/llvm/test/CodeGen/X86/splat-for-size.ll
@@ -387,9 +387,9 @@ define <32 x i8> @splat_v32i8_pgso(<32 x i8> %x) !prof !14 {
 define <8 x i64> @pr23259() #1 {
 ; AVX-LABEL: pr23259:
 ; AVX:       # %bb.0: # %entry
-; AVX-NEXT:    vmovaps A+16(%rip), %xmm0
-; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX-NEXT:    vbroadcastsd {{.*#+}} ymm0 = [1,1,1,1]
+; AVX-NEXT:    vblendps {{.*#+}} xmm1 = mem[0,1],xmm0[2,3]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX-NEXT:    vbroadcastsd {{.*#+}} ymm1 = [1,1,1,1]
 ; AVX-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/subvector-broadcast.ll b/llvm/test/CodeGen/X86/subvector-broadcast.ll
index e104d9d2b4c1570..3aec8330596e181 100644
--- a/llvm/test/CodeGen/X86/subvector-broadcast.ll
+++ b/llvm/test/CodeGen/X86/subvector-broadcast.ll
@@ -803,24 +803,23 @@ define <16 x i32> @test_broadcast_4i32_16i32_chain(ptr %p0, ptr %p1) {
 define dso_local void @fallback_broadcast_v4i64_to_v8i64(<4 x i64> %a, <8 x i64> %b) {
 ; X86-AVX1-LABEL: fallback_broadcast_v4i64_to_v8i64:
 ; X86-AVX1:       # %bb.0: # %entry
-; X86-AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [1,0,2,0]
-; X86-AVX1-NEXT:    vpaddq %xmm4, %xmm0, %xmm3
+; X86-AVX1-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,0,2,0,3,0,4,0]
+; X86-AVX1-NEXT:    vpaddq %xmm3, %xmm0, %xmm4
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; X86-AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [3,0,4,0]
 ; X86-AVX1-NEXT:    vpaddq %xmm5, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovaps {{.*#+}} ymm6 = [1,0,2,0,3,0,4,0]
-; X86-AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm7
-; X86-AVX1-NEXT:    vpaddq %xmm5, %xmm7, %xmm7
-; X86-AVX1-NEXT:    vpaddq %xmm4, %xmm2, %xmm2
-; X86-AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm2, %ymm2
-; X86-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
-; X86-AVX1-NEXT:    vpaddq %xmm5, %xmm7, %xmm5
-; X86-AVX1-NEXT:    vpaddq %xmm4, %xmm1, %xmm1
+; X86-AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm6
+; X86-AVX1-NEXT:    vpaddq %xmm5, %xmm6, %xmm6
+; X86-AVX1-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
+; X86-AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm2, %ymm2
+; X86-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm6
+; X86-AVX1-NEXT:    vpaddq %xmm5, %xmm6, %xmm5
+; X86-AVX1-NEXT:    vpaddq %xmm3, %xmm1, %xmm1
 ; X86-AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm1, %ymm1
-; X86-AVX1-NEXT:    vandps %ymm6, %ymm1, %ymm1
-; X86-AVX1-NEXT:    vandps %ymm6, %ymm2, %ymm2
+; X86-AVX1-NEXT:    vandps %ymm3, %ymm1, %ymm1
+; X86-AVX1-NEXT:    vandps %ymm3, %ymm2, %ymm2
 ; X86-AVX1-NEXT:    vmovdqu %xmm0, ga4+16
-; X86-AVX1-NEXT:    vmovdqu %xmm3, ga4
+; X86-AVX1-NEXT:    vmovdqu %xmm4, ga4
 ; X86-AVX1-NEXT:    vmovups %ymm2, gb4+32
 ; X86-AVX1-NEXT:    vmovups %ymm1, gb4
 ; X86-AVX1-NEXT:    vzeroupper
@@ -854,22 +853,21 @@ define dso_local void @fallback_broadcast_v4i64_to_v8i64(<4 x i64> %a, <8 x i64>
 ;
 ; X64-AVX1-LABEL: fallback_broadcast_v4i64_to_v8i64:
 ; X64-AVX1:       # %bb.0: # %entry
-; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,2]
+; X64-AVX1-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,2,3,4]
 ; X64-AVX1-NEXT:    vpaddq %xmm3, %xmm0, %xmm4
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [3,4]
 ; X64-AVX1-NEXT:    vpaddq %xmm5, %xmm0, %xmm0
-; X64-AVX1-NEXT:    vmovaps {{.*#+}} ymm6 = [1,2,3,4]
-; X64-AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm7
-; X64-AVX1-NEXT:    vpaddq %xmm5, %xmm7, %xmm7
+; X64-AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm6
+; X64-AVX1-NEXT:    vpaddq %xmm5, %xmm6, %xmm6
 ; X64-AVX1-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
-; X64-AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm2, %ymm2
-; X64-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
-; X64-AVX1-NEXT:    vpaddq %xmm5, %xmm7, %xmm5
+; X64-AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm2, %ymm2
+; X64-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm6
+; X64-AVX1-NEXT:    vpaddq %xmm5, %xmm6, %xmm5
 ; X64-AVX1-NEXT:    vpaddq %xmm3, %xmm1, %xmm1
 ; X64-AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm1, %ymm1
-; X64-AVX1-NEXT:    vandps %ymm6, %ymm1, %ymm1
-; X64-AVX1-NEXT:    vandps %ymm6, %ymm2, %ymm2
+; X64-AVX1-NEXT:    vandps %ymm3, %ymm1, %ymm1
+; X64-AVX1-NEXT:    vandps %ymm3, %ymm2, %ymm2
 ; X64-AVX1-NEXT:    vmovdqu %xmm0, ga4+16(%rip)
 ; X64-AVX1-NEXT:    vmovdqu %xmm4, ga4(%rip)
 ; X64-AVX1-NEXT:    vmovups %ymm2, gb4+32(%rip)
diff --git a/llvm/test/CodeGen/X86/vec_fabs.ll b/llvm/test/CodeGen/X86/vec_fabs.ll
index ececfce210f563d..7e915c9ee040764 100644
--- a/llvm/test/CodeGen/X86/vec_fabs.ll
+++ b/llvm/test/CodeGen/X86/vec_fabs.ll
@@ -2813,75 +2813,29 @@ define i64 @fabs_v2f32_2() {
 
 declare <2 x float> @llvm.fabs.v2f32(<2 x float> %p)
 
-; PR70947 - TODO remove duplicate xmm/ymm constant loads
+; PR70947 - remove duplicate xmm/ymm constant loads
 define void @PR70947(ptr %src, ptr %dst) {
-; X86-AVX1-LABEL: PR70947:
-; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX1-NEXT:    vmovups (%ecx), %ymm0
-; X86-AVX1-NEXT:    vmovups 32(%ecx), %xmm1
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1
-; X86-AVX1-NEXT:    vmovups %ymm0, (%eax)
-; X86-AVX1-NEXT:    vmovups %xmm1, 16(%eax)
-; X86-AVX1-NEXT:    vzeroupper
-; X86-AVX1-NEXT:    retl
-;
-; X86-AVX2-LABEL: PR70947:
-; X86-AVX2:       # %bb.0:
-; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX2-NEXT:    vbroadcastsd {{.*#+}} ymm0 = [NaN,NaN,NaN,NaN]
-; X86-AVX2-NEXT:    vandps (%ecx), %ymm0, %ymm1
-; X86-AVX2-NEXT:    vandps 32(%ecx), %xmm0, %xmm0
-; X86-AVX2-NEXT:    vmovups %ymm1, (%eax)
-; X86-AVX2-NEXT:    vmovups %xmm0, 16(%eax)
-; X86-AVX2-NEXT:    vzeroupper
-; X86-AVX2-NEXT:    retl
-;
-; X86-AVX512-LABEL: PR70947:
-; X86-AVX512:       # %bb.0:
-; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX512-NEXT:    vbroadcastsd {{.*#+}} ymm0 = [NaN,NaN,NaN,NaN]
-; X86-AVX512-NEXT:    vandps (%ecx), %ymm0, %ymm1
-; X86-AVX512-NEXT:    vandps 32(%ecx), %xmm0, %xmm0
-; X86-AVX512-NEXT:    vmovups %ymm1, (%eax)
-; X86-AVX512-NEXT:    vmovups %xmm0, 16(%eax)
-; X86-AVX512-NEXT:    vzeroupper
-; X86-AVX512-NEXT:    retl
-;
-; X64-AVX1-LABEL: PR70947:
-; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vmovups (%rdi), %ymm0
-; X64-AVX1-NEXT:    vmovups 32(%rdi), %xmm1
-; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; X64-AVX1-NEXT:    vmovups %ymm0, (%rsi)
-; X64-AVX1-NEXT:    vmovups %xmm1, 16(%rsi)
-; X64-AVX1-NEXT:    vzeroupper
-; X64-AVX1-NEXT:    retq
-;
-; X64-AVX2-LABEL: PR70947:
-; X64-AVX2:       # %bb.0:
-; X64-AVX2-NEXT:    vbroadcastsd {{.*#+}} ymm0 = [NaN,NaN,NaN,NaN]
-; X64-AVX2-NEXT:    vandps (%rdi), %ymm0, %ymm1
-; X64-AVX2-NEXT:    vandps 32(%rdi), %xmm0, %xmm0
-; X64-AVX2-NEXT:    vmovups %ymm1, (%rsi)
-; X64-AVX2-NEXT:    vmovups %xmm0, 16(%rsi)
-; X64-AVX2-NEXT:    vzeroupper
-; X64-AVX2-NEXT:    retq
+; X86-LABEL: PR70947:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    vbroadcastsd {{.*#+}} ymm0 = [NaN,NaN,NaN,NaN]
+; X86-NEXT:    vandps (%ecx), %ymm0, %ymm1
+; X86-NEXT:    vandps 32(%ecx), %xmm0, %xmm0
+; X86-NEXT:    vmovups %ymm1, (%eax)
+; X86-NEXT:    vmovups %xmm0, 16(%eax)
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
 ;
-; X64-AVX512-LABEL: PR70947:
-; X64-AVX512:       # %bb.0:
-; X64-AVX512-NEXT:    vbroadcastsd {{.*#+}} ymm0 = [NaN,NaN,NaN,NaN]
-; X64-AVX512-NEXT:    vandps (%rdi), %ymm0, %ymm1
-; X64-AVX512-NEXT:    vandps 32(%rdi), %xmm0, %xmm0
-; X64-AVX512-NEXT:    vmovups %ymm1, (%rsi)
-; X64-AVX512-NEXT:    vmovups %xmm0, 16(%rsi)
-; X64-AVX512-NEXT:    vzeroupper
-; X64-AVX512-NEXT:    retq
+; X64-LABEL: PR70947:
+; X64:       # %bb.0:
+; X64-NEXT:    vbroadcastsd {{.*#+}} ymm0 = [NaN,NaN,NaN,NaN]
+; X64-NEXT:    vandps (%rdi), %ymm0, %ymm1
+; X64-NEXT:    vandps 32(%rdi), %xmm0, %xmm0
+; X64-NEXT:    vmovups %ymm1, (%rsi)
+; X64-NEXT:    vmovups %xmm0, 16(%rsi)
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
   %src4 = getelementptr inbounds double, ptr %src, i64 4
   %dst4 = getelementptr inbounds i32, ptr %dst, i64 4
   %ld0 = load <4 x double>, ptr %src, align 8
diff --git a/llvm/test/CodeGen/X86/vec_int_to_fp.ll b/llvm/test/CodeGen/X86/vec_int_to_fp.ll
index 18cc77c239b7815..7579c89ec438989 100644
--- a/llvm/test/CodeGen/X86/vec_int_to_fp.ll
+++ b/llvm/test/CodeGen/X86/vec_int_to_fp.ll
@@ -4369,30 +4369,29 @@ define <4 x float> @uitofp_load_4i64_to_4f32(ptr%a) {
 ;
 ; AVX1-LABEL: uitofp_load_4i64_to_4f32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd (%rdi), %ymm0
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX1-NEXT:    vpsrlq $1, %xmm0, %xmm1
 ; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX1-NEXT:    vpsrlq $1, %xmm1, %xmm3
-; AVX1-NEXT:    vpsrlq $1, %xmm2, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
-; AVX1-NEXT:    vandpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm4
-; AVX1-NEXT:    vorpd %ymm4, %ymm3, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm0, %ymm3, %ymm0, %ymm0
-; AVX1-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX1-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm3
-; AVX1-NEXT:    vmovq %xmm0, %rax
-; AVX1-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm4
+; AVX1-NEXT:    vpsrlq $1, %xmm2, %xmm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1-NEXT:    vandpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm3
+; AVX1-NEXT:    vorpd %ymm3, %ymm1, %ymm1
+; AVX1-NEXT:    vblendvpd %ymm0, %ymm1, %ymm0, %ymm1
+; AVX1-NEXT:    vpextrq $1, %xmm1, %rax
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm4, %xmm3
+; AVX1-NEXT:    vmovq %xmm1, %rax
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm4, %xmm4
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[2,3]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT:    vmovq %xmm0, %rax
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT:    vmovq %xmm1, %rax
 ; AVX1-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm4
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
-; AVX1-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX1-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm0
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[0]
-; AVX1-NEXT:    vaddps %xmm0, %xmm0, %xmm3
-; AVX1-NEXT:    vpackssdw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vblendvps %xmm1, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpextrq $1, %xmm1, %rax
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm1
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[0]
+; AVX1-NEXT:    vaddps %xmm1, %xmm1, %xmm3
+; AVX1-NEXT:    vpackssdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vblendvps %xmm0, %xmm3, %xmm1, %xmm0
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
@@ -4403,23 +4402,22 @@ define <4 x float> @uitofp_load_4i64_to_4f32(ptr%a) {
 ; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm1
 ; AVX2-NEXT:    vpsrlq $1, %ymm0, %ymm2
 ; AVX2-NEXT:    vpor %ymm1, %ymm2, %ymm1
-; AVX2-NEXT:    vblendvpd %ymm0, %ymm1, %ymm0, %ymm0
-; AVX2-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX2-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm1
-; AVX2-NEXT:    vmovq %xmm0, %rax
-; AVX2-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
-; AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT:    vmovq %xmm0, %rax
+; AVX2-NEXT:    vblendvpd %ymm0, %ymm1, %ymm0, %ymm1
+; AVX2-NEXT:    vpextrq $1, %xmm1, %rax
 ; AVX2-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
-; AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
-; AVX2-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX2-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
-; AVX2-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX2-NEXT:    vaddps %xmm0, %xmm0, %xmm1
-; AVX2-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX2-NEXT:    vpackssdw 16(%rdi), %xmm2, %xmm2
-; AVX2-NEXT:    vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vmovq %xmm1, %rax
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm3
+; AVX2-NEXT:    vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT:    vmovq %xmm1, %rax
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm4, %xmm3
+; AVX2-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
+; AVX2-NEXT:    vpextrq $1, %xmm1, %rax
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm4, %xmm1
+; AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[0]
+; AVX2-NEXT:    vaddps %xmm1, %xmm1, %xmm2
+; AVX2-NEXT:    vpackssdw 16(%rdi), %xmm0, %xmm0
+; AVX2-NEXT:    vblendvps %xmm0, %xmm2, %xmm1, %xmm0
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
@@ -4812,56 +4810,54 @@ define <8 x float> @uitofp_load_8i64_to_8f32(ptr%a) {
 ;
 ; AVX1-LABEL: uitofp_load_8i64_to_8f32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd (%rdi), %ymm2
-; AVX1-NEXT:    vmovapd 32(%rdi), %ymm3
-; AVX1-NEXT:    vbroadcastsd {{.*#+}} ymm4 = [1,1,1,1]
-; AVX1-NEXT:    vandpd %ymm4, %ymm3, %ymm5
-; AVX1-NEXT:    vmovaps (%rdi), %xmm0
-; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX1-NEXT:    vmovdqa 32(%rdi), %xmm6
-; AVX1-NEXT:    vpsrlq $1, %xmm6, %xmm7
-; AVX1-NEXT:    vmovdqa 48(%rdi), %xmm8
-; AVX1-NEXT:    vpsrlq $1, %xmm8, %xmm9
-; AVX1-NEXT:    vinsertf128 $1, %xmm9, %ymm7, %ymm7
-; AVX1-NEXT:    vorpd %ymm5, %ymm7, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm5, %ymm3, %ymm3
+; AVX1-NEXT:    vmovaps (%rdi), %ymm0
+; AVX1-NEXT:    vmovaps 32(%rdi), %ymm1
+; AVX1-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [1,1,1,1]
+; AVX1-NEXT:    vandps %ymm2, %ymm1, %ymm3
+; AVX1-NEXT:    vpsrlq $1, %xmm1, %xmm4
+; AVX1-NEXT:    vmovdqa 48(%rdi), %xmm5
+; AVX1-NEXT:    vpsrlq $1, %xmm5, %xmm6
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm4, %ymm4
+; AVX1-NEXT:    vorps %ymm3, %ymm4, %ymm3
+; AVX1-NEXT:    vblendvpd %ymm1, %ymm3, %ymm1, %ymm3
 ; AVX1-NEXT:    vpextrq $1, %xmm3, %rax
-; AVX1-NEXT:    vcvtsi2ss %rax, %xmm10, %xmm5
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm7, %xmm4
 ; AVX1-NEXT:    vmovq %xmm3, %rax
-; AVX1-NEXT:    vcvtsi2ss %rax, %xmm10, %xmm7
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm5 = xmm7[0],xmm5[0],xmm7[2,3]
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm7, %xmm6
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[2,3]
 ; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm3
 ; AVX1-NEXT:    vmovq %xmm3, %rax
-; AVX1-NEXT:    vcvtsi2ss %rax, %xmm10, %xmm7
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm5 = xmm5[0,1],xmm7[0],xmm5[3]
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm7, %xmm6
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm6[0],xmm4[3]
 ; AVX1-NEXT:    vpextrq $1, %xmm3, %rax
-; AVX1-NEXT:    vcvtsi2ss %rax, %xmm10, %xmm3
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm3 = xmm5[0,1,2],xmm3[0]
-; AVX1-NEXT:    vaddps %xmm3, %xmm3, %xmm5
-; AVX1-NEXT:    vpackssdw %xmm8, %xmm6, %xmm6
-; AVX1-NEXT:    vblendvps %xmm6, %xmm5, %xmm3, %xmm3
-; AVX1-NEXT:    vandpd %ymm4, %ymm2, %ymm4
-; AVX1-NEXT:    vpsrlq $1, %xmm0, %xmm5
-; AVX1-NEXT:    vpsrlq $1, %xmm1, %xmm6
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm5, %ymm5
-; AVX1-NEXT:    vorpd %ymm4, %ymm5, %ymm4
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm4, %ymm2, %ymm2
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm7, %xmm3
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[0]
+; AVX1-NEXT:    vaddps %xmm3, %xmm3, %xmm4
+; AVX1-NEXT:    vpackssdw %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vblendvps %xmm1, %xmm4, %xmm3, %xmm1
+; AVX1-NEXT:    vandps %ymm2, %ymm0, %ymm2
+; AVX1-NEXT:    vpsrlq $1, %xmm0, %xmm3
+; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm4
+; AVX1-NEXT:    vpsrlq $1, %xmm4, %xmm5
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm3, %ymm3
+; AVX1-NEXT:    vorps %ymm2, %ymm3, %ymm2
+; AVX1-NEXT:    vblendvpd %ymm0, %ymm2, %ymm0, %ymm2
 ; AVX1-NEXT:    vpextrq $1, %xmm2, %rax
-; AVX1-NEXT:    vcvtsi2ss %rax, %xmm10, %xmm4
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm7, %xmm3
 ; AVX1-NEXT:    vmovq %xmm2, %rax
-; AVX1-NEXT:    vcvtsi2ss %rax, %xmm10, %xmm5
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[2,3]
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm7, %xmm5
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[2,3]
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
 ; AVX1-NEXT:    vmovq %xmm2, %rax
-; AVX1-NEXT:    vcvtsi2ss %rax, %xmm10, %xmm5
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm5[0],xmm4[3]
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm7, %xmm5
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm5[0],xmm3[3]
 ; AVX1-NEXT:    vpextrq $1, %xmm2, %rax
-; AVX1-NEXT:    vcvtsi2ss %rax, %xmm10, %xmm2
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm2 = xmm4[0,1,2],xmm2[0]
-; AVX1-NEXT:    vaddps %xmm2, %xmm2, %xmm4
-; AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vblendvps %xmm0, %xmm4, %xmm2, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm7, %xmm2
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[0]
+; AVX1-NEXT:    vaddps %xmm2, %xmm2, %xmm3
+; AVX1-NEXT:    vpackssdw %xmm4, %xmm0, %xmm0
+; AVX1-NEXT:    vblendvps %xmm0, %xmm3, %xmm2, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: uitofp_load_8i64_to_8f32:
@@ -4872,43 +4868,41 @@ define <8 x float> @uitofp_load_8i64_to_8f32(ptr%a) {
 ; AVX2-NEXT:    vpand %ymm2, %ymm1, %ymm3
 ; AVX2-NEXT:    vpsrlq $1, %ymm1, %ymm4
 ; AVX2-NEXT:    vpor %ymm3, %ymm4, %ymm3
-; AVX2-NEXT:    vblendvpd %ymm1, %ymm3, %ymm1, %ymm1
-; AVX2-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX2-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm3
-; AVX2-NEXT:    vmovq %xmm1, %rax
-; AVX2-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm4
-; AVX2-NEXT:    vinsertps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[2,3]
-; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm1
-; AVX2-NEXT:    vmovq %xmm1, %rax
+; AVX2-NEXT:    vblendvpd %ymm1, %ymm3, %ymm1, %ymm3
+; AVX2-NEXT:    vpextrq $1, %xmm3, %rax
 ; AVX2-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm4
-; AVX2-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
-; AVX2-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX2-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm1
-; AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[0]
-; AVX2-NEXT:    vaddps %xmm1, %xmm1, %xmm3
-; AVX2-NEXT:    vmovdqa (%rdi), %xmm4
-; AVX2-NEXT:    vmovdqa 32(%rdi), %xmm5
-; AVX2-NEXT:    vpackssdw 48(%rdi), %xmm5, %xmm5
-; AVX2-NEXT:    vblendvps %xmm5, %xmm3, %xmm1, %xmm1
+; AVX2-NEXT:    vmovq %xmm3, %rax
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm5
+; AVX2-NEXT:    vinsertps {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[2,3]
+; AVX2-NEXT:    vextracti128 $1, %ymm3, %xmm3
+; AVX2-NEXT:    vmovq %xmm3, %rax
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm6, %xmm5
+; AVX2-NEXT:    vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm5[0],xmm4[3]
+; AVX2-NEXT:    vpextrq $1, %xmm3, %rax
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm6, %xmm3
+; AVX2-NEXT:    vinsertps {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[0]
+; AVX2-NEXT:    vaddps %xmm3, %xmm3, %xmm4
+; AVX2-NEXT:    vpackssdw 48(%rdi), %xmm1, %xmm1
+; AVX2-NEXT:    vblendvps %xmm1, %xmm4, %xmm3, %xmm1
 ; AVX2-NEXT:    vandps %ymm2, %ymm0, %ymm2
 ; AVX2-NEXT:    vpsrlq $1, %ymm0, %ymm3
 ; AVX2-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX2-NEXT:    vblendvpd %ymm0, %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX2-NEXT:    vcvtsi2ss %rax, %xmm6, %xmm2
-; AVX2-NEXT:    vmovq %xmm0, %rax
-; AVX2-NEXT:    vcvtsi2ss %rax, %xmm6, %xmm3
-; AVX2-NEXT:    vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT:    vmovq %xmm0, %rax
+; AVX2-NEXT:    vblendvpd %ymm0, %ymm2, %ymm0, %ymm2
+; AVX2-NEXT:    vpextrq $1, %xmm2, %rax
 ; AVX2-NEXT:    vcvtsi2ss %rax, %xmm6, %xmm3
-; AVX2-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
-; AVX2-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX2-NEXT:    vcvtsi2ss %rax, %xmm6, %xmm0
-; AVX2-NEXT:    vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
-; AVX2-NEXT:    vaddps %xmm0, %xmm0, %xmm2
-; AVX2-NEXT:    vpackssdw 16(%rdi), %xmm4, %xmm3
-; AVX2-NEXT:    vblendvps %xmm3, %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vmovq %xmm2, %rax
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm6, %xmm4
+; AVX2-NEXT:    vinsertps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[2,3]
+; AVX2-NEXT:    vextracti128 $1, %ymm2, %xmm2
+; AVX2-NEXT:    vmovq %xmm2, %rax
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm6, %xmm4
+; AVX2-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
+; AVX2-NEXT:    vpextrq $1, %xmm2, %rax
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm6, %xmm2
+; AVX2-NEXT:    vinsertps {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[0]
+; AVX2-NEXT:    vaddps %xmm2, %xmm2, %xmm3
+; AVX2-NEXT:    vpackssdw 16(%rdi), %xmm0, %xmm0
+; AVX2-NEXT:    vblendvps %xmm0, %xmm3, %xmm2, %xmm0
 ; AVX2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
@@ -5010,10 +5004,9 @@ define <8 x float> @uitofp_load_8i32_to_8f32(ptr%a) {
 ;
 ; AVX1-LABEL: uitofp_load_8i32_to_8f32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovaps (%rdi), %ymm0
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX1-NEXT:    vpsrld $16, %xmm0, %xmm1
 ; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX1-NEXT:    vpsrld $16, %xmm1, %xmm1
 ; AVX1-NEXT:    vpsrld $16, %xmm2, %xmm2
 ; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX1-NEXT:    vcvtdq2ps %ymm1, %ymm1
diff --git a/llvm/test/CodeGen/X86/vector-fshl-256.ll b/llvm/test/CodeGen/X86/vector-fshl-256.ll
index 1720193e1f04bd3..790c9d013efac47 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-256.ll
@@ -161,10 +161,10 @@ define <4 x i64> @var_funnnel_v4i64(<4 x i64> %x, <4 x i64> %y, <4 x i64> %amt)
 define <8 x i32> @var_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %amt) nounwind {
 ; AVX1-LABEL: var_funnnel_v8i32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm4 = [31,31,31,31]
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm5
+; AVX1-NEXT:    vbroadcastss {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; AVX1-NEXT:    vandps %ymm3, %ymm2, %ymm2
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT:    vxorps %xmm3, %xmm4, %xmm5
 ; AVX1-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm5[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
 ; AVX1-NEXT:    vpsrld $1, %xmm7, %xmm7
@@ -179,24 +179,24 @@ define <8 x i32> @var_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %amt)
 ; AVX1-NEXT:    vpsrld %xmm5, %xmm7, %xmm5
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm9[4,5,6,7]
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm6[2,3],xmm5[4,5],xmm6[6,7]
-; AVX1-NEXT:    vpslld $23, %xmm3, %xmm3
+; AVX1-NEXT:    vpslld $23, %xmm4, %xmm4
 ; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm6 = [1065353216,1065353216,1065353216,1065353216]
-; AVX1-NEXT:    vpaddd %xmm6, %xmm3, %xmm3
-; AVX1-NEXT:    vcvttps2dq %xmm3, %xmm3
+; AVX1-NEXT:    vpaddd %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vcvttps2dq %xmm4, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm7
-; AVX1-NEXT:    vpmulld %xmm3, %xmm7, %xmm3
-; AVX1-NEXT:    vpor %xmm5, %xmm3, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm2, %xmm4
-; AVX1-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm4[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpmulld %xmm4, %xmm7, %xmm4
+; AVX1-NEXT:    vpor %xmm5, %xmm4, %xmm4
+; AVX1-NEXT:    vxorps %xmm3, %xmm2, %xmm3
+; AVX1-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX1-NEXT:    vpsrld $1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpsrld %xmm5, %xmm1, %xmm5
-; AVX1-NEXT:    vpsrlq $32, %xmm4, %xmm7
+; AVX1-NEXT:    vpsrlq $32, %xmm3, %xmm7
 ; AVX1-NEXT:    vpsrld %xmm7, %xmm1, %xmm7
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm7[0,1,2,3],xmm5[4,5,6,7]
-; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm7 = xmm4[2],xmm8[2],xmm4[3],xmm8[3]
+; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm7 = xmm3[2],xmm8[2],xmm3[3],xmm8[3]
 ; AVX1-NEXT:    vpsrld %xmm7, %xmm1, %xmm7
-; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
-; AVX1-NEXT:    vpsrld %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
+; AVX1-NEXT:    vpsrld %xmm3, %xmm1, %xmm1
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm7[4,5,6,7]
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm5[2,3],xmm1[4,5],xmm5[6,7]
 ; AVX1-NEXT:    vpslld $23, %xmm2, %xmm2
@@ -204,7 +204,7 @@ define <8 x i32> @var_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %amt)
 ; AVX1-NEXT:    vcvttps2dq %xmm2, %xmm2
 ; AVX1-NEXT:    vpmulld %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: var_funnnel_v8i32:
diff --git a/llvm/test/CodeGen/X86/vector-fshl-512.ll b/llvm/test/CodeGen/X86/vector-fshl-512.ll
index 114d706f702b4f8..c246aaf61dbcfe9 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-512.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-512.ll
@@ -227,9 +227,9 @@ define <64 x i8> @var_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %amt)
 ; AVX512F-NEXT:    vpsrlw $4, %ymm5, %ymm3
 ; AVX512F-NEXT:    vpbroadcastb {{.*#+}} ymm6 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
 ; AVX512F-NEXT:    vpand %ymm6, %ymm3, %ymm7
-; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm2
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} zmm8 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512F-NEXT:    vpandq %zmm8, %zmm2, %zmm2
 ; AVX512F-NEXT:    vextracti64x4 $1, %zmm2, %ymm3
-; AVX512F-NEXT:    vpbroadcastb {{.*#+}} ymm8 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
 ; AVX512F-NEXT:    vpxor %ymm3, %ymm8, %ymm9
 ; AVX512F-NEXT:    vpsllw $5, %ymm9, %ymm9
 ; AVX512F-NEXT:    vpblendvb %ymm9, %ymm7, %ymm5, %ymm5
@@ -296,9 +296,9 @@ define <64 x i8> @var_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %amt)
 ; AVX512VL-NEXT:    vpsrlw $4, %ymm5, %ymm3
 ; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm6 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
 ; AVX512VL-NEXT:    vpand %ymm6, %ymm3, %ymm7
-; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm2
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} zmm8 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VL-NEXT:    vpandq %zmm8, %zmm2, %zmm2
 ; AVX512VL-NEXT:    vextracti64x4 $1, %zmm2, %ymm3
-; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm8 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
 ; AVX512VL-NEXT:    vpxor %ymm3, %ymm8, %ymm9
 ; AVX512VL-NEXT:    vpsllw $5, %ymm9, %ymm9
 ; AVX512VL-NEXT:    vpblendvb %ymm9, %ymm7, %ymm5, %ymm5
diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll
index b2922975d7beff1..8762072f3e8f6cc 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll
@@ -22,35 +22,34 @@ declare <32 x i8> @llvm.fshl.v32i8(<32 x i8>, <32 x i8>, <32 x i8>)
 define <4 x i64> @var_funnnel_v4i64(<4 x i64> %x, <4 x i64> %amt) nounwind {
 ; AVX1-LABEL: var_funnnel_v4i64:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm3
+; AVX1-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [63,63,63,63]
+; AVX1-NEXT:    vandps %ymm2, %ymm1, %ymm3
 ; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT:    vpsllq %xmm4, %xmm2, %xmm5
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpsllq %xmm4, %xmm5, %xmm6
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
-; AVX1-NEXT:    vpsllq %xmm4, %xmm2, %xmm4
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
-; AVX1-NEXT:    vpsllq %xmm3, %xmm0, %xmm5
+; AVX1-NEXT:    vpsllq %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT:    vpsllq %xmm3, %xmm0, %xmm6
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
 ; AVX1-NEXT:    vpsllq %xmm3, %xmm0, %xmm3
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm6[0,1,2,3],xmm3[4,5,6,7]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT:    vpxor %xmm5, %xmm5, %xmm5
-; AVX1-NEXT:    vpsubq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vmovddup {{.*#+}} xmm6 = [63,63]
-; AVX1-NEXT:    # xmm6 = mem[0,0]
-; AVX1-NEXT:    vpand %xmm6, %xmm4, %xmm4
-; AVX1-NEXT:    vpsrlq %xmm4, %xmm2, %xmm7
+; AVX1-NEXT:    vpxor %xmm6, %xmm6, %xmm6
+; AVX1-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX1-NEXT:    vpand %xmm2, %xmm4, %xmm4
+; AVX1-NEXT:    vpsrlq %xmm4, %xmm5, %xmm7
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
-; AVX1-NEXT:    vpsrlq %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm7[0,1,2,3],xmm2[4,5,6,7]
-; AVX1-NEXT:    vpsubq %xmm1, %xmm5, %xmm1
-; AVX1-NEXT:    vpand %xmm6, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrlq %xmm1, %xmm0, %xmm4
+; AVX1-NEXT:    vpsrlq %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm7[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT:    vpsubq %xmm1, %xmm6, %xmm1
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlq %xmm1, %xmm0, %xmm2
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
 ; AVX1-NEXT:    vpsrlq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
 ; AVX1-NEXT:    vorps %ymm0, %ymm3, %ymm0
 ; AVX1-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/vector-fshr-256.ll b/llvm/test/CodeGen/X86/vector-fshr-256.ll
index b39b7c140a451d3..aeb58f67b5bfa4a 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-256.ll
@@ -162,50 +162,50 @@ define <4 x i64> @var_funnnel_v4i64(<4 x i64> %x, <4 x i64> %y, <4 x i64> %amt)
 define <8 x i32> @var_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %amt) nounwind {
 ; AVX1-LABEL: var_funnnel_v8i32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
+; AVX1-NEXT:    vbroadcastss {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; AVX1-NEXT:    vandps %ymm3, %ymm2, %ymm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm4[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX1-NEXT:    vpsrld %xmm5, %xmm3, %xmm5
-; AVX1-NEXT:    vpsrlq $32, %xmm4, %xmm6
-; AVX1-NEXT:    vpsrld %xmm6, %xmm3, %xmm6
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3],xmm5[4,5,6,7]
-; AVX1-NEXT:    vpxor %xmm6, %xmm6, %xmm6
-; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm7 = xmm4[2],xmm6[2],xmm4[3],xmm6[3]
-; AVX1-NEXT:    vpsrld %xmm7, %xmm3, %xmm7
-; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm8 = xmm4[0],zero,xmm4[1],zero
-; AVX1-NEXT:    vpsrld %xmm8, %xmm3, %xmm3
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm7[4,5,6,7]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm5[2,3],xmm3[4,5],xmm5[6,7]
-; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm5 = [31,31,31,31]
-; AVX1-NEXT:    vpxor %xmm5, %xmm4, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm6
+; AVX1-NEXT:    vpsrld %xmm5, %xmm6, %xmm5
+; AVX1-NEXT:    vpsrlq $32, %xmm4, %xmm7
+; AVX1-NEXT:    vpsrld %xmm7, %xmm6, %xmm7
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm7[0,1,2,3],xmm5[4,5,6,7]
+; AVX1-NEXT:    vpxor %xmm7, %xmm7, %xmm7
+; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm8 = xmm4[2],xmm7[2],xmm4[3],xmm7[3]
+; AVX1-NEXT:    vpsrld %xmm8, %xmm6, %xmm8
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm9 = xmm4[0],zero,xmm4[1],zero
+; AVX1-NEXT:    vpsrld %xmm9, %xmm6, %xmm6
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm8[4,5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3],xmm6[4,5],xmm5[6,7]
+; AVX1-NEXT:    vpxor %xmm3, %xmm4, %xmm4
 ; AVX1-NEXT:    vpslld $23, %xmm4, %xmm4
-; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm7 = [1065353216,1065353216,1065353216,1065353216]
-; AVX1-NEXT:    vpaddd %xmm7, %xmm4, %xmm4
+; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm6 = [1065353216,1065353216,1065353216,1065353216]
+; AVX1-NEXT:    vpaddd %xmm6, %xmm4, %xmm4
 ; AVX1-NEXT:    vcvttps2dq %xmm4, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm8
 ; AVX1-NEXT:    vpaddd %xmm8, %xmm8, %xmm8
 ; AVX1-NEXT:    vpmulld %xmm4, %xmm8, %xmm4
-; AVX1-NEXT:    vpor %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX1-NEXT:    vpsrld %xmm4, %xmm1, %xmm4
+; AVX1-NEXT:    vpor %xmm5, %xmm4, %xmm4
+; AVX1-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpsrld %xmm5, %xmm1, %xmm5
 ; AVX1-NEXT:    vpsrlq $32, %xmm2, %xmm8
 ; AVX1-NEXT:    vpsrld %xmm8, %xmm1, %xmm8
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm8[0,1,2,3],xmm4[4,5,6,7]
-; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm6 = xmm2[2],xmm6[2],xmm2[3],xmm6[3]
-; AVX1-NEXT:    vpsrld %xmm6, %xmm1, %xmm6
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm8[0,1,2,3],xmm5[4,5,6,7]
+; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm7 = xmm2[2],xmm7[2],xmm2[3],xmm7[3]
+; AVX1-NEXT:    vpsrld %xmm7, %xmm1, %xmm7
 ; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm8 = xmm2[0],zero,xmm2[1],zero
 ; AVX1-NEXT:    vpsrld %xmm8, %xmm1, %xmm1
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm6[4,5,6,7]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5],xmm4[6,7]
-; AVX1-NEXT:    vpxor %xmm5, %xmm2, %xmm2
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm7[4,5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm5[2,3],xmm1[4,5],xmm5[6,7]
+; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT:    vpslld $23, %xmm2, %xmm2
-; AVX1-NEXT:    vpaddd %xmm7, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddd %xmm6, %xmm2, %xmm2
 ; AVX1-NEXT:    vcvttps2dq %xmm2, %xmm2
 ; AVX1-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmulld %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: var_funnnel_v8i32:
@@ -280,25 +280,25 @@ define <8 x i32> @var_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %amt)
 ;
 ; XOPAVX1-LABEL: var_funnnel_v8i32:
 ; XOPAVX1:       # %bb.0:
-; XOPAVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
-; XOPAVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
-; XOPAVX1-NEXT:    vpsubd %xmm3, %xmm4, %xmm5
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm6
-; XOPAVX1-NEXT:    vpshld %xmm5, %xmm6, %xmm5
-; XOPAVX1-NEXT:    vbroadcastss {{.*#+}} xmm6 = [31,31,31,31]
-; XOPAVX1-NEXT:    vpxor %xmm6, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vbroadcastss {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; XOPAVX1-NEXT:    vandps %ymm3, %ymm2, %ymm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; XOPAVX1-NEXT:    vpxor %xmm5, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpsubd %xmm4, %xmm5, %xmm6
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
+; XOPAVX1-NEXT:    vpshld %xmm6, %xmm7, %xmm6
+; XOPAVX1-NEXT:    vpxor %xmm3, %xmm4, %xmm4
 ; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm7
 ; XOPAVX1-NEXT:    vpaddd %xmm7, %xmm7, %xmm7
-; XOPAVX1-NEXT:    vpshld %xmm3, %xmm7, %xmm3
-; XOPAVX1-NEXT:    vpor %xmm5, %xmm3, %xmm3
-; XOPAVX1-NEXT:    vpsubd %xmm2, %xmm4, %xmm4
-; XOPAVX1-NEXT:    vpshld %xmm4, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpxor %xmm6, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpshld %xmm4, %xmm7, %xmm4
+; XOPAVX1-NEXT:    vpor %xmm6, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpsubd %xmm2, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpshld %xmm5, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
 ; XOPAVX1-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
 ; XOPAVX1-NEXT:    vpshld %xmm2, %xmm0, %xmm0
 ; XOPAVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
 ; XOPAVX1-NEXT:    retq
 ;
 ; XOPAVX2-LABEL: var_funnnel_v8i32:
@@ -318,39 +318,39 @@ define <8 x i32> @var_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %amt)
 define <16 x i16> @var_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> %amt) nounwind {
 ; AVX1-LABEL: var_funnnel_v16i16:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT:    vpsllw $12, %xmm3, %xmm4
-; AVX1-NEXT:    vpsllw $4, %xmm3, %xmm5
-; AVX1-NEXT:    vpor %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vpaddw %xmm4, %xmm4, %xmm5
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm6
-; AVX1-NEXT:    vpsrlw $8, %xmm6, %xmm7
-; AVX1-NEXT:    vpblendvb %xmm4, %xmm7, %xmm6, %xmm4
-; AVX1-NEXT:    vpsrlw $4, %xmm4, %xmm6
-; AVX1-NEXT:    vpblendvb %xmm5, %xmm6, %xmm4, %xmm4
-; AVX1-NEXT:    vpsrlw $2, %xmm4, %xmm6
-; AVX1-NEXT:    vpaddw %xmm5, %xmm5, %xmm5
-; AVX1-NEXT:    vpblendvb %xmm5, %xmm6, %xmm4, %xmm4
-; AVX1-NEXT:    vpsrlw $1, %xmm4, %xmm6
-; AVX1-NEXT:    vpaddw %xmm5, %xmm5, %xmm5
-; AVX1-NEXT:    vpblendvb %xmm5, %xmm6, %xmm4, %xmm4
-; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm5 = [15,15,15,15,15,15,15,15]
-; AVX1-NEXT:    vpxor %xmm5, %xmm3, %xmm6
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm6[4,4,5,5,6,6,7,7]
-; AVX1-NEXT:    vpslld $23, %xmm3, %xmm7
-; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm3 = [1065353216,1065353216,1065353216,1065353216]
-; AVX1-NEXT:    vpaddd %xmm3, %xmm7, %xmm7
+; AVX1-NEXT:    vbroadcastss {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT:    vandps %ymm3, %ymm2, %ymm2
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT:    vpsllw $12, %xmm4, %xmm5
+; AVX1-NEXT:    vpsllw $4, %xmm4, %xmm6
+; AVX1-NEXT:    vpor %xmm5, %xmm6, %xmm5
+; AVX1-NEXT:    vpaddw %xmm5, %xmm5, %xmm6
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
+; AVX1-NEXT:    vpsrlw $8, %xmm7, %xmm8
+; AVX1-NEXT:    vpblendvb %xmm5, %xmm8, %xmm7, %xmm5
+; AVX1-NEXT:    vpsrlw $4, %xmm5, %xmm7
+; AVX1-NEXT:    vpblendvb %xmm6, %xmm7, %xmm5, %xmm5
+; AVX1-NEXT:    vpsrlw $2, %xmm5, %xmm7
+; AVX1-NEXT:    vpaddw %xmm6, %xmm6, %xmm6
+; AVX1-NEXT:    vpblendvb %xmm6, %xmm7, %xmm5, %xmm5
+; AVX1-NEXT:    vpsrlw $1, %xmm5, %xmm7
+; AVX1-NEXT:    vpaddw %xmm6, %xmm6, %xmm6
+; AVX1-NEXT:    vpblendvb %xmm6, %xmm7, %xmm5, %xmm5
+; AVX1-NEXT:    vpxor %xmm3, %xmm4, %xmm6
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm6[4,4,5,5,6,6,7,7]
+; AVX1-NEXT:    vpslld $23, %xmm4, %xmm7
+; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm4 = [1065353216,1065353216,1065353216,1065353216]
+; AVX1-NEXT:    vpaddd %xmm4, %xmm7, %xmm7
 ; AVX1-NEXT:    vcvttps2dq %xmm7, %xmm7
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
 ; AVX1-NEXT:    vpslld $23, %xmm6, %xmm6
-; AVX1-NEXT:    vpaddd %xmm3, %xmm6, %xmm6
+; AVX1-NEXT:    vpaddd %xmm4, %xmm6, %xmm6
 ; AVX1-NEXT:    vcvttps2dq %xmm6, %xmm6
 ; AVX1-NEXT:    vpackusdw %xmm7, %xmm6, %xmm6
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm7
 ; AVX1-NEXT:    vpaddw %xmm7, %xmm7, %xmm7
 ; AVX1-NEXT:    vpmullw %xmm6, %xmm7, %xmm6
-; AVX1-NEXT:    vpor %xmm4, %xmm6, %xmm4
+; AVX1-NEXT:    vpor %xmm5, %xmm6, %xmm5
 ; AVX1-NEXT:    vpsllw $12, %xmm2, %xmm6
 ; AVX1-NEXT:    vpsllw $4, %xmm2, %xmm7
 ; AVX1-NEXT:    vpor %xmm6, %xmm7, %xmm6
@@ -365,20 +365,20 @@ define <16 x i16> @var_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> %
 ; AVX1-NEXT:    vpsrlw $1, %xmm1, %xmm6
 ; AVX1-NEXT:    vpaddw %xmm7, %xmm7, %xmm7
 ; AVX1-NEXT:    vpblendvb %xmm7, %xmm6, %xmm1, %xmm1
-; AVX1-NEXT:    vpxor %xmm5, %xmm2, %xmm2
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm2[4,4,5,5,6,6,7,7]
-; AVX1-NEXT:    vpslld $23, %xmm5, %xmm5
-; AVX1-NEXT:    vpaddd %xmm3, %xmm5, %xmm5
-; AVX1-NEXT:    vcvttps2dq %xmm5, %xmm5
+; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm2[4,4,5,5,6,6,7,7]
+; AVX1-NEXT:    vpslld $23, %xmm3, %xmm3
+; AVX1-NEXT:    vpaddd %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vcvttps2dq %xmm3, %xmm3
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
 ; AVX1-NEXT:    vpslld $23, %xmm2, %xmm2
-; AVX1-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddd %xmm4, %xmm2, %xmm2
 ; AVX1-NEXT:    vcvttps2dq %xmm2, %xmm2
-; AVX1-NEXT:    vpackusdw %xmm5, %xmm2, %xmm2
+; AVX1-NEXT:    vpackusdw %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT:    vpaddw %xmm0, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmullw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: var_funnnel_v16i16:
@@ -460,25 +460,25 @@ define <16 x i16> @var_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> %
 ;
 ; XOPAVX1-LABEL: var_funnnel_v16i16:
 ; XOPAVX1:       # %bb.0:
-; XOPAVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
-; XOPAVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
-; XOPAVX1-NEXT:    vpsubw %xmm3, %xmm4, %xmm5
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm6
-; XOPAVX1-NEXT:    vpshlw %xmm5, %xmm6, %xmm5
-; XOPAVX1-NEXT:    vbroadcastss {{.*#+}} xmm6 = [15,15,15,15,15,15,15,15]
-; XOPAVX1-NEXT:    vpxor %xmm6, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vbroadcastss {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; XOPAVX1-NEXT:    vandps %ymm3, %ymm2, %ymm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; XOPAVX1-NEXT:    vpxor %xmm5, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpsubw %xmm4, %xmm5, %xmm6
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
+; XOPAVX1-NEXT:    vpshlw %xmm6, %xmm7, %xmm6
+; XOPAVX1-NEXT:    vpxor %xmm3, %xmm4, %xmm4
 ; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm7
 ; XOPAVX1-NEXT:    vpaddw %xmm7, %xmm7, %xmm7
-; XOPAVX1-NEXT:    vpshlw %xmm3, %xmm7, %xmm3
-; XOPAVX1-NEXT:    vpor %xmm5, %xmm3, %xmm3
-; XOPAVX1-NEXT:    vpsubw %xmm2, %xmm4, %xmm4
-; XOPAVX1-NEXT:    vpshlw %xmm4, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpxor %xmm6, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpshlw %xmm4, %xmm7, %xmm4
+; XOPAVX1-NEXT:    vpor %xmm6, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpsubw %xmm2, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpshlw %xmm5, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
 ; XOPAVX1-NEXT:    vpaddw %xmm0, %xmm0, %xmm0
 ; XOPAVX1-NEXT:    vpshlw %xmm2, %xmm0, %xmm0
 ; XOPAVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
 ; XOPAVX1-NEXT:    retq
 ;
 ; XOPAVX2-LABEL: var_funnnel_v16i16:
@@ -509,69 +509,69 @@ define <16 x i16> @var_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> %
 define <32 x i8> @var_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %amt) nounwind {
 ; AVX1-LABEL: var_funnnel_v32i8:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpaddb %xmm3, %xmm3, %xmm5
-; AVX1-NEXT:    vpsllw $4, %xmm5, %xmm4
-; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX1-NEXT:    vpand %xmm3, %xmm4, %xmm6
-; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT:    vpsrlw $4, %xmm5, %xmm3
+; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT:    vpand %xmm4, %xmm3, %xmm6
+; AVX1-NEXT:    vbroadcastss {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX1-NEXT:    vandps %ymm3, %ymm2, %ymm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm7
-; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm4 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; AVX1-NEXT:    vpxor %xmm4, %xmm7, %xmm8
-; AVX1-NEXT:    vpsllw $5, %xmm8, %xmm8
+; AVX1-NEXT:    vpsllw $5, %xmm7, %xmm8
 ; AVX1-NEXT:    vpblendvb %xmm8, %xmm6, %xmm5, %xmm6
-; AVX1-NEXT:    vpsllw $2, %xmm6, %xmm9
-; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX1-NEXT:    vpsrlw $2, %xmm6, %xmm9
+; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm5 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
 ; AVX1-NEXT:    vpand %xmm5, %xmm9, %xmm9
 ; AVX1-NEXT:    vpaddb %xmm8, %xmm8, %xmm8
-; AVX1-NEXT:    vpblendvb %xmm8, %xmm9, %xmm6, %xmm6
-; AVX1-NEXT:    vpaddb %xmm6, %xmm6, %xmm9
+; AVX1-NEXT:    vpblendvb %xmm8, %xmm9, %xmm6, %xmm9
+; AVX1-NEXT:    vpsrlw $1, %xmm9, %xmm10
+; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT:    vpand %xmm6, %xmm10, %xmm10
 ; AVX1-NEXT:    vpaddb %xmm8, %xmm8, %xmm8
-; AVX1-NEXT:    vpblendvb %xmm8, %xmm9, %xmm6, %xmm6
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm8
-; AVX1-NEXT:    vpsrlw $4, %xmm8, %xmm9
-; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm10 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT:    vpand %xmm10, %xmm9, %xmm9
+; AVX1-NEXT:    vpblendvb %xmm8, %xmm10, %xmm9, %xmm8
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm9
+; AVX1-NEXT:    vpaddb %xmm9, %xmm9, %xmm9
+; AVX1-NEXT:    vpsllw $4, %xmm9, %xmm10
+; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm11 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX1-NEXT:    vpand %xmm11, %xmm10, %xmm10
+; AVX1-NEXT:    vpxor %xmm3, %xmm7, %xmm7
 ; AVX1-NEXT:    vpsllw $5, %xmm7, %xmm7
-; AVX1-NEXT:    vpblendvb %xmm7, %xmm9, %xmm8, %xmm8
-; AVX1-NEXT:    vpsrlw $2, %xmm8, %xmm9
-; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm11 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
-; AVX1-NEXT:    vpand %xmm11, %xmm9, %xmm9
+; AVX1-NEXT:    vpblendvb %xmm7, %xmm10, %xmm9, %xmm9
+; AVX1-NEXT:    vpsllw $2, %xmm9, %xmm10
+; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm12 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX1-NEXT:    vpand %xmm12, %xmm10, %xmm10
 ; AVX1-NEXT:    vpaddb %xmm7, %xmm7, %xmm7
-; AVX1-NEXT:    vpblendvb %xmm7, %xmm9, %xmm8, %xmm8
-; AVX1-NEXT:    vpsrlw $1, %xmm8, %xmm9
-; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm12 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX1-NEXT:    vpand %xmm12, %xmm9, %xmm9
+; AVX1-NEXT:    vpblendvb %xmm7, %xmm10, %xmm9, %xmm9
+; AVX1-NEXT:    vpaddb %xmm9, %xmm9, %xmm10
 ; AVX1-NEXT:    vpaddb %xmm7, %xmm7, %xmm7
-; AVX1-NEXT:    vpblendvb %xmm7, %xmm9, %xmm8, %xmm7
-; AVX1-NEXT:    vpor %xmm7, %xmm6, %xmm6
+; AVX1-NEXT:    vpblendvb %xmm7, %xmm10, %xmm9, %xmm7
+; AVX1-NEXT:    vpor %xmm7, %xmm8, %xmm7
+; AVX1-NEXT:    vpsrlw $4, %xmm1, %xmm8
+; AVX1-NEXT:    vpand %xmm4, %xmm8, %xmm4
+; AVX1-NEXT:    vpsllw $5, %xmm2, %xmm8
+; AVX1-NEXT:    vpblendvb %xmm8, %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlw $2, %xmm1, %xmm4
+; AVX1-NEXT:    vpand %xmm5, %xmm4, %xmm4
+; AVX1-NEXT:    vpaddb %xmm8, %xmm8, %xmm5
+; AVX1-NEXT:    vpblendvb %xmm5, %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlw $1, %xmm1, %xmm4
+; AVX1-NEXT:    vpand %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpaddb %xmm5, %xmm5, %xmm5
+; AVX1-NEXT:    vpblendvb %xmm5, %xmm4, %xmm1, %xmm1
 ; AVX1-NEXT:    vpaddb %xmm0, %xmm0, %xmm0
-; AVX1-NEXT:    vpsllw $4, %xmm0, %xmm7
-; AVX1-NEXT:    vpand %xmm3, %xmm7, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm2, %xmm4
-; AVX1-NEXT:    vpsllw $5, %xmm4, %xmm4
-; AVX1-NEXT:    vpblendvb %xmm4, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vpsllw $2, %xmm0, %xmm3
-; AVX1-NEXT:    vpand %xmm5, %xmm3, %xmm3
-; AVX1-NEXT:    vpaddb %xmm4, %xmm4, %xmm4
-; AVX1-NEXT:    vpblendvb %xmm4, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vpaddb %xmm0, %xmm0, %xmm3
-; AVX1-NEXT:    vpaddb %xmm4, %xmm4, %xmm4
-; AVX1-NEXT:    vpblendvb %xmm4, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrlw $4, %xmm1, %xmm3
-; AVX1-NEXT:    vpand %xmm3, %xmm10, %xmm3
+; AVX1-NEXT:    vpsllw $4, %xmm0, %xmm4
+; AVX1-NEXT:    vpand %xmm4, %xmm11, %xmm4
+; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsllw $5, %xmm2, %xmm2
-; AVX1-NEXT:    vpblendvb %xmm2, %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrlw $2, %xmm1, %xmm3
-; AVX1-NEXT:    vpand %xmm3, %xmm11, %xmm3
-; AVX1-NEXT:    vpaddb %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpblendvb %xmm2, %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrlw $1, %xmm1, %xmm3
+; AVX1-NEXT:    vpblendvb %xmm2, %xmm4, %xmm0, %xmm0
+; AVX1-NEXT:    vpsllw $2, %xmm0, %xmm3
 ; AVX1-NEXT:    vpand %xmm3, %xmm12, %xmm3
 ; AVX1-NEXT:    vpaddb %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpblendvb %xmm2, %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendvb %xmm2, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpaddb %xmm0, %xmm0, %xmm3
+; AVX1-NEXT:    vpaddb %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpblendvb %xmm2, %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: var_funnnel_v32i8:
@@ -720,47 +720,47 @@ define <32 x i8> @var_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %amt)
 ;
 ; XOPAVX1-LABEL: var_funnnel_v32i8:
 ; XOPAVX1:       # %bb.0:
-; XOPAVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
-; XOPAVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
-; XOPAVX1-NEXT:    vpsubb %xmm3, %xmm4, %xmm5
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm6
-; XOPAVX1-NEXT:    vpshlb %xmm5, %xmm6, %xmm5
-; XOPAVX1-NEXT:    vbroadcastss {{.*#+}} xmm6 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; XOPAVX1-NEXT:    vpxor %xmm6, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vbroadcastss {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; XOPAVX1-NEXT:    vandps %ymm3, %ymm2, %ymm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; XOPAVX1-NEXT:    vpxor %xmm5, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpsubb %xmm4, %xmm5, %xmm6
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
+; XOPAVX1-NEXT:    vpshlb %xmm6, %xmm7, %xmm6
+; XOPAVX1-NEXT:    vpxor %xmm3, %xmm4, %xmm4
 ; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm7
 ; XOPAVX1-NEXT:    vpaddb %xmm7, %xmm7, %xmm7
-; XOPAVX1-NEXT:    vpshlb %xmm3, %xmm7, %xmm3
-; XOPAVX1-NEXT:    vpor %xmm5, %xmm3, %xmm3
-; XOPAVX1-NEXT:    vpsubb %xmm2, %xmm4, %xmm4
-; XOPAVX1-NEXT:    vpshlb %xmm4, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpxor %xmm6, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpshlb %xmm4, %xmm7, %xmm4
+; XOPAVX1-NEXT:    vpor %xmm6, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpsubb %xmm2, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpshlb %xmm5, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
 ; XOPAVX1-NEXT:    vpaddb %xmm0, %xmm0, %xmm0
 ; XOPAVX1-NEXT:    vpshlb %xmm2, %xmm0, %xmm0
 ; XOPAVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
 ; XOPAVX1-NEXT:    retq
 ;
 ; XOPAVX2-LABEL: var_funnnel_v32i8:
 ; XOPAVX2:       # %bb.0:
-; XOPAVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; XOPAVX2-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; XOPAVX2-NEXT:    vpxor %xmm4, %xmm4, %xmm4
-; XOPAVX2-NEXT:    vpsubb %xmm3, %xmm4, %xmm5
-; XOPAVX2-NEXT:    vextracti128 $1, %ymm1, %xmm6
-; XOPAVX2-NEXT:    vpshlb %xmm5, %xmm6, %xmm5
-; XOPAVX2-NEXT:    vpsubb %xmm2, %xmm4, %xmm4
-; XOPAVX2-NEXT:    vpshlb %xmm4, %xmm1, %xmm1
-; XOPAVX2-NEXT:    vinserti128 $1, %xmm5, %ymm1, %ymm1
-; XOPAVX2-NEXT:    vpbroadcastb {{.*#+}} xmm4 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; XOPAVX2-NEXT:    vpxor %xmm4, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpbroadcastb {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; XOPAVX2-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm2, %xmm4
+; XOPAVX2-NEXT:    vpxor %xmm5, %xmm5, %xmm5
+; XOPAVX2-NEXT:    vpsubb %xmm4, %xmm5, %xmm6
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm1, %xmm7
+; XOPAVX2-NEXT:    vpshlb %xmm6, %xmm7, %xmm6
+; XOPAVX2-NEXT:    vpsubb %xmm2, %xmm5, %xmm5
+; XOPAVX2-NEXT:    vpshlb %xmm5, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm6, %ymm1, %ymm1
+; XOPAVX2-NEXT:    vpxor %xmm3, %xmm4, %xmm4
 ; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm5
 ; XOPAVX2-NEXT:    vpaddb %xmm5, %xmm5, %xmm5
-; XOPAVX2-NEXT:    vpshlb %xmm3, %xmm5, %xmm3
-; XOPAVX2-NEXT:    vpxor %xmm4, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpshlb %xmm4, %xmm5, %xmm4
+; XOPAVX2-NEXT:    vpxor %xmm3, %xmm2, %xmm2
 ; XOPAVX2-NEXT:    vpaddb %xmm0, %xmm0, %xmm0
 ; XOPAVX2-NEXT:    vpshlb %xmm2, %xmm0, %xmm0
-; XOPAVX2-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
   %res = call <32 x i8> @llvm.fshr.v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %amt)
diff --git a/llvm/test/CodeGen/X86/vector-fshr-512.ll b/llvm/test/CodeGen/X86/vector-fshr-512.ll
index 2bd03507a0249c4..b1fee9d1b0b3986 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-512.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-512.ll
@@ -222,135 +222,135 @@ define <32 x i16> @var_funnnel_v32i16(<32 x i16> %x, <32 x i16> %y, <32 x i16> %
 define <64 x i8> @var_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %amt) nounwind {
 ; AVX512F-LABEL: var_funnnel_v64i8:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm3
-; AVX512F-NEXT:    vpaddb %ymm3, %ymm3, %ymm4
-; AVX512F-NEXT:    vpsllw $4, %ymm4, %ymm3
-; AVX512F-NEXT:    vpbroadcastb {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512F-NEXT:    vpand %ymm5, %ymm3, %ymm6
-; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm2
-; AVX512F-NEXT:    vextracti64x4 $1, %zmm2, %ymm3
-; AVX512F-NEXT:    vpbroadcastb {{.*#+}} ymm7 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; AVX512F-NEXT:    vpxor %ymm7, %ymm3, %ymm8
-; AVX512F-NEXT:    vpsllw $5, %ymm8, %ymm8
-; AVX512F-NEXT:    vpblendvb %ymm8, %ymm6, %ymm4, %ymm4
-; AVX512F-NEXT:    vpsllw $2, %ymm4, %ymm6
-; AVX512F-NEXT:    vpbroadcastb {{.*#+}} ymm9 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX512F-NEXT:    vpand %ymm6, %ymm9, %ymm6
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm5
+; AVX512F-NEXT:    vpsrlw $4, %ymm5, %ymm3
+; AVX512F-NEXT:    vpbroadcastb {{.*#+}} ymm6 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512F-NEXT:    vpand %ymm6, %ymm3, %ymm7
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} zmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512F-NEXT:    vpandq %zmm3, %zmm2, %zmm2
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm2, %ymm4
+; AVX512F-NEXT:    vpsllw $5, %ymm4, %ymm8
+; AVX512F-NEXT:    vpblendvb %ymm8, %ymm7, %ymm5, %ymm5
+; AVX512F-NEXT:    vpsrlw $2, %ymm5, %ymm7
+; AVX512F-NEXT:    vpbroadcastb {{.*#+}} ymm9 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; AVX512F-NEXT:    vpand %ymm7, %ymm9, %ymm7
 ; AVX512F-NEXT:    vpaddb %ymm8, %ymm8, %ymm8
-; AVX512F-NEXT:    vpblendvb %ymm8, %ymm6, %ymm4, %ymm4
-; AVX512F-NEXT:    vpaddb %ymm4, %ymm4, %ymm6
+; AVX512F-NEXT:    vpblendvb %ymm8, %ymm7, %ymm5, %ymm5
+; AVX512F-NEXT:    vpsrlw $1, %ymm5, %ymm7
+; AVX512F-NEXT:    vpbroadcastb {{.*#+}} ymm10 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512F-NEXT:    vpand %ymm7, %ymm10, %ymm7
 ; AVX512F-NEXT:    vpaddb %ymm8, %ymm8, %ymm8
-; AVX512F-NEXT:    vpblendvb %ymm8, %ymm6, %ymm4, %ymm4
+; AVX512F-NEXT:    vpblendvb %ymm8, %ymm7, %ymm5, %ymm5
+; AVX512F-NEXT:    vpsrlw $4, %ymm1, %ymm7
+; AVX512F-NEXT:    vpand %ymm6, %ymm7, %ymm6
+; AVX512F-NEXT:    vpsllw $5, %ymm2, %ymm7
+; AVX512F-NEXT:    vpblendvb %ymm7, %ymm6, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlw $2, %ymm1, %ymm6
+; AVX512F-NEXT:    vpand %ymm6, %ymm9, %ymm6
+; AVX512F-NEXT:    vpaddb %ymm7, %ymm7, %ymm7
+; AVX512F-NEXT:    vpblendvb %ymm7, %ymm6, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm6
+; AVX512F-NEXT:    vpand %ymm6, %ymm10, %ymm6
+; AVX512F-NEXT:    vpaddb %ymm7, %ymm7, %ymm7
+; AVX512F-NEXT:    vpblendvb %ymm7, %ymm6, %ymm1, %ymm1
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm1
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm5
+; AVX512F-NEXT:    vpaddb %ymm5, %ymm5, %ymm5
+; AVX512F-NEXT:    vpsllw $4, %ymm5, %ymm6
+; AVX512F-NEXT:    vpbroadcastb {{.*#+}} ymm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT:    vpand %ymm7, %ymm6, %ymm6
+; AVX512F-NEXT:    vpxor %ymm3, %ymm4, %ymm4
+; AVX512F-NEXT:    vpsllw $5, %ymm4, %ymm4
+; AVX512F-NEXT:    vpblendvb %ymm4, %ymm6, %ymm5, %ymm5
+; AVX512F-NEXT:    vpsllw $2, %ymm5, %ymm6
+; AVX512F-NEXT:    vpbroadcastb {{.*#+}} ymm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512F-NEXT:    vpand %ymm6, %ymm8, %ymm6
+; AVX512F-NEXT:    vpaddb %ymm4, %ymm4, %ymm4
+; AVX512F-NEXT:    vpblendvb %ymm4, %ymm6, %ymm5, %ymm5
+; AVX512F-NEXT:    vpaddb %ymm5, %ymm5, %ymm6
+; AVX512F-NEXT:    vpaddb %ymm4, %ymm4, %ymm4
+; AVX512F-NEXT:    vpblendvb %ymm4, %ymm6, %ymm5, %ymm4
 ; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm0
-; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm6
-; AVX512F-NEXT:    vpand %ymm5, %ymm6, %ymm5
-; AVX512F-NEXT:    vpxor %ymm7, %ymm2, %ymm6
-; AVX512F-NEXT:    vpsllw $5, %ymm6, %ymm6
-; AVX512F-NEXT:    vpblendvb %ymm6, %ymm5, %ymm0, %ymm0
-; AVX512F-NEXT:    vpsllw $2, %ymm0, %ymm5
-; AVX512F-NEXT:    vpand %ymm5, %ymm9, %ymm5
-; AVX512F-NEXT:    vpaddb %ymm6, %ymm6, %ymm6
-; AVX512F-NEXT:    vpblendvb %ymm6, %ymm5, %ymm0, %ymm0
-; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm5
-; AVX512F-NEXT:    vpaddb %ymm6, %ymm6, %ymm6
-; AVX512F-NEXT:    vpblendvb %ymm6, %ymm5, %ymm0, %ymm0
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
-; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm4
-; AVX512F-NEXT:    vpsrlw $4, %ymm4, %ymm5
-; AVX512F-NEXT:    vpbroadcastb {{.*#+}} ymm6 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512F-NEXT:    vpand %ymm6, %ymm5, %ymm5
-; AVX512F-NEXT:    vpsllw $5, %ymm3, %ymm3
-; AVX512F-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
-; AVX512F-NEXT:    vpsrlw $2, %ymm4, %ymm5
-; AVX512F-NEXT:    vpbroadcastb {{.*#+}} ymm7 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm5
 ; AVX512F-NEXT:    vpand %ymm7, %ymm5, %ymm5
-; AVX512F-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
-; AVX512F-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
-; AVX512F-NEXT:    vpsrlw $1, %ymm4, %ymm5
-; AVX512F-NEXT:    vpbroadcastb {{.*#+}} ymm8 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX512F-NEXT:    vpand %ymm5, %ymm8, %ymm5
-; AVX512F-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
-; AVX512F-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm3
-; AVX512F-NEXT:    vpsrlw $4, %ymm1, %ymm4
-; AVX512F-NEXT:    vpand %ymm6, %ymm4, %ymm4
+; AVX512F-NEXT:    vpxor %ymm3, %ymm2, %ymm2
 ; AVX512F-NEXT:    vpsllw $5, %ymm2, %ymm2
-; AVX512F-NEXT:    vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
-; AVX512F-NEXT:    vpsrlw $2, %ymm1, %ymm4
-; AVX512F-NEXT:    vpand %ymm7, %ymm4, %ymm4
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsllw $2, %ymm0, %ymm3
+; AVX512F-NEXT:    vpand %ymm3, %ymm8, %ymm3
 ; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
-; AVX512F-NEXT:    vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
-; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm4
-; AVX512F-NEXT:    vpand %ymm4, %ymm8, %ymm4
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm3
 ; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
-; AVX512F-NEXT:    vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
 ; AVX512F-NEXT:    vporq %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: var_funnnel_v64i8:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm3
-; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm4
-; AVX512VL-NEXT:    vpsllw $4, %ymm4, %ymm3
-; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm1, %ymm4
+; AVX512VL-NEXT:    vpsrlw $4, %ymm4, %ymm3
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
 ; AVX512VL-NEXT:    vpand %ymm5, %ymm3, %ymm6
-; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm2
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} zmm7 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VL-NEXT:    vpandq %zmm7, %zmm2, %zmm2
 ; AVX512VL-NEXT:    vextracti64x4 $1, %zmm2, %ymm3
-; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm7 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; AVX512VL-NEXT:    vpxor %ymm7, %ymm3, %ymm8
-; AVX512VL-NEXT:    vpsllw $5, %ymm8, %ymm8
+; AVX512VL-NEXT:    vpsllw $5, %ymm3, %ymm8
 ; AVX512VL-NEXT:    vpblendvb %ymm8, %ymm6, %ymm4, %ymm4
-; AVX512VL-NEXT:    vpsllw $2, %ymm4, %ymm6
-; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm9 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512VL-NEXT:    vpsrlw $2, %ymm4, %ymm6
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm9 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
 ; AVX512VL-NEXT:    vpand %ymm6, %ymm9, %ymm6
 ; AVX512VL-NEXT:    vpaddb %ymm8, %ymm8, %ymm8
 ; AVX512VL-NEXT:    vpblendvb %ymm8, %ymm6, %ymm4, %ymm4
-; AVX512VL-NEXT:    vpaddb %ymm4, %ymm4, %ymm6
+; AVX512VL-NEXT:    vpsrlw $1, %ymm4, %ymm6
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm10 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512VL-NEXT:    vpand %ymm6, %ymm10, %ymm6
 ; AVX512VL-NEXT:    vpaddb %ymm8, %ymm8, %ymm8
 ; AVX512VL-NEXT:    vpblendvb %ymm8, %ymm6, %ymm4, %ymm4
-; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm6
+; AVX512VL-NEXT:    vpsrlw $4, %ymm1, %ymm6
 ; AVX512VL-NEXT:    vpand %ymm5, %ymm6, %ymm5
-; AVX512VL-NEXT:    vpxor %ymm7, %ymm2, %ymm6
-; AVX512VL-NEXT:    vpsllw $5, %ymm6, %ymm6
-; AVX512VL-NEXT:    vpblendvb %ymm6, %ymm5, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpsllw $2, %ymm0, %ymm5
+; AVX512VL-NEXT:    vpsllw $5, %ymm2, %ymm6
+; AVX512VL-NEXT:    vpblendvb %ymm6, %ymm5, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsrlw $2, %ymm1, %ymm5
 ; AVX512VL-NEXT:    vpand %ymm5, %ymm9, %ymm5
 ; AVX512VL-NEXT:    vpaddb %ymm6, %ymm6, %ymm6
-; AVX512VL-NEXT:    vpblendvb %ymm6, %ymm5, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm5
+; AVX512VL-NEXT:    vpblendvb %ymm6, %ymm5, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsrlw $1, %ymm1, %ymm5
+; AVX512VL-NEXT:    vpand %ymm5, %ymm10, %ymm5
 ; AVX512VL-NEXT:    vpaddb %ymm6, %ymm6, %ymm6
-; AVX512VL-NEXT:    vpblendvb %ymm6, %ymm5, %ymm0, %ymm0
-; AVX512VL-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
-; AVX512VL-NEXT:    vextracti64x4 $1, %zmm1, %ymm4
-; AVX512VL-NEXT:    vpsrlw $4, %ymm4, %ymm5
-; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm6 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT:    vpblendvb %ymm6, %ymm5, %ymm1, %ymm1
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm4, %zmm1, %zmm1
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm4
+; AVX512VL-NEXT:    vpaddb %ymm4, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpsllw $4, %ymm4, %ymm5
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm6 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
 ; AVX512VL-NEXT:    vpand %ymm6, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpxor %ymm7, %ymm3, %ymm3
 ; AVX512VL-NEXT:    vpsllw $5, %ymm3, %ymm3
 ; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
-; AVX512VL-NEXT:    vpsrlw $2, %ymm4, %ymm5
-; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm7 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
-; AVX512VL-NEXT:    vpand %ymm7, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpsllw $2, %ymm4, %ymm5
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512VL-NEXT:    vpand %ymm5, %ymm8, %ymm5
 ; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
 ; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
-; AVX512VL-NEXT:    vpsrlw $1, %ymm4, %ymm5
-; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm8 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX512VL-NEXT:    vpand %ymm5, %ymm8, %ymm5
+; AVX512VL-NEXT:    vpaddb %ymm4, %ymm4, %ymm5
 ; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
 ; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm3
-; AVX512VL-NEXT:    vpsrlw $4, %ymm1, %ymm4
+; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm4
 ; AVX512VL-NEXT:    vpand %ymm6, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpxor %ymm7, %ymm2, %ymm2
 ; AVX512VL-NEXT:    vpsllw $5, %ymm2, %ymm2
-; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
-; AVX512VL-NEXT:    vpsrlw $2, %ymm1, %ymm4
-; AVX512VL-NEXT:    vpand %ymm7, %ymm4, %ymm4
-; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
-; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
-; AVX512VL-NEXT:    vpsrlw $1, %ymm1, %ymm4
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsllw $2, %ymm0, %ymm4
 ; AVX512VL-NEXT:    vpand %ymm4, %ymm8, %ymm4
 ; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
-; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
-; AVX512VL-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm4
+; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
 ; AVX512VL-NEXT:    vporq %zmm1, %zmm0, %zmm0
 ; AVX512VL-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
index ecc832f1d586027..e4867ba09696852 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
@@ -22,35 +22,34 @@ declare <32 x i8> @llvm.fshr.v32i8(<32 x i8>, <32 x i8>, <32 x i8>)
 define <4 x i64> @var_funnnel_v4i64(<4 x i64> %x, <4 x i64> %amt) nounwind {
 ; AVX1-LABEL: var_funnnel_v4i64:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm3
+; AVX1-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [63,63,63,63]
+; AVX1-NEXT:    vandps %ymm2, %ymm1, %ymm3
 ; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT:    vpsrlq %xmm4, %xmm2, %xmm5
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpsrlq %xmm4, %xmm5, %xmm6
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
-; AVX1-NEXT:    vpsrlq %xmm4, %xmm2, %xmm4
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
-; AVX1-NEXT:    vpsrlq %xmm3, %xmm0, %xmm5
+; AVX1-NEXT:    vpsrlq %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT:    vpsrlq %xmm3, %xmm0, %xmm6
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
 ; AVX1-NEXT:    vpsrlq %xmm3, %xmm0, %xmm3
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm6[0,1,2,3],xmm3[4,5,6,7]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT:    vpxor %xmm5, %xmm5, %xmm5
-; AVX1-NEXT:    vpsubq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vmovddup {{.*#+}} xmm6 = [63,63]
-; AVX1-NEXT:    # xmm6 = mem[0,0]
-; AVX1-NEXT:    vpand %xmm6, %xmm4, %xmm4
-; AVX1-NEXT:    vpsllq %xmm4, %xmm2, %xmm7
+; AVX1-NEXT:    vpxor %xmm6, %xmm6, %xmm6
+; AVX1-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX1-NEXT:    vpand %xmm2, %xmm4, %xmm4
+; AVX1-NEXT:    vpsllq %xmm4, %xmm5, %xmm7
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
-; AVX1-NEXT:    vpsllq %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm7[0,1,2,3],xmm2[4,5,6,7]
-; AVX1-NEXT:    vpsubq %xmm1, %xmm5, %xmm1
-; AVX1-NEXT:    vpand %xmm6, %xmm1, %xmm1
-; AVX1-NEXT:    vpsllq %xmm1, %xmm0, %xmm4
+; AVX1-NEXT:    vpsllq %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm7[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT:    vpsubq %xmm1, %xmm6, %xmm1
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpsllq %xmm1, %xmm0, %xmm2
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
 ; AVX1-NEXT:    vpsllq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
 ; AVX1-NEXT:    vorps %ymm0, %ymm3, %ymm0
 ; AVX1-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
index c5933114f3cd3af..720e3464ead2007 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
@@ -2154,11 +2154,11 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm10[0],xmm8[1],xmm10[2,3,4,5],xmm8[6],xmm10[7]
 ; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[2,3,0,1,14,15,12,13,10,11,10,11,10,11,10,11]
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm10[2,3,0,1]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0,1,2],ymm11[3],ymm10[4,5,6,7,8,9,10],ymm11[11],ymm10[12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = ymm10[2,3,2,3,2,3,2,3,8,9,8,9,6,7,4,5,18,19,18,19,18,19,18,19,24,25,24,25,22,23,20,21]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0>
-; AVX2-SLOW-NEXT:    vpblendvb %ymm10, %ymm8, %ymm11, %ymm8
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm10[2,3,0,1]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0,1,2],ymm12[3],ymm10[4,5,6,7,8,9,10],ymm12[11],ymm10[12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = ymm10[2,3,2,3,2,3,2,3,8,9,8,9,6,7,4,5,18,19,18,19,18,19,18,19,24,25,24,25,22,23,20,21]
+; AVX2-SLOW-NEXT:    vmovdqa %xmm11, %xmm10
+; AVX2-SLOW-NEXT:    vpblendvb %ymm10, %ymm8, %ymm12, %ymm8
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm5[0,1],ymm6[2],ymm5[3,4,5],ymm6[6],ymm5[7]
 ; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm11, %xmm12
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0,1,2,3],xmm12[4],xmm11[5],xmm12[6],xmm11[7]
@@ -2326,11 +2326,11 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm11[0],xmm10[1],xmm11[2,3,4,5],xmm10[6],xmm11[7]
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[2,3,0,1,14,15,12,13,10,11,10,11,10,11,10,11]
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <2,5,1,u,4,u,u,u>
-; AVX2-FAST-NEXT:    vpermd %ymm11, %ymm12, %ymm11
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm11[2,3,2,3,2,3,2,3,8,9,0,1,6,7,8,9,18,19,18,19,18,19,18,19,24,25,16,17,22,23,24,25]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0>
-; AVX2-FAST-NEXT:    vpblendvb %ymm11, %ymm10, %ymm12, %ymm10
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <2,5,1,u,4,u,u,u>
+; AVX2-FAST-NEXT:    vpermd %ymm11, %ymm13, %ymm11
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = ymm11[2,3,2,3,2,3,2,3,8,9,0,1,6,7,8,9,18,19,18,19,18,19,18,19,24,25,16,17,22,23,24,25]
+; AVX2-FAST-NEXT:    vmovdqa %xmm12, %xmm11
+; AVX2-FAST-NEXT:    vpblendvb %ymm11, %ymm10, %ymm13, %ymm10
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm4[0,1],ymm6[2],ymm4[3,4,5],ymm6[6],ymm4[7]
 ; AVX2-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm13
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0,1,2,3],xmm13[4],xmm12[5],xmm13[6],xmm12[7]
@@ -2386,19 +2386,19 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm14[3,1,2,3,4,5,6,7]
 ; AVX2-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3]
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm15 = ymm2[0,1],ymm3[2],ymm2[3,4,5],ymm3[6],ymm2[7]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <0,3,7,2,6,u,u,u>
-; AVX2-FAST-NEXT:    vpermd %ymm15, %ymm5, %ymm5
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <0,3,7,2,6,u,u,u>
+; AVX2-FAST-NEXT:    vpermd %ymm15, %ymm9, %ymm9
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm15 = ymm6[0],ymm4[1],ymm6[2,3,4],ymm4[5],ymm6[6,7]
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm9
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0],xmm15[1],xmm9[2],xmm15[3],xmm9[4,5,6,7]
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm5
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm15[1],xmm5[2],xmm15[3],xmm5[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[0,1,6,7,4,5,2,3,0,1,14,15,u,u,u,u]
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5,6],ymm13[7]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[0,1,6,7,4,5,2,3,0,1,14,15,u,u,u,u]
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5,6],ymm13[7]
 ; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31>
-; AVX2-FAST-NEXT:    vpshufb %ymm13, %ymm5, %ymm5
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm14 = xmm14[0,1],xmm5[2,3]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0],ymm9[1,2,3,4,5,6,7],ymm5[8],ymm9[9,10,11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpshufb %ymm13, %ymm9, %ymm9
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm14 = xmm14[0,1],xmm9[2,3]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm9[0],ymm5[1,2,3,4,5,6,7],ymm9[8],ymm5[9,10,11,12,13,14,15]
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm14[0,1,2,3],ymm5[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm6[0,1],ymm4[2],ymm6[3,4],ymm4[5],ymm6[6,7]
 ; AVX2-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [2,5,1,4,2,5,1,4]
@@ -2493,11 +2493,11 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm8 = xmm11[0],xmm8[1],xmm11[2,3,4,5],xmm8[6],xmm11[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[2,3,0,1,14,15,12,13,10,11,10,11,10,11,10,11]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm11 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm12 = ymm11[2,3,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm11 = ymm11[0,1,2],ymm12[3],ymm11[4,5,6,7,8,9,10],ymm12[11],ymm11[12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm12 = ymm11[2,3,2,3,2,3,2,3,8,9,8,9,6,7,4,5,18,19,18,19,18,19,18,19,24,25,24,25,22,23,20,21]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm11 = <255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0>
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm11, %ymm8, %ymm12, %ymm8
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm13 = ymm11[2,3,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm11 = ymm11[0,1,2],ymm13[3],ymm11[4,5,6,7,8,9,10],ymm13[11],ymm11[12,13,14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm13 = ymm11[2,3,2,3,2,3,2,3,8,9,8,9,6,7,4,5,18,19,18,19,18,19,18,19,24,25,24,25,22,23,20,21]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm12, %xmm11
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm11, %ymm8, %ymm13, %ymm8
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm12 = ymm5[0,1],ymm6[2],ymm5[3,4,5],ymm6[6],ymm5[7]
 ; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm12, %xmm13
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0,1,2,3],xmm13[4],xmm12[5],xmm13[6],xmm12[7]
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-4.ll
index df33d3b9d8fd4b1..2d66e4d68dbfb93 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-4.ll
@@ -877,29 +877,27 @@ define void @load_i8_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-ONLY-NEXT:    vpshufb %xmm6, %xmm5, %xmm7
 ; AVX2-ONLY-NEXT:    vpshufb %xmm6, %xmm4, %xmm6
 ; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
-; AVX2-ONLY-NEXT:    vpbroadcastd {{.*#+}} xmm7 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
+; AVX2-ONLY-NEXT:    vpbroadcastd {{.*#+}} ymm7 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
 ; AVX2-ONLY-NEXT:    vpshufb %xmm7, %xmm3, %xmm8
-; AVX2-ONLY-NEXT:    vpshufb %xmm7, %xmm2, %xmm7
-; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} xmm7 = xmm7[0,1],xmm6[2,3]
-; AVX2-ONLY-NEXT:    vpbroadcastd {{.*#+}} ymm8 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
-; AVX2-ONLY-NEXT:    vpshufb %ymm8, %ymm1, %ymm9
+; AVX2-ONLY-NEXT:    vpshufb %xmm7, %xmm2, %xmm9
+; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} xmm8 = xmm8[0,1],xmm6[2,3]
+; AVX2-ONLY-NEXT:    vpshufb %ymm7, %ymm1, %ymm9
 ; AVX2-ONLY-NEXT:    vpbroadcastq {{.*#+}} ymm6 = [0,4,0,4,0,4,0,4]
 ; AVX2-ONLY-NEXT:    vpermd %ymm9, %ymm6, %ymm9
-; AVX2-ONLY-NEXT:    vpshufb %ymm8, %ymm0, %ymm8
-; AVX2-ONLY-NEXT:    vpermd %ymm8, %ymm6, %ymm8
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm9[6,7]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
+; AVX2-ONLY-NEXT:    vpshufb %ymm7, %ymm0, %ymm7
+; AVX2-ONLY-NEXT:    vpermd %ymm7, %ymm6, %ymm7
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm9[6,7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
 ; AVX2-ONLY-NEXT:    vpbroadcastd {{.*#+}} xmm8 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
 ; AVX2-ONLY-NEXT:    vpshufb %xmm8, %xmm5, %xmm9
 ; AVX2-ONLY-NEXT:    vpshufb %xmm8, %xmm4, %xmm8
 ; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1]
-; AVX2-ONLY-NEXT:    vpbroadcastd {{.*#+}} xmm9 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
-; AVX2-ONLY-NEXT:    vpshufb %xmm9, %xmm3, %xmm10
-; AVX2-ONLY-NEXT:    vpshufb %xmm9, %xmm2, %xmm9
-; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} xmm8 = xmm9[0,1],xmm8[2,3]
 ; AVX2-ONLY-NEXT:    vpbroadcastd {{.*#+}} ymm9 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
+; AVX2-ONLY-NEXT:    vpshufb %xmm9, %xmm3, %xmm10
+; AVX2-ONLY-NEXT:    vpshufb %xmm9, %xmm2, %xmm11
+; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} xmm8 = xmm10[0,1],xmm8[2,3]
 ; AVX2-ONLY-NEXT:    vpshufb %ymm9, %ymm1, %ymm10
 ; AVX2-ONLY-NEXT:    vpermd %ymm10, %ymm6, %ymm10
 ; AVX2-ONLY-NEXT:    vpshufb %ymm9, %ymm0, %ymm9
@@ -910,12 +908,11 @@ define void @load_i8_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-ONLY-NEXT:    vpshufb %xmm9, %xmm5, %xmm10
 ; AVX2-ONLY-NEXT:    vpshufb %xmm9, %xmm4, %xmm9
 ; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1]
-; AVX2-ONLY-NEXT:    vpbroadcastd {{.*#+}} xmm10 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
-; AVX2-ONLY-NEXT:    vpshufb %xmm10, %xmm3, %xmm11
-; AVX2-ONLY-NEXT:    vpshufb %xmm10, %xmm2, %xmm10
-; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm10 = xmm10[0],xmm11[0],xmm10[1],xmm11[1]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} xmm9 = xmm10[0,1],xmm9[2,3]
 ; AVX2-ONLY-NEXT:    vpbroadcastd {{.*#+}} ymm10 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
+; AVX2-ONLY-NEXT:    vpshufb %xmm10, %xmm3, %xmm11
+; AVX2-ONLY-NEXT:    vpshufb %xmm10, %xmm2, %xmm12
+; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm11 = xmm12[0],xmm11[0],xmm12[1],xmm11[1]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} xmm9 = xmm11[0,1],xmm9[2,3]
 ; AVX2-ONLY-NEXT:    vpshufb %ymm10, %ymm1, %ymm11
 ; AVX2-ONLY-NEXT:    vpermd %ymm11, %ymm6, %ymm11
 ; AVX2-ONLY-NEXT:    vpshufb %ymm10, %ymm0, %ymm10
@@ -926,15 +923,14 @@ define void @load_i8_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-ONLY-NEXT:    vpshufb %xmm10, %xmm5, %xmm5
 ; AVX2-ONLY-NEXT:    vpshufb %xmm10, %xmm4, %xmm4
 ; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
-; AVX2-ONLY-NEXT:    vpbroadcastd {{.*#+}} xmm5 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
+; AVX2-ONLY-NEXT:    vpbroadcastd {{.*#+}} ymm5 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
 ; AVX2-ONLY-NEXT:    vpshufb %xmm5, %xmm3, %xmm3
 ; AVX2-ONLY-NEXT:    vpshufb %xmm5, %xmm2, %xmm2
 ; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3]
-; AVX2-ONLY-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
-; AVX2-ONLY-NEXT:    vpshufb %ymm3, %ymm1, %ymm1
+; AVX2-ONLY-NEXT:    vpshufb %ymm5, %ymm1, %ymm1
 ; AVX2-ONLY-NEXT:    vpermd %ymm1, %ymm6, %ymm1
-; AVX2-ONLY-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-ONLY-NEXT:    vpshufb %ymm5, %ymm0, %ymm0
 ; AVX2-ONLY-NEXT:    vpermd %ymm0, %ymm6, %ymm0
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
@@ -1775,48 +1771,49 @@ define void @load_i8_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-ONLY-NEXT:    vmovdqa 64(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovdqa 96(%rdi), %ymm2
+; AVX2-ONLY-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovdqa (%rdi), %xmm4
-; AVX2-ONLY-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-ONLY-NEXT:    vmovdqa 16(%rdi), %xmm5
-; AVX2-ONLY-NEXT:    vmovdqa 32(%rdi), %xmm6
+; AVX2-ONLY-NEXT:    vmovdqa 32(%rdi), %xmm12
 ; AVX2-ONLY-NEXT:    vmovdqa 48(%rdi), %xmm7
 ; AVX2-ONLY-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
 ; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm7, %xmm1
 ; AVX2-ONLY-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm6, %xmm8
+; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm12, %xmm8
 ; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm8[0],xmm1[0],xmm8[1],xmm1[1]
-; AVX2-ONLY-NEXT:    vpbroadcastd {{.*#+}} xmm10 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
-; AVX2-ONLY-NEXT:    vpshufb %xmm10, %xmm5, %xmm8
-; AVX2-ONLY-NEXT:    vpshufb %xmm10, %xmm4, %xmm9
+; AVX2-ONLY-NEXT:    vpbroadcastd {{.*#+}} ymm13 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
+; AVX2-ONLY-NEXT:    vpshufb %xmm13, %xmm5, %xmm8
+; AVX2-ONLY-NEXT:    vpshufb %xmm13, %xmm4, %xmm9
 ; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} xmm8 = xmm8[0,1],xmm1[2,3]
-; AVX2-ONLY-NEXT:    vpbroadcastd {{.*#+}} ymm13 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
 ; AVX2-ONLY-NEXT:    vpshufb %ymm13, %ymm2, %ymm9
-; AVX2-ONLY-NEXT:    vmovdqa %ymm2, %ymm4
-; AVX2-ONLY-NEXT:    vmovdqu %ymm2, (%rsp) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [0,4,0,4,0,4,0,4]
 ; AVX2-ONLY-NEXT:    vpermd %ymm9, %ymm1, %ymm9
-; AVX2-ONLY-NEXT:    vpshufb %ymm13, %ymm0, %ymm11
-; AVX2-ONLY-NEXT:    vpermd %ymm11, %ymm1, %ymm11
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm9 = ymm11[0,1,2,3,4,5],ymm9[6,7]
+; AVX2-ONLY-NEXT:    vpshufb %ymm13, %ymm0, %ymm10
+; AVX2-ONLY-NEXT:    vpermd %ymm10, %ymm1, %ymm10
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3,4,5],ymm9[6,7]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm9[4,5,6,7]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-ONLY-NEXT:    vmovdqa 176(%rdi), %xmm8
-; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm8, %xmm11
-; AVX2-ONLY-NEXT:    vmovdqa 160(%rdi), %xmm9
-; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm9, %xmm3
-; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm11[0],xmm3[1],xmm11[1]
-; AVX2-ONLY-NEXT:    vmovdqa 144(%rdi), %xmm11
-; AVX2-ONLY-NEXT:    vpshufb %xmm10, %xmm11, %xmm14
-; AVX2-ONLY-NEXT:    vmovdqa 128(%rdi), %xmm12
-; AVX2-ONLY-NEXT:    vpshufb %xmm10, %xmm12, %xmm10
-; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm14 = xmm10[0],xmm14[0],xmm10[1],xmm14[1]
-; AVX2-ONLY-NEXT:    vmovdqa 224(%rdi), %ymm10
+; AVX2-ONLY-NEXT:    vmovdqa 176(%rdi), %xmm0
+; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm0, %xmm10
+; AVX2-ONLY-NEXT:    vmovdqa %xmm0, %xmm8
+; AVX2-ONLY-NEXT:    vmovdqa 160(%rdi), %xmm0
+; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm0, %xmm3
+; AVX2-ONLY-NEXT:    vmovdqa %xmm0, %xmm9
+; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm10[0],xmm3[1],xmm10[1]
+; AVX2-ONLY-NEXT:    vmovdqa 144(%rdi), %xmm0
+; AVX2-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-ONLY-NEXT:    vpshufb %xmm13, %xmm0, %xmm10
+; AVX2-ONLY-NEXT:    vmovdqa 128(%rdi), %xmm0
+; AVX2-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-ONLY-NEXT:    vpshufb %xmm13, %xmm0, %xmm14
+; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm14 = xmm14[0],xmm10[0],xmm14[1],xmm10[1]
+; AVX2-ONLY-NEXT:    vmovdqa 224(%rdi), %ymm11
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} xmm3 = xmm14[0,1],xmm3[2,3]
-; AVX2-ONLY-NEXT:    vpshufb %ymm13, %ymm10, %ymm14
+; AVX2-ONLY-NEXT:    vpshufb %ymm13, %ymm11, %ymm14
 ; AVX2-ONLY-NEXT:    vpermd %ymm14, %ymm1, %ymm15
 ; AVX2-ONLY-NEXT:    vmovdqa 192(%rdi), %ymm0
-; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-ONLY-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vpshufb %ymm13, %ymm0, %ymm13
 ; AVX2-ONLY-NEXT:    vpermd %ymm13, %ymm1, %ymm13
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm15[6,7]
@@ -1824,104 +1821,104 @@ define void @load_i8_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
 ; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm7, %xmm13
-; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm6, %xmm15
-; AVX2-ONLY-NEXT:    vmovdqa %xmm6, %xmm7
-; AVX2-ONLY-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm12, %xmm15
 ; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm13 = xmm15[0],xmm13[0],xmm15[1],xmm13[1]
-; AVX2-ONLY-NEXT:    vpbroadcastd {{.*#+}} xmm15 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
+; AVX2-ONLY-NEXT:    vpbroadcastd {{.*#+}} ymm15 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
 ; AVX2-ONLY-NEXT:    vpshufb %xmm15, %xmm5, %xmm0
-; AVX2-ONLY-NEXT:    vmovdqa %xmm5, %xmm14
-; AVX2-ONLY-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX2-ONLY-NEXT:    vpshufb %xmm15, %xmm5, %xmm2
+; AVX2-ONLY-NEXT:    vmovdqa %xmm5, %xmm10
+; AVX2-ONLY-NEXT:    vpshufb %xmm15, %xmm4, %xmm2
+; AVX2-ONLY-NEXT:    vmovdqa %xmm4, %xmm14
+; AVX2-ONLY-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm13[2,3]
-; AVX2-ONLY-NEXT:    vpbroadcastd {{.*#+}} ymm2 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
-; AVX2-ONLY-NEXT:    vpshufb %ymm2, %ymm4, %ymm13
-; AVX2-ONLY-NEXT:    vpermd %ymm13, %ymm1, %ymm13
 ; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX2-ONLY-NEXT:    vpshufb %ymm2, %ymm6, %ymm4
-; AVX2-ONLY-NEXT:    vpermd %ymm4, %ymm1, %ymm4
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm13[6,7]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
+; AVX2-ONLY-NEXT:    vpshufb %ymm15, %ymm6, %ymm2
+; AVX2-ONLY-NEXT:    vpermd %ymm2, %ymm1, %ymm2
+; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX2-ONLY-NEXT:    vpshufb %ymm15, %ymm5, %ymm13
+; AVX2-ONLY-NEXT:    vpermd %ymm13, %ymm1, %ymm13
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3,4,5],ymm2[6,7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-ONLY-NEXT:    vmovdqa %xmm8, %xmm4
 ; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm8, %xmm0
-; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm9, %xmm3
-; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
-; AVX2-ONLY-NEXT:    vpshufb %xmm15, %xmm11, %xmm3
-; AVX2-ONLY-NEXT:    vpshufb %xmm15, %xmm12, %xmm4
-; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} xmm0 = xmm3[0,1],xmm0[2,3]
-; AVX2-ONLY-NEXT:    vpshufb %ymm2, %ymm10, %ymm3
-; AVX2-ONLY-NEXT:    vpermd %ymm3, %ymm1, %ymm3
-; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-ONLY-NEXT:    vpshufb %ymm2, %ymm4, %ymm2
+; AVX2-ONLY-NEXT:    vmovdqa %xmm9, %xmm7
+; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm9, %xmm2
+; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; AVX2-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; AVX2-ONLY-NEXT:    vpshufb %xmm15, %xmm8, %xmm2
+; AVX2-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX2-ONLY-NEXT:    vpshufb %xmm15, %xmm9, %xmm3
+; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
+; AVX2-ONLY-NEXT:    vpshufb %ymm15, %ymm11, %ymm2
 ; AVX2-ONLY-NEXT:    vpermd %ymm2, %ymm1, %ymm2
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7]
+; AVX2-ONLY-NEXT:    vmovdqu (%rsp), %ymm3 # 32-byte Reload
+; AVX2-ONLY-NEXT:    vpshufb %ymm15, %ymm3, %ymm3
+; AVX2-ONLY-NEXT:    vpermd %ymm3, %ymm1, %ymm3
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
 ; AVX2-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
 ; AVX2-ONLY-NEXT:    vpshufb %xmm0, %xmm2, %xmm2
-; AVX2-ONLY-NEXT:    vpshufb %xmm0, %xmm7, %xmm3
+; AVX2-ONLY-NEXT:    vpshufb %xmm0, %xmm12, %xmm3
 ; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; AVX2-ONLY-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
-; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm14, %xmm4
-; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm5, %xmm13
-; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm13[0],xmm4[0],xmm13[1],xmm4[1]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} xmm2 = xmm4[0,1],xmm2[2,3]
-; AVX2-ONLY-NEXT:    vpbroadcastd {{.*#+}} ymm4 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
-; AVX2-ONLY-NEXT:    vmovdqu (%rsp), %ymm7 # 32-byte Reload
-; AVX2-ONLY-NEXT:    vpshufb %ymm4, %ymm7, %ymm13
+; AVX2-ONLY-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
+; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm10, %xmm13
+; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm14, %xmm15
+; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm13 = xmm15[0],xmm13[0],xmm15[1],xmm13[1]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} xmm2 = xmm13[0,1],xmm2[2,3]
+; AVX2-ONLY-NEXT:    vpshufb %ymm3, %ymm6, %ymm13
 ; AVX2-ONLY-NEXT:    vpermd %ymm13, %ymm1, %ymm13
-; AVX2-ONLY-NEXT:    vpshufb %ymm4, %ymm6, %ymm15
+; AVX2-ONLY-NEXT:    vpshufb %ymm3, %ymm5, %ymm15
 ; AVX2-ONLY-NEXT:    vpermd %ymm15, %ymm1, %ymm15
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm13 = ymm15[0,1,2,3,4,5],ymm13[6,7]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm13 = ymm2[0,1,2,3],ymm13[4,5,6,7]
-; AVX2-ONLY-NEXT:    vpshufb %xmm0, %xmm8, %xmm2
-; AVX2-ONLY-NEXT:    vpshufb %xmm0, %xmm9, %xmm0
+; AVX2-ONLY-NEXT:    vpshufb %xmm0, %xmm4, %xmm2
+; AVX2-ONLY-NEXT:    vmovdqa %xmm4, %xmm14
+; AVX2-ONLY-NEXT:    vpshufb %xmm0, %xmm7, %xmm0
 ; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm11, %xmm2
-; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm12, %xmm3
-; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm8, %xmm2
+; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm9, %xmm15
+; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm15[0],xmm2[0],xmm15[1],xmm2[1]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
-; AVX2-ONLY-NEXT:    vpshufb %ymm4, %ymm10, %ymm2
+; AVX2-ONLY-NEXT:    vmovdqa %ymm11, %ymm15
+; AVX2-ONLY-NEXT:    vpshufb %ymm3, %ymm11, %ymm2
 ; AVX2-ONLY-NEXT:    vpermd %ymm2, %ymm1, %ymm2
-; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX2-ONLY-NEXT:    vpshufb %ymm4, %ymm14, %ymm3
+; AVX2-ONLY-NEXT:    vmovdqu (%rsp), %ymm11 # 32-byte Reload
+; AVX2-ONLY-NEXT:    vpshufb %ymm3, %ymm11, %ymm3
 ; AVX2-ONLY-NEXT:    vpermd %ymm3, %ymm1, %ymm3
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0,1,2,3],ymm2[4,5,6,7]
 ; AVX2-ONLY-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
 ; AVX2-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
 ; AVX2-ONLY-NEXT:    vpshufb %xmm0, %xmm2, %xmm2
+; AVX2-ONLY-NEXT:    vpshufb %xmm0, %xmm12, %xmm6
+; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm6[0],xmm2[0],xmm6[1],xmm2[1]
+; AVX2-ONLY-NEXT:    vpbroadcastd {{.*#+}} ymm6 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
+; AVX2-ONLY-NEXT:    vpshufb %xmm6, %xmm10, %xmm5
 ; AVX2-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX2-ONLY-NEXT:    vpshufb %xmm0, %xmm4, %xmm4
-; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; AVX2-ONLY-NEXT:    vpbroadcastd {{.*#+}} xmm4 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
-; AVX2-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX2-ONLY-NEXT:    vpshufb %xmm4, %xmm5, %xmm5
-; AVX2-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX2-ONLY-NEXT:    vpshufb %xmm4, %xmm6, %xmm6
-; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} xmm2 = xmm5[0,1],xmm2[2,3]
-; AVX2-ONLY-NEXT:    vpbroadcastd {{.*#+}} ymm5 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
-; AVX2-ONLY-NEXT:    vpshufb %ymm5, %ymm7, %ymm6
-; AVX2-ONLY-NEXT:    vpermd %ymm6, %ymm1, %ymm6
-; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-ONLY-NEXT:    vpshufb %ymm5, %ymm7, %ymm7
-; AVX2-ONLY-NEXT:    vpermd %ymm7, %ymm1, %ymm7
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3,4,5],ymm6[6,7]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm6[4,5,6,7]
-; AVX2-ONLY-NEXT:    vpshufb %xmm0, %xmm8, %xmm6
-; AVX2-ONLY-NEXT:    vpshufb %xmm0, %xmm9, %xmm0
-; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
-; AVX2-ONLY-NEXT:    vpshufb %xmm4, %xmm11, %xmm6
-; AVX2-ONLY-NEXT:    vpshufb %xmm4, %xmm12, %xmm4
-; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
+; AVX2-ONLY-NEXT:    vpshufb %xmm6, %xmm4, %xmm4
+; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} xmm2 = xmm4[0,1],xmm2[2,3]
+; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX2-ONLY-NEXT:    vpshufb %ymm6, %ymm4, %ymm4
+; AVX2-ONLY-NEXT:    vpermd %ymm4, %ymm1, %ymm4
+; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX2-ONLY-NEXT:    vpshufb %ymm6, %ymm5, %ymm5
+; AVX2-ONLY-NEXT:    vpermd %ymm5, %ymm1, %ymm5
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
+; AVX2-ONLY-NEXT:    vpshufb %xmm0, %xmm14, %xmm4
+; AVX2-ONLY-NEXT:    vpshufb %xmm0, %xmm7, %xmm0
+; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; AVX2-ONLY-NEXT:    vpshufb %xmm6, %xmm8, %xmm4
+; AVX2-ONLY-NEXT:    vpshufb %xmm6, %xmm9, %xmm5
+; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} xmm0 = xmm4[0,1],xmm0[2,3]
-; AVX2-ONLY-NEXT:    vpshufb %ymm5, %ymm10, %ymm4
-; AVX2-ONLY-NEXT:    vpshufb %ymm5, %ymm14, %ymm5
+; AVX2-ONLY-NEXT:    vpshufb %ymm6, %ymm15, %ymm4
+; AVX2-ONLY-NEXT:    vpshufb %ymm6, %ymm11, %ymm5
 ; AVX2-ONLY-NEXT:    vpermd %ymm4, %ymm1, %ymm4
 ; AVX2-ONLY-NEXT:    vpermd %ymm5, %ymm1, %ymm1
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm4[6,7]
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll
index 0a902da4f2edd37..f0b7d41eb299598 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll
@@ -1667,112 +1667,112 @@ define void @load_i8_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ;
 ; AVX2-ONLY-LABEL: load_i8_stride5_vf32:
 ; AVX2-ONLY:       # %bb.0:
-; AVX2-ONLY-NEXT:    vmovdqa 128(%rdi), %ymm0
-; AVX2-ONLY-NEXT:    vmovdqa (%rdi), %ymm4
-; AVX2-ONLY-NEXT:    vmovdqa 32(%rdi), %ymm5
-; AVX2-ONLY-NEXT:    vmovdqa 64(%rdi), %ymm1
-; AVX2-ONLY-NEXT:    vmovdqa 96(%rdi), %ymm2
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm3 = <255,255,0,0,255,255,u,u,0,0,255,255,0,0,255,255,u,u,0,0,255,255,0,0,255,255,u,u,0,0,255,255>
-; AVX2-ONLY-NEXT:    vpblendvb %ymm3, %ymm4, %ymm5, %ymm6
-; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm6, %xmm7
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm7 = zero,zero,zero,zero,xmm7[4,9,14],zero,zero,zero,xmm7[2,7,12,u,u,u]
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[0,5,10,15],zero,zero,zero,xmm6[3,8,13],zero,zero,zero,xmm6[u,u,u]
-; AVX2-ONLY-NEXT:    vpor %xmm7, %xmm6, %xmm6
+; AVX2-ONLY-NEXT:    vmovdqa (%rdi), %ymm3
+; AVX2-ONLY-NEXT:    vmovdqa 32(%rdi), %ymm4
+; AVX2-ONLY-NEXT:    vmovdqa 64(%rdi), %ymm0
+; AVX2-ONLY-NEXT:    vmovdqa 96(%rdi), %ymm1
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm2 = <255,255,0,0,255,255,u,u,0,0,255,255,0,0,255,255,u,u,0,0,255,255,0,0,255,255,u,u,0,0,255,255>
+; AVX2-ONLY-NEXT:    vpblendvb %ymm2, %ymm3, %ymm4, %ymm5
+; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm5, %xmm6
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,zero,xmm6[4,9,14],zero,zero,zero,xmm6[2,7,12,u,u,u]
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[0,5,10,15],zero,zero,zero,xmm5[3,8,13],zero,zero,zero,xmm5[u,u,u]
+; AVX2-ONLY-NEXT:    vpor %xmm6, %xmm5, %xmm5
 ; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm9 = <255,255,u,u,0,0,255,255,0,0,255,255,u,u,0,0,255,255,0,0,255,255,u,u,0,0,255,255,0,0,255,255>
-; AVX2-ONLY-NEXT:    vpblendvb %ymm9, %ymm1, %ymm2, %ymm7
-; AVX2-ONLY-NEXT:    vpermq {{.*#+}} ymm8 = ymm7[2,3,0,1]
-; AVX2-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [255,255,0,255,0,255,255,0,255,0,255,255,0,255,0,255,255,255,0,255,0,255,255,0,255,0,255,255,0,255,0,255]
-; AVX2-ONLY-NEXT:    # ymm10 = mem[0,1,0,1]
-; AVX2-ONLY-NEXT:    vpblendvb %ymm10, %ymm7, %ymm8, %ymm7
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,1,6,11,16,21,26,31,20,25,30,19,24,29,u,u,u,u,u,u]
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm10 = <255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0>
-; AVX2-ONLY-NEXT:    vpblendvb %ymm10, %ymm6, %ymm7, %ymm6
-; AVX2-ONLY-NEXT:    vmovdqa 144(%rdi), %xmm7
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm11 = xmm7[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm7[1,6,11]
-; AVX2-ONLY-NEXT:    vmovdqa 128(%rdi), %xmm8
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm12 = xmm8[u,u,u,u,u,u,u,u,u,u,2,7,12],zero,zero,zero
-; AVX2-ONLY-NEXT:    vpor %xmm11, %xmm12, %xmm11
-; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} ymm11 = ymm6[0,1,2,3,4],ymm11[5,6,7],ymm6[8,9,10,11,12],ymm11[13,14,15]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm11[4,5,6,7]
-; AVX2-ONLY-NEXT:    vpblendvb %ymm9, %ymm4, %ymm5, %ymm9
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm11 = xmm9[1,6,11],zero,zero,zero,zero,xmm9[4,9,14],zero,zero,zero,xmm9[u,u,u]
-; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm9, %xmm9
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm9 = zero,zero,zero,xmm9[0,5,10,15],zero,zero,zero,xmm9[3,8,13,u,u,u]
-; AVX2-ONLY-NEXT:    vpor %xmm11, %xmm9, %xmm9
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm11 = <255,255,0,0,255,255,0,0,u,u,255,255,0,0,255,255,0,0,u,u,255,255,0,0,255,255,0,0,u,u,255,255>
-; AVX2-ONLY-NEXT:    vpblendvb %ymm11, %ymm2, %ymm1, %ymm12
-; AVX2-ONLY-NEXT:    vpermq {{.*#+}} ymm13 = ymm12[2,3,0,1]
-; AVX2-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [0,255,255,0,255,0,255,255,0,255,0,255,255,0,255,0,0,255,255,0,255,0,255,255,0,255,0,255,255,0,255,0]
-; AVX2-ONLY-NEXT:    # ymm14 = mem[0,1,0,1]
-; AVX2-ONLY-NEXT:    vpblendvb %ymm14, %ymm12, %ymm13, %ymm12
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,2,7,12,17,22,27,16,21,26,31,20,25,30,u,u,u,u,u,u]
-; AVX2-ONLY-NEXT:    vpblendvb %ymm10, %ymm9, %ymm12, %ymm9
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm12 = xmm7[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm7[2,7,12]
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm13 = xmm8[u,u,u,u,u,u,u,u,u,u,3,8,13],zero,zero,zero
-; AVX2-ONLY-NEXT:    vpor %xmm12, %xmm13, %xmm12
-; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
-; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} ymm12 = ymm9[0,1,2,3,4],ymm12[5,6,7],ymm9[8,9,10,11,12],ymm12[13,14,15]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm12[4,5,6,7]
-; AVX2-ONLY-NEXT:    vpblendvb %ymm11, %ymm5, %ymm4, %ymm11
-; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm11, %xmm12
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm12 = zero,zero,zero,xmm12[1,6,11],zero,zero,zero,zero,xmm12[4,9,14,u,u,u]
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[2,7,12],zero,zero,zero,xmm11[0,5,10,15],zero,zero,zero,xmm11[u,u,u]
-; AVX2-ONLY-NEXT:    vpor %xmm12, %xmm11, %xmm11
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm12 = <255,255,0,0,u,u,255,255,0,0,255,255,0,0,u,u,255,255,0,0,255,255,0,0,u,u,255,255,0,0,255,255>
-; AVX2-ONLY-NEXT:    vpblendvb %ymm12, %ymm2, %ymm1, %ymm13
-; AVX2-ONLY-NEXT:    vpermq {{.*#+}} ymm14 = ymm13[2,3,0,1]
-; AVX2-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm15 = [255,0,255,255,0,255,0,255,255,0,255,0,255,255,0,255,255,0,255,255,0,255,0,255,255,0,255,0,255,255,0,255]
-; AVX2-ONLY-NEXT:    # ymm15 = mem[0,1,0,1]
-; AVX2-ONLY-NEXT:    vpblendvb %ymm15, %ymm13, %ymm14, %ymm13
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,3,8,13,18,23,28,17,22,27,16,21,26,31,u,u,u,u,u,u]
-; AVX2-ONLY-NEXT:    vpblendvb %ymm10, %ymm11, %ymm13, %ymm10
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm11 = xmm7[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm7[3,8,13]
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm13 = xmm8[u,u,u,u,u,u,u,u,u,u,4,9,14],zero,zero,zero
-; AVX2-ONLY-NEXT:    vpor %xmm11, %xmm13, %xmm11
-; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} ymm11 = ymm10[0,1,2,3,4],ymm11[5,6,7],ymm10[8,9,10,11,12],ymm11[13,14,15]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
-; AVX2-ONLY-NEXT:    vpblendvb %ymm12, %ymm5, %ymm4, %ymm11
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm12 = xmm11[3,8,13],zero,zero,zero,xmm11[1,6,11],zero,zero,zero,zero,xmm11[u,u,u]
-; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm11, %xmm11
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm11 = zero,zero,zero,xmm11[2,7,12],zero,zero,zero,xmm11[0,5,10,15,u,u,u]
-; AVX2-ONLY-NEXT:    vpor %xmm12, %xmm11, %xmm11
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,u,255,255,0,0,255,255,0,0,u,u,255,255,0,0,255,255,0,0,u,u,255,255,0,0,255,255,0,0,u,u>
-; AVX2-ONLY-NEXT:    vpblendvb %ymm12, %ymm2, %ymm1, %ymm13
-; AVX2-ONLY-NEXT:    vpermq {{.*#+}} ymm14 = ymm13[2,3,0,1]
-; AVX2-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm15 = [0,255,0,255,255,0,255,0,255,255,0,255,0,255,255,0,0,255,0,255,255,0,255,0,255,255,0,255,0,255,255,0]
-; AVX2-ONLY-NEXT:    # ymm15 = mem[0,1,0,1]
-; AVX2-ONLY-NEXT:    vpblendvb %ymm15, %ymm13, %ymm14, %ymm13
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,4,9,14,19,24,29,18,23,28,17,22,27,u,u,u,u,u,u,u]
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm14 = <255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,u,u,u,u,u,u,u>
-; AVX2-ONLY-NEXT:    vpblendvb %ymm14, %ymm11, %ymm13, %ymm11
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,xmm7[4,9,14]
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,u,u,u,u,0,5,10,15],zero,zero,zero
-; AVX2-ONLY-NEXT:    vpor %xmm7, %xmm8, %xmm7
-; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm8 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0]
-; AVX2-ONLY-NEXT:    vpblendvb %ymm8, %ymm11, %ymm7, %ymm7
-; AVX2-ONLY-NEXT:    vpblendvb %ymm12, %ymm5, %ymm4, %ymm4
-; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm4, %xmm5
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm5[3,8,13],zero,zero,zero,xmm5[1,6,11,u,u,u,u]
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[4,9,14],zero,zero,zero,xmm4[2,7,12],zero,zero,zero,xmm4[u,u,u,u]
-; AVX2-ONLY-NEXT:    vpor %xmm5, %xmm4, %xmm4
-; AVX2-ONLY-NEXT:    vpblendvb %ymm3, %ymm1, %ymm2, %ymm1
-; AVX2-ONLY-NEXT:    vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
-; AVX2-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [255,0,255,0,255,255,0,255,0,255,255,0,255,0,255,255,255,0,255,0,255,255,0,255,0,255,255,0,255,0,255,255]
-; AVX2-ONLY-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX2-ONLY-NEXT:    vpblendvb %ymm3, %ymm1, %ymm2, %ymm1
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,0,5,10,15,20,25,30,19,24,29,18,23,28,u,u,u,u,u,u,u]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1,2],ymm1[3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,1,6,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,21,26,31,u,u,u,u,u,u,u,u]
-; AVX2-ONLY-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [0,5,0,5,0,5,0,5]
-; AVX2-ONLY-NEXT:    vpermd %ymm0, %ymm2, %ymm0
-; AVX2-ONLY-NEXT:    vpblendvb %ymm8, %ymm1, %ymm0, %ymm0
-; AVX2-ONLY-NEXT:    vmovdqa %ymm6, (%rsi)
-; AVX2-ONLY-NEXT:    vmovdqa %ymm9, (%rdx)
-; AVX2-ONLY-NEXT:    vmovdqa %ymm10, (%rcx)
-; AVX2-ONLY-NEXT:    vmovdqa %ymm7, (%r8)
+; AVX2-ONLY-NEXT:    vpblendvb %ymm9, %ymm0, %ymm1, %ymm6
+; AVX2-ONLY-NEXT:    vpermq {{.*#+}} ymm7 = ymm6[2,3,0,1]
+; AVX2-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm8 = [255,255,0,255,0,255,255,0,255,0,255,255,0,255,0,255,255,255,0,255,0,255,255,0,255,0,255,255,0,255,0,255]
+; AVX2-ONLY-NEXT:    # ymm8 = mem[0,1,0,1]
+; AVX2-ONLY-NEXT:    vpblendvb %ymm8, %ymm6, %ymm7, %ymm6
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,1,6,11,16,21,26,31,20,25,30,19,24,29,u,u,u,u,u,u]
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm8 = <255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,u,u,u,u,u,u,u>
+; AVX2-ONLY-NEXT:    vmovdqa %xmm8, %xmm7
+; AVX2-ONLY-NEXT:    vpblendvb %ymm7, %ymm5, %ymm6, %ymm6
+; AVX2-ONLY-NEXT:    vpblendvb %ymm9, %ymm3, %ymm4, %ymm5
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm9 = xmm5[1,6,11],zero,zero,zero,zero,xmm5[4,9,14],zero,zero,zero,xmm5[u,u,u]
+; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm5, %xmm5
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm5[0,5,10,15],zero,zero,zero,xmm5[3,8,13,u,u,u]
+; AVX2-ONLY-NEXT:    vpor %xmm5, %xmm9, %xmm5
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm9 = <255,255,0,0,255,255,0,0,u,u,255,255,0,0,255,255,0,0,u,u,255,255,0,0,255,255,0,0,u,u,255,255>
+; AVX2-ONLY-NEXT:    vpblendvb %ymm9, %ymm1, %ymm0, %ymm10
+; AVX2-ONLY-NEXT:    vpermq {{.*#+}} ymm11 = ymm10[2,3,0,1]
+; AVX2-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [0,255,255,0,255,0,255,255,0,255,0,255,255,0,255,0,0,255,255,0,255,0,255,255,0,255,0,255,255,0,255,0]
+; AVX2-ONLY-NEXT:    # ymm12 = mem[0,1,0,1]
+; AVX2-ONLY-NEXT:    vpblendvb %ymm12, %ymm10, %ymm11, %ymm10
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,2,7,12,17,22,27,16,21,26,31,20,25,30,u,u,u,u,u,u]
+; AVX2-ONLY-NEXT:    vpblendvb %ymm7, %ymm5, %ymm10, %ymm5
+; AVX2-ONLY-NEXT:    vpblendvb %ymm9, %ymm4, %ymm3, %ymm9
+; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm9, %xmm10
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm10[1,6,11],zero,zero,zero,zero,xmm10[4,9,14,u,u,u]
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[2,7,12],zero,zero,zero,xmm9[0,5,10,15],zero,zero,zero,xmm9[u,u,u]
+; AVX2-ONLY-NEXT:    vpor %xmm10, %xmm9, %xmm9
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm10 = <255,255,0,0,u,u,255,255,0,0,255,255,0,0,u,u,255,255,0,0,255,255,0,0,u,u,255,255,0,0,255,255>
+; AVX2-ONLY-NEXT:    vpblendvb %ymm10, %ymm1, %ymm0, %ymm11
+; AVX2-ONLY-NEXT:    vpermq {{.*#+}} ymm12 = ymm11[2,3,0,1]
+; AVX2-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [255,0,255,255,0,255,0,255,255,0,255,0,255,255,0,255,255,0,255,255,0,255,0,255,255,0,255,0,255,255,0,255]
+; AVX2-ONLY-NEXT:    # ymm13 = mem[0,1,0,1]
+; AVX2-ONLY-NEXT:    vpblendvb %ymm13, %ymm11, %ymm12, %ymm11
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,3,8,13,18,23,28,17,22,27,16,21,26,31,u,u,u,u,u,u]
+; AVX2-ONLY-NEXT:    vpblendvb %ymm7, %ymm9, %ymm11, %ymm7
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm9 = <u,u,255,255,0,0,255,255,0,0,u,u,255,255,0,0,255,255,0,0,u,u,255,255,0,0,255,255,0,0,u,u>
+; AVX2-ONLY-NEXT:    vpblendvb %ymm9, %ymm1, %ymm0, %ymm11
+; AVX2-ONLY-NEXT:    vpermq {{.*#+}} ymm12 = ymm11[2,3,0,1]
+; AVX2-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [0,255,0,255,255,0,255,0,255,255,0,255,0,255,255,0,0,255,0,255,255,0,255,0,255,255,0,255,0,255,255,0]
+; AVX2-ONLY-NEXT:    # ymm13 = mem[0,1,0,1]
+; AVX2-ONLY-NEXT:    vpblendvb %ymm13, %ymm11, %ymm12, %ymm11
+; AVX2-ONLY-NEXT:    vpblendvb %ymm10, %ymm4, %ymm3, %ymm10
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm12 = xmm10[3,8,13],zero,zero,zero,xmm10[1,6,11],zero,zero,zero,zero,xmm10[u,u,u]
+; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm10, %xmm10
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm10[2,7,12],zero,zero,zero,xmm10[0,5,10,15,u,u,u]
+; AVX2-ONLY-NEXT:    vpor %xmm12, %xmm10, %xmm10
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,4,9,14,19,24,29,18,23,28,17,22,27,u,u,u,u,u,u,u]
+; AVX2-ONLY-NEXT:    vpblendvb %ymm8, %ymm10, %ymm11, %ymm10
+; AVX2-ONLY-NEXT:    vmovdqa 144(%rdi), %xmm8
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm11 = xmm8[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm8[1,6,11]
+; AVX2-ONLY-NEXT:    vpblendvb %ymm9, %ymm4, %ymm3, %ymm9
+; AVX2-ONLY-NEXT:    vmovdqa 128(%rdi), %xmm3
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm4 = xmm3[u,u,u,u,u,u,u,u,u,u,2,7,12],zero,zero,zero
+; AVX2-ONLY-NEXT:    vpor %xmm4, %xmm11, %xmm4
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} ymm4 = ymm6[0,1,2,3,4],ymm4[5,6,7],ymm6[8,9,10,11,12],ymm4[13,14,15]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm6[0,1,2,3],ymm4[4,5,6,7]
+; AVX2-ONLY-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX2-ONLY-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
+; AVX2-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [255,0,255,0,255,255,0,255,0,255,255,0,255,0,255,255,255,0,255,0,255,255,0,255,0,255,255,0,255,0,255,255]
+; AVX2-ONLY-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX2-ONLY-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm1 = xmm8[u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,xmm8[4,9,14]
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm2 = xmm3[u,u,u,u,u,u,u,u,u,0,5,10,15],zero,zero,zero
+; AVX2-ONLY-NEXT:    vpor %xmm1, %xmm2, %xmm1
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0]
+; AVX2-ONLY-NEXT:    vpblendvb %ymm2, %ymm10, %ymm1, %ymm1
+; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm9, %xmm6
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm6[3,8,13],zero,zero,zero,xmm6[1,6,11,u,u,u,u]
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[4,9,14],zero,zero,zero,xmm9[2,7,12],zero,zero,zero,xmm9[u,u,u,u]
+; AVX2-ONLY-NEXT:    vpor %xmm6, %xmm9, %xmm6
+; AVX2-ONLY-NEXT:    vmovdqa 128(%rdi), %ymm9
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,0,5,10,15,20,25,30,19,24,29,18,23,28,u,u,u,u,u,u,u]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1,2],ymm0[3,4,5,6,7]
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm6 = ymm9[u,1,6,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,21,26,31,u,u,u,u,u,u,u,u]
+; AVX2-ONLY-NEXT:    vpbroadcastq {{.*#+}} ymm9 = [0,5,0,5,0,5,0,5]
+; AVX2-ONLY-NEXT:    vpermd %ymm6, %ymm9, %ymm6
+; AVX2-ONLY-NEXT:    vpblendvb %ymm2, %ymm0, %ymm6, %ymm0
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm2 = xmm8[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm8[2,7,12]
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm6 = xmm3[u,u,u,u,u,u,u,u,u,u,3,8,13],zero,zero,zero
+; AVX2-ONLY-NEXT:    vpor %xmm2, %xmm6, %xmm2
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} ymm2 = ymm5[0,1,2,3,4],ymm2[5,6,7],ymm5[8,9,10,11,12],ymm2[13,14,15]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm2[4,5,6,7]
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm5 = xmm8[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm8[3,8,13]
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u,u,u,u,4,9,14],zero,zero,zero
+; AVX2-ONLY-NEXT:    vpor %xmm5, %xmm3, %xmm3
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} ymm3 = ymm7[0,1,2,3,4],ymm3[5,6,7],ymm7[8,9,10,11,12],ymm3[13,14,15]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm7[0,1,2,3],ymm3[4,5,6,7]
+; AVX2-ONLY-NEXT:    vmovdqa %ymm4, (%rsi)
+; AVX2-ONLY-NEXT:    vmovdqa %ymm2, (%rdx)
+; AVX2-ONLY-NEXT:    vmovdqa %ymm3, (%rcx)
+; AVX2-ONLY-NEXT:    vmovdqa %ymm1, (%r8)
 ; AVX2-ONLY-NEXT:    vmovdqa %ymm0, (%r9)
 ; AVX2-ONLY-NEXT:    vzeroupper
 ; AVX2-ONLY-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
index 9ec0e2f036e85c5..f6c22d9fb7a103d 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
@@ -4535,10 +4535,10 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    subq $40, %rsp
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = <0,6,12,128,128,128,4,10,128,128,128,u,u,u,u,u>
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm11 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm12 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
 ; AVX512F-NEXT:    vmovdqa64 224(%rdi), %ymm25
 ; AVX512F-NEXT:    vmovdqa64 192(%rdi), %ymm26
-; AVX512F-NEXT:    vmovdqa %ymm11, %ymm0
+; AVX512F-NEXT:    vmovdqa %ymm12, %ymm0
 ; AVX512F-NEXT:    vpternlogq $202, %ymm25, %ymm26, %ymm0
 ; AVX512F-NEXT:    vpshufb %xmm1, %xmm0, %xmm3
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm5 = <128,128,128,2,8,14,128,128,0,6,12,u,u,u,u,u>
@@ -4547,29 +4547,29 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vpor %xmm3, %xmm6, %xmm9
 ; AVX512F-NEXT:    vmovdqa64 (%rdi), %ymm30
 ; AVX512F-NEXT:    vmovdqa64 32(%rdi), %ymm31
-; AVX512F-NEXT:    vmovdqa64 128(%rdi), %ymm29
+; AVX512F-NEXT:    vmovdqa64 128(%rdi), %ymm24
 ; AVX512F-NEXT:    vmovdqa64 160(%rdi), %ymm18
-; AVX512F-NEXT:    vmovdqa %ymm11, %ymm6
-; AVX512F-NEXT:    vpternlogq $202, %ymm29, %ymm18, %ymm6
+; AVX512F-NEXT:    vmovdqa %ymm12, %ymm6
+; AVX512F-NEXT:    vpternlogq $202, %ymm24, %ymm18, %ymm6
 ; AVX512F-NEXT:    vextracti128 $1, %ymm6, %xmm7
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,u,u,128,128,0,6,12,128,128,128,4,10>
-; AVX512F-NEXT:    vpshufb %xmm3, %xmm7, %xmm12
+; AVX512F-NEXT:    vpshufb %xmm3, %xmm7, %xmm10
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,u,4,10,128,128,128,2,8,14,128,128>
 ; AVX512F-NEXT:    vpshufb %xmm8, %xmm6, %xmm13
-; AVX512F-NEXT:    vpor %xmm12, %xmm13, %xmm12
-; AVX512F-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
-; AVX512F-NEXT:    vinserti32x4 $2, %xmm9, %zmm12, %zmm2
+; AVX512F-NEXT:    vpor %xmm10, %xmm13, %xmm10
+; AVX512F-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512F-NEXT:    vinserti32x4 $2, %xmm9, %zmm10, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa %ymm11, %ymm9
+; AVX512F-NEXT:    vmovdqa %ymm12, %ymm9
 ; AVX512F-NEXT:    vpternlogq $202, %ymm31, %ymm30, %ymm9
 ; AVX512F-NEXT:    vpshufb %xmm1, %xmm9, %xmm1
 ; AVX512F-NEXT:    vextracti128 $1, %ymm9, %xmm13
 ; AVX512F-NEXT:    vpshufb %xmm5, %xmm13, %xmm5
 ; AVX512F-NEXT:    vporq %xmm1, %xmm5, %xmm17
-; AVX512F-NEXT:    vmovdqa64 320(%rdi), %ymm24
+; AVX512F-NEXT:    vmovdqa64 320(%rdi), %ymm29
 ; AVX512F-NEXT:    vmovdqa64 352(%rdi), %ymm22
-; AVX512F-NEXT:    vmovdqa %ymm11, %ymm1
-; AVX512F-NEXT:    vpternlogq $202, %ymm24, %ymm22, %ymm1
+; AVX512F-NEXT:    vmovdqa %ymm12, %ymm1
+; AVX512F-NEXT:    vpternlogq $202, %ymm29, %ymm22, %ymm1
 ; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm5
 ; AVX512F-NEXT:    vpshufb %xmm3, %xmm5, %xmm3
 ; AVX512F-NEXT:    vpshufb %xmm8, %xmm1, %xmm8
@@ -4577,8 +4577,8 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm8 = <1,7,13,128,128,128,5,11,128,128,128,u,u,u,u,u>
 ; AVX512F-NEXT:    vpshufb %xmm8, %xmm0, %xmm0
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm12 = <128,128,128,3,9,15,128,128,1,7,13,u,u,u,u,u>
-; AVX512F-NEXT:    vpshufb %xmm12, %xmm4, %xmm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm10 = <128,128,128,3,9,15,128,128,1,7,13,u,u,u,u,u>
+; AVX512F-NEXT:    vpshufb %xmm10, %xmm4, %xmm4
 ; AVX512F-NEXT:    vpor %xmm0, %xmm4, %xmm0
 ; AVX512F-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,128,128,1,7,13,128,128,128,5,11>
@@ -4587,7 +4587,7 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vpshufb %xmm7, %xmm6, %xmm6
 ; AVX512F-NEXT:    vporq %xmm4, %xmm6, %xmm28
 ; AVX512F-NEXT:    vpshufb %xmm8, %xmm9, %xmm4
-; AVX512F-NEXT:    vpshufb %xmm12, %xmm13, %xmm6
+; AVX512F-NEXT:    vpshufb %xmm10, %xmm13, %xmm6
 ; AVX512F-NEXT:    vporq %xmm4, %xmm6, %xmm21
 ; AVX512F-NEXT:    vpshufb %xmm0, %xmm5, %xmm0
 ; AVX512F-NEXT:    vpshufb %xmm7, %xmm1, %xmm1
@@ -4602,14 +4602,14 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vpshufb %xmm6, %xmm4, %xmm5
 ; AVX512F-NEXT:    vpor %xmm1, %xmm5, %xmm1
 ; AVX512F-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-NEXT:    vmovdqa %ymm11, %ymm5
-; AVX512F-NEXT:    vpternlogq $202, %ymm18, %ymm29, %ymm5
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,u,0,6,12,128,128,128,4,10,128,128,128>
-; AVX512F-NEXT:    vpshufb %xmm7, %xmm5, %xmm8
+; AVX512F-NEXT:    vmovdqa %ymm12, %ymm5
+; AVX512F-NEXT:    vpternlogq $202, %ymm18, %ymm24, %ymm5
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,0,6,12,128,128,128,4,10,128,128,128>
+; AVX512F-NEXT:    vpshufb %xmm8, %xmm5, %xmm7
 ; AVX512F-NEXT:    vextracti128 $1, %ymm5, %xmm1
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,u,u,u,128,128,128,2,8,14,128,128,0,6,12>
-; AVX512F-NEXT:    vpshufb %xmm12, %xmm1, %xmm13
-; AVX512F-NEXT:    vpor %xmm8, %xmm13, %xmm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,u,u,u,128,128,128,2,8,14,128,128,0,6,12>
+; AVX512F-NEXT:    vpshufb %xmm10, %xmm1, %xmm13
+; AVX512F-NEXT:    vpor %xmm7, %xmm13, %xmm2
 ; AVX512F-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-NEXT:    vmovdqa %ymm9, %ymm13
 ; AVX512F-NEXT:    vpternlogq $202, %ymm30, %ymm31, %ymm13
@@ -4617,15 +4617,15 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vpshufb %xmm0, %xmm14, %xmm0
 ; AVX512F-NEXT:    vpshufb %xmm6, %xmm13, %xmm6
 ; AVX512F-NEXT:    vporq %xmm0, %xmm6, %xmm16
-; AVX512F-NEXT:    vmovdqa %ymm11, %ymm10
-; AVX512F-NEXT:    vpternlogq $202, %ymm22, %ymm24, %ymm10
-; AVX512F-NEXT:    vpshufb %xmm7, %xmm10, %xmm8
-; AVX512F-NEXT:    vextracti128 $1, %ymm10, %xmm7
-; AVX512F-NEXT:    vpshufb %xmm12, %xmm7, %xmm12
-; AVX512F-NEXT:    vpor %xmm8, %xmm12, %xmm0
+; AVX512F-NEXT:    vmovdqa %ymm12, %ymm11
+; AVX512F-NEXT:    vpternlogq $202, %ymm22, %ymm29, %ymm11
+; AVX512F-NEXT:    vpshufb %xmm8, %xmm11, %xmm8
+; AVX512F-NEXT:    vextracti128 $1, %ymm11, %xmm7
+; AVX512F-NEXT:    vpshufb %xmm10, %xmm7, %xmm10
+; AVX512F-NEXT:    vpor %xmm8, %xmm10, %xmm0
 ; AVX512F-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm12 = <128,128,128,5,11,128,128,128,3,9,15,u,u,u,u,u>
-; AVX512F-NEXT:    vpshufb %xmm12, %xmm15, %xmm15
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm10 = <128,128,128,5,11,128,128,128,3,9,15,u,u,u,u,u>
+; AVX512F-NEXT:    vpshufb %xmm10, %xmm15, %xmm15
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm8 = <3,9,15,128,128,1,7,13,128,128,128,u,u,u,u,u>
 ; AVX512F-NEXT:    vpshufb %xmm8, %xmm4, %xmm4
 ; AVX512F-NEXT:    vpor %xmm4, %xmm15, %xmm0
@@ -4667,104 +4667,102 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vpternlogq $226, %zmm2, %zmm17, %zmm1
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm17
 ; AVX512F-NEXT:    vpternlogq $184, %zmm1, %zmm3, %zmm17
-; AVX512F-NEXT:    vpshufb %xmm12, %xmm14, %xmm0
+; AVX512F-NEXT:    vpshufb %xmm10, %xmm14, %xmm0
 ; AVX512F-NEXT:    vpshufb %xmm8, %xmm13, %xmm1
 ; AVX512F-NEXT:    vporq %xmm0, %xmm1, %xmm21
-; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm10[u,u,u,u,u,1,7,13],zero,zero,zero,xmm10[5,11],zero,zero,zero
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm11[u,u,u,u,u,1,7,13],zero,zero,zero,xmm11[5,11],zero,zero,zero
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm1 = xmm7[u,u,u,u,u],zero,zero,zero,xmm7[3,9,15],zero,zero,xmm7[1,7,13]
 ; AVX512F-NEXT:    vporq %xmm0, %xmm1, %xmm28
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = <128,128,0,6,12,128,128,128,4,10,u,u,u,u,u,u>
-; AVX512F-NEXT:    vmovdqa64 %ymm25, %ymm7
-; AVX512F-NEXT:    vpternlogq $226, %ymm26, %ymm11, %ymm7
-; AVX512F-NEXT:    vextracti128 $1, %ymm7, %xmm0
-; AVX512F-NEXT:    vpshufb %xmm2, %xmm0, %xmm1
-; AVX512F-NEXT:    vmovdqa64 %xmm2, %xmm25
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm14 = <4,10,128,128,128,2,8,14,128,128,u,u,u,u,u,u>
-; AVX512F-NEXT:    vpshufb %xmm14, %xmm7, %xmm2
+; AVX512F-NEXT:    vmovdqa64 %ymm25, %ymm11
+; AVX512F-NEXT:    vpternlogq $226, %ymm26, %ymm12, %ymm11
+; AVX512F-NEXT:    vextracti128 $1, %ymm11, %xmm0
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,xmm0[0,6,12],zero,zero,zero,xmm0[4,10,u,u,u,u,u,u]
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = <4,10,128,128,128,2,8,14,128,128,u,u,u,u,u,u>
+; AVX512F-NEXT:    vpshufb %xmm3, %xmm11, %xmm2
+; AVX512F-NEXT:    vmovdqa64 %xmm3, %xmm25
 ; AVX512F-NEXT:    vporq %xmm1, %xmm2, %xmm26
-; AVX512F-NEXT:    vmovdqa64 %ymm18, %ymm10
-; AVX512F-NEXT:    vpternlogq $226, %ymm29, %ymm9, %ymm10
-; AVX512F-NEXT:    vextracti128 $1, %ymm10, %xmm8
+; AVX512F-NEXT:    vmovdqa64 %ymm18, %ymm14
+; AVX512F-NEXT:    vpternlogq $226, %ymm24, %ymm9, %ymm14
+; AVX512F-NEXT:    vextracti128 $1, %ymm14, %xmm10
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,u,u,u,128,128,128,4,10,128,128,128,2,8,14>
-; AVX512F-NEXT:    vpshufb %xmm1, %xmm8, %xmm2
+; AVX512F-NEXT:    vpshufb %xmm1, %xmm10, %xmm2
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,u,2,8,14,128,128,0,6,12,128,128,128>
-; AVX512F-NEXT:    vpshufb %xmm3, %xmm10, %xmm4
+; AVX512F-NEXT:    vpshufb %xmm3, %xmm14, %xmm4
 ; AVX512F-NEXT:    vporq %xmm2, %xmm4, %xmm27
-; AVX512F-NEXT:    vpternlogq $202, %ymm30, %ymm31, %ymm11
+; AVX512F-NEXT:    vpternlogq $202, %ymm30, %ymm31, %ymm12
 ; AVX512F-NEXT:    vmovdqa %ymm5, %ymm4
 ; AVX512F-NEXT:    vpternlogq $202, %ymm23, %ymm6, %ymm4
-; AVX512F-NEXT:    vpternlogq $202, %ymm24, %ymm22, %ymm9
-; AVX512F-NEXT:    vextracti128 $1, %ymm9, %xmm12
-; AVX512F-NEXT:    vpshufb %xmm1, %xmm12, %xmm1
+; AVX512F-NEXT:    vpternlogq $202, %ymm29, %ymm22, %ymm9
+; AVX512F-NEXT:    vextracti128 $1, %ymm9, %xmm8
+; AVX512F-NEXT:    vpshufb %xmm1, %xmm8, %xmm1
 ; AVX512F-NEXT:    vpshufb %xmm3, %xmm9, %xmm2
-; AVX512F-NEXT:    vporq %xmm1, %xmm2, %xmm24
+; AVX512F-NEXT:    vpor %xmm1, %xmm2, %xmm7
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = <128,128,1,7,13,128,128,128,5,11,u,u,u,u,u,u>
 ; AVX512F-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
-; AVX512F-NEXT:    vmovdqa64 %xmm1, %xmm29
+; AVX512F-NEXT:    vmovdqa64 %xmm1, %xmm22
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm13 = <5,11,128,128,128,3,9,15,128,128,u,u,u,u,u,u>
-; AVX512F-NEXT:    vpshufb %xmm13, %xmm7, %xmm1
-; AVX512F-NEXT:    vporq %xmm0, %xmm1, %xmm18
+; AVX512F-NEXT:    vpshufb %xmm13, %xmm11, %xmm1
+; AVX512F-NEXT:    vpor %xmm0, %xmm1, %xmm3
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,u,128,128,128,5,11,128,128,128,3,9,15>
-; AVX512F-NEXT:    vpshufb %xmm2, %xmm8, %xmm1
+; AVX512F-NEXT:    vpshufb %xmm2, %xmm10, %xmm1
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,3,9,15,128,128,1,7,13,128,128,128>
-; AVX512F-NEXT:    vpshufb %xmm0, %xmm10, %xmm8
-; AVX512F-NEXT:    vpor %xmm1, %xmm8, %xmm8
+; AVX512F-NEXT:    vpshufb %xmm0, %xmm14, %xmm10
+; AVX512F-NEXT:    vpor %xmm1, %xmm10, %xmm10
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,128,128,128,128,128,4,10,0,6,12,18,24,30,20,26,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512F-NEXT:    vpshufb %ymm1, %ymm4, %ymm3
-; AVX512F-NEXT:    vmovdqa64 {{.*#+}} ymm22 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-NEXT:    vpternlogq $236, %ymm22, %ymm3, %ymm16
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm3 = [128,128,128,128,128,128,128,128,128,128,128,5,11,1,7,13,19,25,31,21,27,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512F-NEXT:    vpshufb %ymm3, %ymm4, %ymm4
-; AVX512F-NEXT:    vpternlogq $236, %ymm22, %ymm4, %ymm21
+; AVX512F-NEXT:    vpshufb %ymm1, %ymm4, %ymm11
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} ymm18 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512F-NEXT:    vpternlogq $236, %ymm18, %ymm11, %ymm16
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,128,128,128,128,128,128,5,11,1,7,13,19,25,31,21,27,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-NEXT:    vpshufb %ymm11, %ymm4, %ymm4
+; AVX512F-NEXT:    vpternlogq $236, %ymm18, %ymm4, %ymm21
 ; AVX512F-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 16-byte Folded Reload
 ; AVX512F-NEXT:    vpternlogq $202, %ymm20, %ymm19, %ymm5
 ; AVX512F-NEXT:    vpshufb %ymm1, %ymm5, %ymm1
-; AVX512F-NEXT:    vpternlogq $248, %ymm22, %ymm4, %ymm1
+; AVX512F-NEXT:    vpternlogq $248, %ymm18, %ymm4, %ymm1
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm4
-; AVX512F-NEXT:    vpshufb %ymm3, %ymm5, %ymm3
-; AVX512F-NEXT:    vextracti128 $1, %ymm11, %xmm1
-; AVX512F-NEXT:    vmovdqa64 %xmm25, %xmm5
-; AVX512F-NEXT:    vpshufb %xmm5, %xmm1, %xmm5
-; AVX512F-NEXT:    vpshufb %xmm14, %xmm11, %xmm14
-; AVX512F-NEXT:    vpor %xmm5, %xmm14, %xmm5
-; AVX512F-NEXT:    vpshufb %xmm2, %xmm12, %xmm2
+; AVX512F-NEXT:    vpshufb %ymm11, %ymm5, %ymm5
+; AVX512F-NEXT:    vextracti128 $1, %ymm12, %xmm1
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm11 = zero,zero,xmm1[0,6,12],zero,zero,zero,xmm1[4,10,u,u,u,u,u,u]
+; AVX512F-NEXT:    vmovdqa64 %xmm25, %xmm14
+; AVX512F-NEXT:    vpshufb %xmm14, %xmm12, %xmm14
+; AVX512F-NEXT:    vpor %xmm11, %xmm14, %xmm11
+; AVX512F-NEXT:    vpshufb %xmm2, %xmm8, %xmm2
 ; AVX512F-NEXT:    vpshufb %xmm0, %xmm9, %xmm0
 ; AVX512F-NEXT:    vpor %xmm2, %xmm0, %xmm0
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
 ; AVX512F-NEXT:    vpternlogq $226, %ymm23, %ymm2, %ymm6
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm9 = ymm6[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm9[5,6,7]
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm9[4,5,6,7]
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm8 = ymm6[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm9 = xmm11[0,1,2,3,4],xmm8[5,6,7]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
 ; AVX512F-NEXT:    vinserti32x4 $1, %xmm28, %ymm0, %ymm9
-; AVX512F-NEXT:    vpternlogq $248, %ymm22, %ymm9, %ymm3
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm3
-; AVX512F-NEXT:    vinserti32x4 $1, %xmm24, %ymm0, %ymm9
+; AVX512F-NEXT:    vpternlogq $248, %ymm18, %ymm9, %ymm5
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm5
 ; AVX512F-NEXT:    vpternlogq $202, %ymm20, %ymm19, %ymm2
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm10 = ymm2[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm12 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-NEXT:    vpternlogq $242, %ymm9, %ymm12, %ymm10
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm9
-; AVX512F-NEXT:    vinserti32x4 $1, %xmm27, %ymm0, %ymm10
-; AVX512F-NEXT:    vinserti32x4 $2, %xmm26, %zmm10, %zmm10
-; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-NEXT:    vpternlogq $226, %zmm10, %zmm14, %zmm5
-; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-NEXT:    vpternlogq $184, %zmm5, %zmm10, %zmm9
-; AVX512F-NEXT:    vmovdqa64 %xmm29, %xmm5
-; AVX512F-NEXT:    vpshufb %xmm5, %xmm1, %xmm1
-; AVX512F-NEXT:    vpshufb %xmm13, %xmm11, %xmm5
-; AVX512F-NEXT:    vpor %xmm1, %xmm5, %xmm1
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm5 = ymm6[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512F-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm11 = ymm2[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vpternlogq $242, %ymm7, %ymm9, %ymm11
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm7
+; AVX512F-NEXT:    vinserti32x4 $1, %xmm27, %ymm0, %ymm11
+; AVX512F-NEXT:    vinserti32x4 $2, %xmm26, %zmm11, %zmm11
+; AVX512F-NEXT:    vpternlogq $226, %zmm11, %zmm9, %zmm8
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512F-NEXT:    vpternlogq $184, %zmm8, %zmm11, %zmm7
+; AVX512F-NEXT:    vmovdqa64 %xmm22, %xmm8
+; AVX512F-NEXT:    vpshufb %xmm8, %xmm1, %xmm1
+; AVX512F-NEXT:    vpshufb %xmm13, %xmm12, %xmm8
+; AVX512F-NEXT:    vpor %xmm1, %xmm8, %xmm1
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm6[5,6,7]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm6[4,5,6,7]
 ; AVX512F-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512F-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-NEXT:    vpternlogq $242, %ymm0, %ymm12, %ymm2
+; AVX512F-NEXT:    vpternlogq $242, %ymm0, %ymm9, %ymm2
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
-; AVX512F-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm2
-; AVX512F-NEXT:    vinserti32x4 $2, %xmm18, %zmm2, %zmm2
-; AVX512F-NEXT:    vpternlogq $226, %zmm2, %zmm14, %zmm1
-; AVX512F-NEXT:    vpternlogq $184, %zmm1, %zmm10, %zmm0
+; AVX512F-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm2
+; AVX512F-NEXT:    vinserti32x4 $2, %xmm3, %zmm2, %zmm2
+; AVX512F-NEXT:    vpternlogq $226, %zmm2, %zmm9, %zmm1
+; AVX512F-NEXT:    vpternlogq $184, %zmm1, %zmm11, %zmm0
 ; AVX512F-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 16-byte Folded Reload
 ; AVX512F-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 16-byte Folded Reload
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
@@ -4774,12 +4772,12 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vpternlogq $184, %zmm1, %zmm2, %zmm21
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
 ; AVX512F-NEXT:    vpternlogq $184, %zmm16, %zmm1, %zmm4
-; AVX512F-NEXT:    vpternlogq $184, %zmm21, %zmm1, %zmm3
+; AVX512F-NEXT:    vpternlogq $184, %zmm21, %zmm1, %zmm5
 ; AVX512F-NEXT:    vmovdqa64 %zmm15, (%rsi)
 ; AVX512F-NEXT:    vmovdqa64 %zmm17, (%rdx)
 ; AVX512F-NEXT:    vmovdqa64 %zmm4, (%rcx)
-; AVX512F-NEXT:    vmovdqa64 %zmm3, (%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm9, (%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm5, (%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm7, (%r9)
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, (%rax)
 ; AVX512F-NEXT:    addq $40, %rsp
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
index b12edef01085f3e..f107e762ac311be 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
@@ -1217,57 +1217,56 @@ define void @store_i16_stride3_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vinserti128 $1, %xmm6, %ymm3, %ymm3
 ; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm4[4,5,6,7]
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm4 = <5,5,u,6,6,u,7,7,u,8,8,u,9,9,u,10>
-; AVX512F-NEXT:    vpermd (%rdx), %zmm4, %zmm4
-; AVX512F-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm4
+; AVX512F-NEXT:    vpermd (%rdx), %zmm4, %zmm5
+; AVX512F-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm5
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm3
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm5 = [128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128,20,21,128,128]
-; AVX512F-NEXT:    vpshufb %ymm5, %ymm3, %ymm3
-; AVX512F-NEXT:    vmovdqa 32(%rsi), %ymm6
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm8 = [10,11,0,1,128,128,12,13,2,3,128,128,14,15,4,5,128,128,16,17,28,29,128,128,18,19,18,19,128,128,20,21]
-; AVX512F-NEXT:    vpshufb %ymm8, %ymm6, %ymm6
-; AVX512F-NEXT:    vpor %ymm3, %ymm6, %ymm3
-; AVX512F-NEXT:    vmovdqa 48(%rdi), %xmm6
-; AVX512F-NEXT:    vmovdqa 48(%rsi), %xmm9
-; AVX512F-NEXT:    vpunpckhwd {{.*#+}} xmm10 = xmm9[4],xmm6[4],xmm9[5],xmm6[5],xmm9[6],xmm6[6],xmm9[7],xmm6[7]
-; AVX512F-NEXT:    vpshufb %xmm7, %xmm10, %xmm7
-; AVX512F-NEXT:    vprold $16, %xmm9, %xmm9
-; AVX512F-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,1,2,2]
-; AVX512F-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm9[2],xmm6[3,4],xmm9[5],xmm6[6,7]
-; AVX512F-NEXT:    vinserti128 $1, %xmm7, %ymm6, %ymm6
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm6
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm6[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa (%rdx), %ymm6
-; AVX512F-NEXT:    vmovdqa 32(%rdx), %ymm7
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm9 = [128,128,10,11,128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128]
-; AVX512F-NEXT:    vpshufb %ymm9, %ymm7, %ymm10
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm11 = <5,5,u,6,6,u,7,7>
-; AVX512F-NEXT:    vpermd %ymm7, %ymm11, %ymm7
-; AVX512F-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm7, %ymm7
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm7, %zmm10, %zmm7
-; AVX512F-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm7
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm6 = [128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128,20,21,128,128]
+; AVX512F-NEXT:    vpshufb %ymm6, %ymm3, %ymm3
+; AVX512F-NEXT:    vmovdqa 32(%rsi), %ymm8
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm9 = [10,11,0,1,128,128,12,13,2,3,128,128,14,15,4,5,128,128,16,17,28,29,128,128,18,19,18,19,128,128,20,21]
+; AVX512F-NEXT:    vpshufb %ymm9, %ymm8, %ymm8
+; AVX512F-NEXT:    vpor %ymm3, %ymm8, %ymm3
+; AVX512F-NEXT:    vmovdqa 48(%rdi), %xmm8
+; AVX512F-NEXT:    vmovdqa 48(%rsi), %xmm10
+; AVX512F-NEXT:    vpunpckhwd {{.*#+}} xmm11 = xmm10[4],xmm8[4],xmm10[5],xmm8[5],xmm10[6],xmm8[6],xmm10[7],xmm8[7]
+; AVX512F-NEXT:    vpshufb %xmm7, %xmm11, %xmm7
+; AVX512F-NEXT:    vprold $16, %xmm10, %xmm10
+; AVX512F-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[1,1,2,2]
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1],xmm10[2],xmm8[3,4],xmm10[5],xmm8[6,7]
+; AVX512F-NEXT:    vinserti128 $1, %xmm7, %ymm8, %ymm7
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm7
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm7[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa (%rdx), %ymm7
+; AVX512F-NEXT:    vmovdqa 32(%rdx), %ymm8
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm10 = [128,128,10,11,128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128]
+; AVX512F-NEXT:    vpshufb %ymm10, %ymm8, %ymm11
+; AVX512F-NEXT:    vpermd %ymm8, %ymm4, %ymm4
+; AVX512F-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm4, %zmm11, %zmm4
+; AVX512F-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm4
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm3
-; AVX512F-NEXT:    vpshufb %ymm5, %ymm3, %ymm3
-; AVX512F-NEXT:    vmovdqa (%rsi), %ymm5
-; AVX512F-NEXT:    vpshufb %ymm8, %ymm5, %ymm5
-; AVX512F-NEXT:    vpor %ymm3, %ymm5, %ymm3
+; AVX512F-NEXT:    vpshufb %ymm6, %ymm3, %ymm3
+; AVX512F-NEXT:    vmovdqa (%rsi), %ymm6
+; AVX512F-NEXT:    vpshufb %ymm9, %ymm6, %ymm6
+; AVX512F-NEXT:    vpor %ymm3, %ymm6, %ymm3
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm3
-; AVX512F-NEXT:    vprold $16, %xmm0, %xmm5
+; AVX512F-NEXT:    vprold $16, %xmm0, %xmm6
 ; AVX512F-NEXT:    vpshufd {{.*#+}} xmm8 = xmm1[1,1,2,2]
-; AVX512F-NEXT:    vpblendw {{.*#+}} xmm5 = xmm8[0,1],xmm5[2],xmm8[3,4],xmm5[5],xmm8[6,7]
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm6 = xmm8[0,1],xmm6[2],xmm8[3,4],xmm6[5],xmm8[6,7]
 ; AVX512F-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
 ; AVX512F-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
-; AVX512F-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm0
+; AVX512F-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm0
 ; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm3[4,5,6,7]
-; AVX512F-NEXT:    vpshufb %ymm9, %ymm6, %ymm1
+; AVX512F-NEXT:    vpshufb %ymm10, %ymm7, %ymm1
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,0,0,u,1,1,u,2>
-; AVX512F-NEXT:    vpermd %ymm6, %ymm2, %ymm2
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm3 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
+; AVX512F-NEXT:    vpermd %ymm7, %ymm2, %ymm2
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
 ; AVX512F-NEXT:    vpandn %ymm2, %ymm3, %ymm2
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512F-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
+; AVX512F-NEXT:    vpternlogq $248, %zmm3, %zmm0, %zmm1
 ; AVX512F-NEXT:    vmovdqa64 %zmm1, (%rcx)
-; AVX512F-NEXT:    vmovdqa64 %zmm7, 128(%rcx)
-; AVX512F-NEXT:    vmovdqa64 %zmm4, 64(%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm4, 128(%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm5, 64(%rcx)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
@@ -2333,24 +2332,25 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128,20,21,128,128]
 ; AVX512F-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
-; AVX512F-NEXT:    vmovdqa %ymm1, %ymm7
+; AVX512F-NEXT:    vmovdqa %ymm1, %ymm6
 ; AVX512F-NEXT:    vmovdqa (%rsi), %ymm1
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = [10,11,0,1,128,128,12,13,2,3,128,128,14,15,4,5,128,128,16,17,28,29,128,128,18,19,18,19,128,128,20,21]
 ; AVX512F-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vmovdqa %ymm2, %ymm7
 ; AVX512F-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm3
 ; AVX512F-NEXT:    vmovdqa (%rsi), %xmm5
-; AVX512F-NEXT:    vmovdqa64 16(%rsi), %xmm24
-; AVX512F-NEXT:    vmovdqa 32(%rsi), %xmm6
+; AVX512F-NEXT:    vmovdqa64 16(%rsi), %xmm20
+; AVX512F-NEXT:    vmovdqa64 32(%rsi), %xmm24
 ; AVX512F-NEXT:    vprold $16, %xmm5, %xmm8
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm9
-; AVX512F-NEXT:    vmovdqa64 16(%rdi), %xmm25
+; AVX512F-NEXT:    vmovdqa64 16(%rdi), %xmm21
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %xmm4
 ; AVX512F-NEXT:    vpshufd {{.*#+}} xmm10 = xmm9[1,1,2,2]
 ; AVX512F-NEXT:    vpblendw {{.*#+}} xmm8 = xmm10[0,1],xmm8[2],xmm10[3,4],xmm8[5],xmm10[6,7]
 ; AVX512F-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm9[0],xmm5[0],xmm9[1],xmm5[1],xmm9[2],xmm5[2],xmm9[3],xmm5[3]
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512F-NEXT:    vpshufb %xmm0, %xmm9, %xmm9
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
+; AVX512F-NEXT:    vpshufb %xmm1, %xmm9, %xmm9
 ; AVX512F-NEXT:    vinserti128 $1, %xmm8, %ymm9, %ymm8
 ; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm10 = zmm8[0,1,2,3],zmm3[4,5,6,7]
 ; AVX512F-NEXT:    vmovdqa (%rdx), %ymm3
@@ -2358,125 +2358,122 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vmovdqa 64(%rdx), %ymm14
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm9 = [128,128,10,11,128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128]
 ; AVX512F-NEXT:    vpshufb %ymm9, %ymm3, %ymm11
-; AVX512F-NEXT:    vmovdqa64 {{.*#+}} ymm19 = <u,0,0,u,1,1,u,2>
-; AVX512F-NEXT:    vpermd %ymm3, %ymm19, %ymm3
-; AVX512F-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX512F-NEXT:    vpandnq %ymm3, %ymm16, %ymm3
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} ymm16 = <u,0,0,u,1,1,u,2>
+; AVX512F-NEXT:    vpermd %ymm3, %ymm16, %ymm3
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm15 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
+; AVX512F-NEXT:    vpandn %ymm3, %ymm15, %ymm3
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm11, %zmm3, %zmm3
-; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512F-NEXT:    vpternlogq $248, %zmm17, %zmm10, %zmm3
+; AVX512F-NEXT:    vpternlogq $248, %zmm15, %zmm10, %zmm3
 ; AVX512F-NEXT:    vmovdqa 96(%rsi), %xmm10
 ; AVX512F-NEXT:    vprold $16, %xmm10, %xmm11
 ; AVX512F-NEXT:    vmovdqa 96(%rdi), %xmm12
 ; AVX512F-NEXT:    vpshufd {{.*#+}} xmm13 = xmm12[1,1,2,2]
 ; AVX512F-NEXT:    vpblendw {{.*#+}} xmm11 = xmm13[0,1],xmm11[2],xmm13[3,4],xmm11[5],xmm13[6,7]
 ; AVX512F-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm12[0],xmm10[0],xmm12[1],xmm10[1],xmm12[2],xmm10[2],xmm12[3],xmm10[3]
-; AVX512F-NEXT:    vpshufb %xmm0, %xmm10, %xmm10
-; AVX512F-NEXT:    vmovdqa64 %xmm0, %xmm26
+; AVX512F-NEXT:    vpshufb %xmm1, %xmm10, %xmm10
 ; AVX512F-NEXT:    vinserti128 $1, %xmm11, %ymm10, %ymm10
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm10
 ; AVX512F-NEXT:    vmovdqa 80(%rdi), %xmm12
 ; AVX512F-NEXT:    vmovdqa 80(%rsi), %xmm13
-; AVX512F-NEXT:    vpunpckhwd {{.*#+}} xmm15 = xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
+; AVX512F-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm11 = [4,5,10,11,10,11,8,9,8,9,14,15,12,13,14,15]
-; AVX512F-NEXT:    vpshufb %xmm11, %xmm15, %xmm15
+; AVX512F-NEXT:    vpshufb %xmm11, %xmm0, %xmm0
 ; AVX512F-NEXT:    vprold $16, %xmm13, %xmm13
 ; AVX512F-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[1,1,2,2]
 ; AVX512F-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0,1],xmm13[2],xmm12[3,4],xmm13[5],xmm12[6,7]
-; AVX512F-NEXT:    vinserti128 $1, %xmm15, %ymm12, %ymm12
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm15 = zmm12[0,1,2,3],zmm10[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm20 = <5,5,u,6,6,u,7,7,u,8,8,u,9,9,u,10>
-; AVX512F-NEXT:    vpermd 64(%rdx), %zmm20, %zmm10
-; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX512F-NEXT:    vpternlogq $184, %zmm15, %zmm21, %zmm10
-; AVX512F-NEXT:    vmovdqa 96(%rdi), %ymm15
-; AVX512F-NEXT:    vmovdqa %ymm7, %ymm1
-; AVX512F-NEXT:    vpshufb %ymm7, %ymm15, %ymm15
-; AVX512F-NEXT:    vmovdqa 96(%rsi), %ymm12
-; AVX512F-NEXT:    vpshufb %ymm2, %ymm12, %ymm12
-; AVX512F-NEXT:    vpor %ymm15, %ymm12, %ymm12
-; AVX512F-NEXT:    vmovdqa 112(%rdi), %xmm15
-; AVX512F-NEXT:    vmovdqa 112(%rsi), %xmm13
-; AVX512F-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm13[4],xmm15[4],xmm13[5],xmm15[5],xmm13[6],xmm15[6],xmm13[7],xmm15[7]
-; AVX512F-NEXT:    vpshufb %xmm11, %xmm5, %xmm5
-; AVX512F-NEXT:    vprold $16, %xmm13, %xmm13
-; AVX512F-NEXT:    vpshufd {{.*#+}} xmm15 = xmm15[1,1,2,2]
-; AVX512F-NEXT:    vpblendw {{.*#+}} xmm13 = xmm15[0,1],xmm13[2],xmm15[3,4],xmm13[5],xmm15[6,7]
-; AVX512F-NEXT:    vinserti128 $1, %xmm5, %ymm13, %ymm5
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm5
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm12[0,1,2,3],zmm5[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa 96(%rdx), %ymm12
-; AVX512F-NEXT:    vmovdqa64 {{.*#+}} ymm23 = <5,5,u,6,6,u,7,7>
-; AVX512F-NEXT:    vpermd %ymm12, %ymm23, %ymm15
-; AVX512F-NEXT:    vmovdqa64 {{.*#+}} ymm22 = [0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0]
-; AVX512F-NEXT:    vpandnq %ymm15, %ymm22, %ymm15
-; AVX512F-NEXT:    vpshufb %ymm9, %ymm12, %ymm12
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm15, %zmm12, %zmm18
-; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0]
-; AVX512F-NEXT:    vpternlogq $248, %zmm12, %zmm5, %zmm18
-; AVX512F-NEXT:    vmovdqa 64(%rdi), %ymm5
+; AVX512F-NEXT:    vinserti128 $1, %xmm0, %ymm12, %ymm0
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm10[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm18 = <5,5,u,6,6,u,7,7,u,8,8,u,9,9,u,10>
+; AVX512F-NEXT:    vpermd 64(%rdx), %zmm18, %zmm10
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
+; AVX512F-NEXT:    vpternlogq $184, %zmm0, %zmm22, %zmm10
+; AVX512F-NEXT:    vmovdqa 96(%rdi), %ymm0
+; AVX512F-NEXT:    vmovdqa %ymm6, %ymm2
+; AVX512F-NEXT:    vpshufb %ymm6, %ymm0, %ymm0
+; AVX512F-NEXT:    vmovdqa 96(%rsi), %ymm5
+; AVX512F-NEXT:    vmovdqa %ymm7, %ymm13
 ; AVX512F-NEXT:    vpshufb %ymm7, %ymm5, %ymm5
-; AVX512F-NEXT:    vmovdqa 64(%rsi), %ymm15
-; AVX512F-NEXT:    vpshufb %ymm2, %ymm15, %ymm15
-; AVX512F-NEXT:    vpor %ymm5, %ymm15, %ymm5
+; AVX512F-NEXT:    vpor %ymm0, %ymm5, %ymm0
+; AVX512F-NEXT:    vmovdqa 112(%rdi), %xmm5
+; AVX512F-NEXT:    vmovdqa 112(%rsi), %xmm12
+; AVX512F-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm12[4],xmm5[4],xmm12[5],xmm5[5],xmm12[6],xmm5[6],xmm12[7],xmm5[7]
+; AVX512F-NEXT:    vpshufb %xmm11, %xmm7, %xmm7
+; AVX512F-NEXT:    vprold $16, %xmm12, %xmm12
+; AVX512F-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,1,2,2]
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm12[2],xmm5[3,4],xmm12[5],xmm5[6,7]
+; AVX512F-NEXT:    vinserti128 $1, %xmm7, %ymm5, %ymm5
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm5
-; AVX512F-NEXT:    vmovdqa 64(%rsi), %xmm15
-; AVX512F-NEXT:    vprold $16, %xmm15, %xmm0
-; AVX512F-NEXT:    vmovdqa 64(%rdi), %xmm13
-; AVX512F-NEXT:    vpshufd {{.*#+}} xmm7 = xmm13[1,1,2,2]
-; AVX512F-NEXT:    vpblendw {{.*#+}} xmm0 = xmm7[0,1],xmm0[2],xmm7[3,4],xmm0[5],xmm7[6,7]
-; AVX512F-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm13[0],xmm15[0],xmm13[1],xmm15[1],xmm13[2],xmm15[2],xmm13[3],xmm15[3]
-; AVX512F-NEXT:    vmovdqa64 %xmm26, %xmm15
-; AVX512F-NEXT:    vpshufb %xmm15, %xmm7, %xmm7
-; AVX512F-NEXT:    vinserti128 $1, %xmm0, %ymm7, %ymm0
 ; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm5[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa 96(%rdx), %ymm5
+; AVX512F-NEXT:    vpermd %ymm5, %ymm18, %ymm7
+; AVX512F-NEXT:    vpandnq %ymm7, %ymm22, %ymm7
+; AVX512F-NEXT:    vpshufb %ymm9, %ymm5, %ymm5
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm7, %zmm5, %zmm17
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm19 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0]
+; AVX512F-NEXT:    vpternlogq $248, %zmm19, %zmm0, %zmm17
+; AVX512F-NEXT:    vmovdqa 64(%rdi), %ymm0
+; AVX512F-NEXT:    vpshufb %ymm6, %ymm0, %ymm0
+; AVX512F-NEXT:    vmovdqa 64(%rsi), %ymm7
+; AVX512F-NEXT:    vpshufb %ymm13, %ymm7, %ymm7
+; AVX512F-NEXT:    vpor %ymm0, %ymm7, %ymm0
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512F-NEXT:    vmovdqa 64(%rsi), %xmm7
+; AVX512F-NEXT:    vprold $16, %xmm7, %xmm12
+; AVX512F-NEXT:    vmovdqa 64(%rdi), %xmm5
+; AVX512F-NEXT:    vpshufd {{.*#+}} xmm6 = xmm5[1,1,2,2]
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm12[2],xmm6[3,4],xmm12[5],xmm6[6,7]
+; AVX512F-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1],xmm5[2],xmm7[2],xmm5[3],xmm7[3]
+; AVX512F-NEXT:    vpshufb %xmm1, %xmm5, %xmm5
+; AVX512F-NEXT:    vinserti128 $1, %xmm6, %ymm5, %ymm5
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm5[0,1,2,3],zmm0[4,5,6,7]
 ; AVX512F-NEXT:    vpshufb %ymm9, %ymm14, %ymm5
-; AVX512F-NEXT:    vpermd %ymm14, %ymm19, %ymm7
-; AVX512F-NEXT:    vpandnq %ymm7, %ymm16, %ymm7
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm5, %zmm7, %zmm5
-; AVX512F-NEXT:    vpternlogq $248, %zmm17, %zmm0, %zmm5
+; AVX512F-NEXT:    vpermd %ymm14, %ymm16, %ymm6
+; AVX512F-NEXT:    vpandn %ymm6, %ymm15, %ymm6
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm5, %zmm6, %zmm5
+; AVX512F-NEXT:    vpternlogq $248, %zmm15, %zmm0, %zmm5
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm0
-; AVX512F-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
-; AVX512F-NEXT:    vmovdqa 32(%rsi), %ymm7
-; AVX512F-NEXT:    vpshufb %ymm2, %ymm7, %ymm7
-; AVX512F-NEXT:    vpor %ymm0, %ymm7, %ymm0
-; AVX512F-NEXT:    vmovdqa 48(%rdi), %xmm7
-; AVX512F-NEXT:    vmovdqa 48(%rsi), %xmm13
-; AVX512F-NEXT:    vpunpckhwd {{.*#+}} xmm14 = xmm13[4],xmm7[4],xmm13[5],xmm7[5],xmm13[6],xmm7[6],xmm13[7],xmm7[7]
-; AVX512F-NEXT:    vpshufb %xmm11, %xmm14, %xmm14
-; AVX512F-NEXT:    vprold $16, %xmm13, %xmm13
-; AVX512F-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[1,1,2,2]
-; AVX512F-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1],xmm13[2],xmm7[3,4],xmm13[5],xmm7[6,7]
-; AVX512F-NEXT:    vinserti128 $1, %xmm14, %ymm7, %ymm7
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm7
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm7[4,5,6,7]
-; AVX512F-NEXT:    vpermd %ymm8, %ymm23, %ymm7
-; AVX512F-NEXT:    vpandnq %ymm7, %ymm22, %ymm7
-; AVX512F-NEXT:    vpshufb %ymm9, %ymm8, %ymm8
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm7, %zmm8, %zmm7
-; AVX512F-NEXT:    vpternlogq $248, %zmm12, %zmm0, %zmm7
-; AVX512F-NEXT:    vprold $16, %xmm6, %xmm0
-; AVX512F-NEXT:    vpshufd {{.*#+}} xmm8 = xmm4[1,1,2,2]
-; AVX512F-NEXT:    vpblendw {{.*#+}} xmm0 = xmm8[0,1],xmm0[2],xmm8[3,4],xmm0[5],xmm8[6,7]
-; AVX512F-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3]
-; AVX512F-NEXT:    vpshufb %xmm15, %xmm2, %xmm2
+; AVX512F-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vmovdqa 32(%rsi), %ymm6
+; AVX512F-NEXT:    vpshufb %ymm13, %ymm6, %ymm6
+; AVX512F-NEXT:    vpor %ymm0, %ymm6, %ymm0
+; AVX512F-NEXT:    vmovdqa 48(%rdi), %xmm6
+; AVX512F-NEXT:    vmovdqa 48(%rsi), %xmm7
+; AVX512F-NEXT:    vpunpckhwd {{.*#+}} xmm12 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
+; AVX512F-NEXT:    vpshufb %xmm11, %xmm12, %xmm12
+; AVX512F-NEXT:    vprold $16, %xmm7, %xmm7
+; AVX512F-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,1,2,2]
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm7[2],xmm6[3,4],xmm7[5],xmm6[6,7]
+; AVX512F-NEXT:    vinserti128 $1, %xmm12, %ymm6, %ymm6
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm6
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm6[4,5,6,7]
+; AVX512F-NEXT:    vpermd %ymm8, %ymm18, %ymm6
+; AVX512F-NEXT:    vpandnq %ymm6, %ymm22, %ymm6
+; AVX512F-NEXT:    vpshufb %ymm9, %ymm8, %ymm7
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm6, %zmm7, %zmm6
+; AVX512F-NEXT:    vpternlogq $248, %zmm19, %zmm0, %zmm6
+; AVX512F-NEXT:    vmovdqa64 %xmm24, %xmm2
+; AVX512F-NEXT:    vprold $16, %xmm24, %xmm0
+; AVX512F-NEXT:    vpshufd {{.*#+}} xmm7 = xmm4[1,1,2,2]
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm0 = xmm7[0,1],xmm0[2],xmm7[3,4],xmm0[5],xmm7[6,7]
+; AVX512F-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; AVX512F-NEXT:    vpshufb %xmm1, %xmm2, %xmm2
 ; AVX512F-NEXT:    vinserti128 $1, %xmm0, %ymm2, %ymm0
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; AVX512F-NEXT:    vmovdqa64 %xmm24, %xmm1
-; AVX512F-NEXT:    vmovdqa64 %xmm25, %xmm6
-; AVX512F-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
+; AVX512F-NEXT:    vmovdqa64 %xmm20, %xmm4
+; AVX512F-NEXT:    vmovdqa64 %xmm21, %xmm1
+; AVX512F-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
 ; AVX512F-NEXT:    vpshufb %xmm11, %xmm2, %xmm2
-; AVX512F-NEXT:    vprold $16, %xmm24, %xmm4
-; AVX512F-NEXT:    vpshufd {{.*#+}} xmm1 = xmm25[1,1,2,2]
+; AVX512F-NEXT:    vprold $16, %xmm20, %xmm4
+; AVX512F-NEXT:    vpshufd {{.*#+}} xmm1 = xmm21[1,1,2,2]
 ; AVX512F-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2],xmm1[3,4],xmm4[5],xmm1[6,7]
 ; AVX512F-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
 ; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm0[4,5,6,7]
-; AVX512F-NEXT:    vpermd (%rdx), %zmm20, %zmm1
-; AVX512F-NEXT:    vpternlogq $184, %zmm0, %zmm21, %zmm1
+; AVX512F-NEXT:    vpermd (%rdx), %zmm18, %zmm1
+; AVX512F-NEXT:    vpternlogq $184, %zmm0, %zmm22, %zmm1
 ; AVX512F-NEXT:    vmovdqa64 %zmm1, 64(%rcx)
-; AVX512F-NEXT:    vmovdqa64 %zmm7, 128(%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm6, 128(%rcx)
 ; AVX512F-NEXT:    vmovdqa64 %zmm5, 192(%rcx)
-; AVX512F-NEXT:    vmovdqa64 %zmm18, 320(%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm17, 320(%rcx)
 ; AVX512F-NEXT:    vmovdqa64 %zmm10, 256(%rcx)
 ; AVX512F-NEXT:    vmovdqa64 %zmm3, (%rcx)
 ; AVX512F-NEXT:    vzeroupper
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
index f77719f01a857a2..dc7fb6abe419d5a 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
@@ -2835,11 +2835,11 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %ymm9
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = [128,128,128,128,12,13,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128]
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm9, %ymm4
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,1,1,1]
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm21 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535]
 ; AVX512F-SLOW-NEXT:    vpandnq %ymm9, %ymm21, %ymm9
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm9, %zmm9
-; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm9
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm21, %zmm2, %zmm9
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm2
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm12[0],xmm2[0],xmm12[1],xmm2[1],xmm12[2],xmm2[2],xmm12[3],xmm2[3]
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm4, %xmm4
@@ -2925,68 +2925,69 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-LABEL: store_i16_stride5_vf32:
 ; AVX512F-FAST:       # %bb.0:
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %ymm5
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = <u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm5, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa64 32(%rdi), %ymm19
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm19[1,1,2,2]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm5, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm23
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm7
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm7[1,1,2,2]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
 ; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm10
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm2
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <6,7,u,u,10,11,6,7,u,u,8,9,u,u,12,13>
 ; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm2, %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm21
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm19
 ; AVX512F-FAST-NEXT:    vpbroadcastq 40(%rdi), %xmm2
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm4
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %ymm8
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm8, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm3, %ymm20
-; AVX512F-FAST-NEXT:    vmovdqa64 (%rdx), %ymm18
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm8, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm24
+; AVX512F-FAST-NEXT:    vmovdqa64 (%rdx), %ymm17
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %ymm9
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm9[3,0,3,0,7,4,7,4]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm4[0],ymm1[1],ymm4[2],ymm1[3],ymm4[4,5],ymm1[6],ymm4[7,8],ymm1[9],ymm4[10],ymm1[11],ymm4[12,13],ymm1[14],ymm4[15]
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %xmm4
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm9[3,0,3,0,7,4,7,4]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4,5],ymm1[6],ymm2[7,8],ymm1[9],ymm2[10],ymm1[11],ymm2[12,13],ymm1[14],ymm2[15]
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %xmm11
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = <10,11,u,u,6,7,u,u,8,9,8,9,u,u,8,9>
-; AVX512F-FAST-NEXT:    vpshufb %xmm15, %xmm4, %xmm13
+; AVX512F-FAST-NEXT:    vpshufb %xmm15, %xmm11, %xmm2
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %xmm6
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm14 = xmm6[1,2,2,2]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0],xmm14[1],xmm13[2],xmm14[3],xmm13[4,5],xmm14[6],xmm13[7]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,1,0,0]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm13, %zmm1
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm13 = xmm6[1,2,2,2]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm13[1],xmm2[2],xmm13[3],xmm2[4,5],xmm13[6],xmm2[7]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,0]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
 ; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %ymm14
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [128,128,128,128,12,13,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128]
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm14, %ymm3
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm7, %ymm24
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,1,1,1]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535]
-; AVX512F-FAST-NEXT:    vpandnq %ymm14, %ymm16, %ymm14
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm14, %zmm17
-; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm17
+; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [128,128,128,128,12,13,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128]
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm2, %ymm3
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm13, %ymm25
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,1,1]
+; AVX512F-FAST-NEXT:    vpandnq %ymm2, %ymm16, %ymm2
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm18
+; AVX512F-FAST-NEXT:    vpternlogq $248, %zmm16, %zmm1, %zmm18
 ; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm1
 ; AVX512F-FAST-NEXT:    vpshufb %xmm15, %xmm1, %xmm3
-; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm14
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm15 = xmm14[1,2,2,2]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm15[1],xmm3[2],xmm15[3],xmm3[4,5],xmm15[6],xmm3[7]
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm1, %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm7, %xmm22
+; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm15
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm14 = xmm15[1,2,2,2]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm14[1],xmm3[2],xmm14[3],xmm3[4,5],xmm14[6],xmm3[7]
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1],xmm1[2],xmm15[2],xmm1[3],xmm15[3]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm20
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [0,1,0,1,8,9,8,8]
 ; AVX512F-FAST-NEXT:    vpermi2q %zmm3, %zmm1, %zmm14
 ; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm10, %xmm1
 ; AVX512F-FAST-NEXT:    vpbroadcastq 8(%rdi), %xmm3
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3],xmm3[4],xmm1[5],xmm3[6],xmm1[7]
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm3
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm12
+; AVX512F-FAST-NEXT:    vmovdqa64 32(%rdi), %xmm21
 ; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm10[0],xmm3[1],xmm10[1],xmm3[2],xmm10[2],xmm3[3],xmm10[3]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13]
-; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm3, %xmm3
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm7, %xmm23
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13]
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm3, %xmm3
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm22
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[0,1,0,1,4,5,4,5]
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535]
@@ -2996,7 +2997,8 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm3, %zmm10
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm10
 ; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm15
-; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm15, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm23, %ymm1
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm15, %ymm1
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm14
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm14[1,1,2,2]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5],ymm1[6],ymm3[7,8],ymm1[9],ymm3[10,11],ymm1[12],ymm3[13],ymm1[14],ymm3[15]
@@ -3005,67 +3007,68 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm13[0,1],ymm3[2],ymm13[3],ymm3[4],ymm13[5,6],ymm3[7],ymm13[8,9],ymm3[10],ymm13[11],ymm3[12],ymm13[13,14],ymm3[15]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm3
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm20, %ymm1
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm2, %ymm1
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm13 = ymm18[3,0,3,0,7,4,7,4]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm24, %ymm1
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm4, %ymm1
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm13 = ymm17[3,0,3,0,7,4,7,4]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm13[0],ymm1[1],ymm13[2],ymm1[3],ymm13[4,5],ymm1[6],ymm13[7,8],ymm1[9],ymm13[10],ymm1[11],ymm13[12,13],ymm1[14],ymm13[15]
 ; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25,18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25]
 ; AVX512F-FAST-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm2, %ymm7
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm11 = ymm18[1,1,1,2,5,5,5,6]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm7[0],ymm11[1],ymm7[2,3],ymm11[4],ymm7[5],ymm11[6],ymm7[7,8],ymm11[9],ymm7[10,11],ymm11[12],ymm7[13],ymm11[14],ymm7[15]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm13, %zmm7
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm20, %zmm7
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm24, %ymm3
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm4, %ymm2
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm12 = ymm17[1,1,1,2,5,5,5,6]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm12[1],ymm2[2,3],ymm12[4],ymm2[5],ymm12[6],ymm2[7,8],ymm12[9],ymm2[10,11],ymm12[12],ymm2[13],ymm12[14],ymm2[15]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm13, %zmm2
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm12, %zmm2
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm25, %ymm3
 ; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
 ; AVX512F-FAST-NEXT:    vpbroadcastq 16(%r8), %ymm3
 ; AVX512F-FAST-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
-; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm0
+; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
 ; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [30,31,28,29,26,27,30,31,30,31,28,29,30,31,28,29,30,31,28,29,26,27,30,31,30,31,28,29,30,31,28,29]
 ; AVX512F-FAST-NEXT:    # ymm13 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm5, %ymm3
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm19[2,3,2,3,6,7,6,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm7[1],ymm3[2],ymm7[3],ymm3[4,5],ymm7[6],ymm3[7,8],ymm7[9],ymm3[10],ymm7[11],ymm3[12,13],ymm7[14],ymm3[15]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm19[0,1,2,1,4,5,6,5]
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm5, %ymm2
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm7[2,3,2,3,6,7,6,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2],ymm3[3],ymm2[4,5],ymm3[6],ymm2[7,8],ymm3[9],ymm2[10],ymm3[11],ymm2[12,13],ymm3[14],ymm2[15]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm7[0,1,2,1,4,5,6,5]
 ; AVX512F-FAST-NEXT:    vprolq $16, %ymm5, %ymm5
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1],ymm7[2],ymm5[3],ymm7[4],ymm5[5,6],ymm7[7],ymm5[8,9],ymm7[10],ymm5[11],ymm7[12],ymm5[13,14],ymm7[15]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [2,3,2,3,10,11,10,10]
-; AVX512F-FAST-NEXT:    vpermi2q %zmm3, %zmm5, %zmm7
-; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31]
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm8, %ymm5
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm11 = ymm9[3,2,3,3,7,6,7,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0],ymm11[1],ymm5[2],ymm11[3,4],ymm5[5,6,7,8],ymm11[9],ymm5[10],ymm11[11,12],ymm5[13,14,15]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm5[0,1],ymm3[2],ymm5[3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8,9],ymm3[10],ymm5[11],ymm3[12],ymm5[13,14],ymm3[15]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [2,3,2,3,10,11,10,10]
+; AVX512F-FAST-NEXT:    vpermi2q %zmm2, %zmm3, %zmm5
+; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31]
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm8, %ymm3
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm9[3,2,3,3,7,6,7,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm7[1],ymm3[2],ymm7[3,4],ymm3[5,6,7,8],ymm7[9],ymm3[10],ymm7[11,12],ymm3[13,14,15]
 ; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm8, %ymm1
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm8 = ymm9[1,1,1,2,5,5,5,6]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm8[1],ymm1[2,3],ymm8[4],ymm1[5],ymm8[6],ymm1[7,8],ymm8[9],ymm1[10,11],ymm8[12],ymm1[13],ymm8[14],ymm1[15]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [2,3,2,3,10,10,11,10]
-; AVX512F-FAST-NEXT:    vpermi2q %zmm5, %zmm1, %zmm8
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm7, %zmm16, %zmm8
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm9[1,1,1,2,5,5,5,6]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm7[1],ymm1[2,3],ymm7[4],ymm1[5],ymm7[6],ymm1[7,8],ymm7[9],ymm1[10,11],ymm7[12],ymm1[13],ymm7[14],ymm1[15]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [2,3,2,3,10,10,11,10]
+; AVX512F-FAST-NEXT:    vpermi2q %zmm3, %zmm1, %zmm7
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm5, %zmm16, %zmm7
 ; AVX512F-FAST-NEXT:    vpbroadcastq 48(%r8), %ymm1
-; AVX512F-FAST-NEXT:    vpbroadcastq 56(%r8), %ymm5
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm1
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm1
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm22, %xmm5
-; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm2, %ymm2
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm18[3,2,3,3,7,6,7,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2],ymm3[3,4],ymm2[5,6,7,8],ymm3[9],ymm2[10],ymm3[11,12],ymm2[13,14,15]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [2,2,3,2,8,9,8,9]
-; AVX512F-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm3
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm21, %xmm2
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm12[0],xmm2[0],xmm12[1],xmm2[1],xmm12[2],xmm2[2],xmm12[3],xmm2[3]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm23, %xmm4
-; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm2, %xmm2
-; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm15, %ymm4
+; AVX512F-FAST-NEXT:    vpbroadcastq 56(%r8), %ymm3
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm1
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm11[0],xmm6[0],xmm11[1],xmm6[1],xmm11[2],xmm6[2],xmm11[3],xmm6[3]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm20, %xmm5
+; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm3, %xmm3
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm4, %ymm2
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm17[3,2,3,3,7,6,7,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2],ymm4[3,4],ymm2[5,6,7,8],ymm4[9],ymm2[10],ymm4[11,12],ymm2[13,14,15]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [2,2,3,2,8,9,8,9]
+; AVX512F-FAST-NEXT:    vpermi2q %zmm3, %zmm2, %zmm4
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm19, %xmm2
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm21, %xmm3
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm22, %xmm3
+; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm15, %ymm3
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm14[2,3,2,3,6,7,6,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7,8],ymm5[9],ymm4[10],ymm5[11],ymm4[12,13],ymm5[14],ymm4[15]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm5[1],ymm3[2],ymm5[3],ymm3[4,5],ymm5[6],ymm3[7,8],ymm5[9],ymm3[10],ymm5[11],ymm3[12,13],ymm5[14],ymm3[15]
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [2,3,2,2,8,9,8,9]
-; AVX512F-FAST-NEXT:    vpermi2q %zmm2, %zmm4, %zmm5
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm20, %zmm5
+; AVX512F-FAST-NEXT:    vpermi2q %zmm2, %zmm3, %zmm5
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm12, %zmm5
 ; AVX512F-FAST-NEXT:    vpbroadcastq 24(%r8), %ymm2
 ; AVX512F-FAST-NEXT:    vpbroadcastq 32(%r8), %ymm3
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
@@ -3074,7 +3077,7 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, 256(%r9)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, 64(%r9)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm10, (%r9)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm17, 192(%r9)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm18, 192(%r9)
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
 ;
@@ -5686,338 +5689,338 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX512F-SLOW-LABEL: store_i16_stride5_vf64:
 ; AVX512F-SLOW:       # %bb.0:
-; AVX512F-SLOW-NEXT:    subq $520, %rsp # imm = 0x208
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%rcx), %ymm15
+; AVX512F-SLOW-NEXT:    subq $488, %rsp # imm = 0x1E8
+; AVX512F-SLOW-NEXT:    vmovdqa 96(%rcx), %ymm11
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm15, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm1, %ymm8
-; AVX512F-SLOW-NEXT:    vmovdqa64 96(%rdx), %ymm16
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm16[3,0,3,0,7,4,7,4]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm11, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm1, %ymm14
+; AVX512F-SLOW-NEXT:    vmovdqa64 96(%rdx), %ymm17
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm17[3,0,3,0,7,4,7,4]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
 ; AVX512F-SLOW-NEXT:    vmovdqa 96(%rcx), %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <10,11,u,u,6,7,u,u,8,9,8,9,u,u,8,9>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm0, %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdx), %xmm3
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[1,2,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2],xmm4[3],xmm2[4,5],xmm4[6],xmm2[7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,0]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = <10,11,u,u,6,7,u,u,8,9,8,9,u,u,8,9>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm7, %xmm0, %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdx), %xmm2
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[1,2,2,2]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2],xmm4[3],xmm3[4,5],xmm4[6],xmm3[7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,1,0,0]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%rsi), %ymm2
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm12, %ymm2, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 96(%rdi), %ymm18
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm18[1,1,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0],ymm1[1],ymm4[2,3],ymm1[4],ymm4[5],ymm1[6],ymm4[7,8],ymm1[9],ymm4[10,11],ymm1[12],ymm4[13],ymm1[14],ymm4[15]
+; AVX512F-SLOW-NEXT:    vmovdqa 96(%rsi), %ymm9
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm9, %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm20
+; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm5
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm5[1,1,2,2]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5],ymm1[6],ymm3[7,8],ymm1[9],ymm3[10,11],ymm1[12],ymm3[13],ymm1[14],ymm3[15]
 ; AVX512F-SLOW-NEXT:    vmovdqa 96(%rsi), %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <6,7,u,u,10,11,6,7,u,u,8,9,u,u,12,13>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm10, %xmm1, %xmm5
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <6,7,u,u,10,11,6,7,u,u,8,9,u,u,12,13>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm12, %xmm1, %xmm4
 ; AVX512F-SLOW-NEXT:    vpbroadcastq 104(%rdi), %xmm6
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm6[1],xmm5[2,3],xmm6[4],xmm5[5],xmm6[6],xmm5[7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%r8), %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = [128,128,128,128,12,13,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm4, %ymm5
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm6, %ymm29
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,1,1]
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535]
-; AVX512F-SLOW-NEXT:    vpandnq %ymm4, %ymm17, %ymm4
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %ymm13
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdx), %ymm3
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[3,2,3,3,7,6,7,7]
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rcx), %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm4[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2],ymm3[3,4],ymm4[5,6,7,8],ymm3[9],ymm4[10],ymm3[11,12],ymm4[13,14,15]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,3,2]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm3[0,1,2,3],zmm0[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm6[1],xmm4[2,3],xmm6[4],xmm4[5],xmm6[6],xmm4[7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm3
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa 96(%r8), %ymm3
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,ymm3[12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm3[14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm3[16,17],zero,zero,zero,zero,zero,zero
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm15 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,1,1,1]
+; AVX512F-SLOW-NEXT:    vpandn %ymm3, %ymm15, %ymm3
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm13 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm0, %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 (%rdx), %ymm16
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdx), %ymm2
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[3,2,3,3,7,6,7,7]
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%rcx), %ymm3
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm3, (%rsp) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3,4],ymm3[5,6,7,8],ymm2[9],ymm3[10],ymm2[11,12],ymm3[13,14,15]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,3,2]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,2,3],zmm0[0,1,0,1]
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm0
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,6]
-; AVX512F-SLOW-NEXT:    vmovdqa64 64(%rdi), %ymm19
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm19[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rsi), %ymm3
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,3,2,2,6,7,6,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2],ymm1[3],ymm3[4,5],ymm1[6],ymm3[7,8],ymm1[9],ymm3[10],ymm1[11],ymm3[12,13],ymm1[14],ymm3[15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 64(%rdi), %ymm18
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm18[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%rsi), %ymm2
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[2,3,2,2,6,7,6,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4,5],ymm1[6],ymm2[7,8],ymm1[9],ymm2[10],ymm1[11],ymm2[12,13],ymm1[14],ymm2[15]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,2]
 ; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm31 = zmm1[0,1,2,3],zmm0[0,1,0,1]
 ; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm7
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm6
 ; AVX512F-SLOW-NEXT:    vmovdqa 64(%rcx), %xmm1
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm1, %xmm1
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm7, %xmm1, %xmm1
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,2,2,2]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7]
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa 64(%rsi), %xmm0
-; AVX512F-SLOW-NEXT:    vpshufb %xmm10, %xmm0, %xmm1
-; AVX512F-SLOW-NEXT:    vpbroadcastq 72(%rdi), %xmm3
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3],xmm3[4],xmm1[5],xmm3[6],xmm1[7]
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm3
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm4
+; AVX512F-SLOW-NEXT:    vpshufb %xmm12, %xmm0, %xmm1
+; AVX512F-SLOW-NEXT:    vpbroadcastq 72(%rdi), %xmm2
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm2
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm0
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,2,1,3]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,5,6]
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm1
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb %xmm12, %xmm0, %xmm1
+; AVX512F-SLOW-NEXT:    vpbroadcastq 8(%rdi), %xmm2
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm2
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm16[3,2,3,3,7,6,7,7]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,6]
 ; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %xmm10, %xmm4, %xmm0
-; AVX512F-SLOW-NEXT:    vpbroadcastq 8(%rdi), %xmm1
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm13[3,2,3,3,7,6,7,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,5,6]
-; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %ymm6
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm6[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %ymm2
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm2[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3,4],ymm0[5,6,7,8],ymm3[9],ymm0[10],ymm3[11,12],ymm0[13,14,15]
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm3
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm4[1],ymm0[2],ymm4[3,4],ymm0[5,6,7,8],ymm4[9],ymm0[10],ymm4[11,12],ymm0[13,14,15]
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm4
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,3,2]
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm1, %xmm1
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm25 = zmm0[0,1,2,3],zmm1[0,1,0,1]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 (%rdi), %ymm30
-; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %ymm9
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm9[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
+; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %ymm10
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm10[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,2,6,7,6,6]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm30[2,3,2,3,6,7,6,7]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7,8],ymm1[9],ymm0[10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,2]
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm4
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm8
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,5,6]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm28 = zmm0[0,1,2,3],zmm1[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vmovdqa64 32(%rdx), %ymm22
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm27 = zmm0[0,1,2,3],zmm1[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vmovdqa64 32(%rdx), %ymm19
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm1
-; AVX512F-SLOW-NEXT:    vpshufb %ymm8, %ymm1, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm22[3,0,3,0,7,4,7,4]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm8[0],ymm0[1],ymm8[2],ymm0[3],ymm8[4,5],ymm0[6],ymm8[7,8],ymm0[9],ymm8[10],ymm0[11],ymm8[12,13],ymm0[14],ymm8[15]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,2,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm7[0],xmm3[1],xmm7[2],xmm3[3],xmm7[4,5],xmm3[6],xmm7[7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,1,0,0]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm0
+; AVX512F-SLOW-NEXT:    vpshufb %ymm14, %ymm1, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm19[3,0,3,0,7,4,7,4]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm14[0],ymm0[1],ymm14[2],ymm0[3],ymm14[4,5],ymm0[6],ymm14[7,8],ymm0[9],ymm14[10],ymm0[11],ymm14[12,13],ymm0[14],ymm14[15]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm7, %xmm6, %xmm6
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[1,2,2,2]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0],xmm4[1],xmm6[2],xmm4[3],xmm6[4,5],xmm4[6],xmm6[7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,0]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm3
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm4
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm0
-; AVX512F-SLOW-NEXT:    vpshufb %ymm12, %ymm0, %ymm7
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm3[1,1,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm8[0],ymm7[1],ymm8[2,3],ymm7[4],ymm8[5],ymm7[6],ymm8[7,8],ymm7[9],ymm8[10,11],ymm7[12],ymm8[13],ymm7[14],ymm8[15]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm10, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpbroadcastq 40(%rdi), %xmm8
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm8[1],xmm4[2,3],xmm8[4],xmm4[5],xmm8[6],xmm4[7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm4, %zmm25
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %ymm4
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm4[0,1,1,1]
-; AVX512F-SLOW-NEXT:    vpandnq %ymm7, %ymm17, %ymm7
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm8
-; AVX512F-SLOW-NEXT:    vpshufb %ymm8, %ymm4, %ymm4
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm7, %zmm17
-; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm11, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm24
-; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm7
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm8, %xmm5
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm27
-; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm7[1,2,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1],xmm4[2],xmm5[3],xmm4[4,5],xmm5[6],xmm4[7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm26
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm18[0,1,2,1,4,5,6,5]
-; AVX512F-SLOW-NEXT:    vprolq $16, %ymm2, %ymm5
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3],ymm4[4],ymm5[5,6],ymm4[7],ymm5[8,9],ymm4[10],ymm5[11],ymm4[12],ymm5[13,14],ymm4[15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm21
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[2,3,2,2,6,7,6,6]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm18[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm11 = ymm2[0],ymm4[1],ymm2[2],ymm4[3],ymm2[4,5],ymm4[6],ymm2[7,8],ymm4[9],ymm2[10],ymm4[11],ymm2[12,13],ymm4[14],ymm2[15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm6
+; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm0, %ymm6
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm4[1,1,2,2]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm14[0],ymm6[1],ymm14[2,3],ymm6[4],ymm14[5],ymm6[6],ymm14[7,8],ymm6[9],ymm14[10,11],ymm6[12],ymm14[13],ymm6[14],ymm14[15]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm12, %xmm8, %xmm8
+; AVX512F-SLOW-NEXT:    vpbroadcastq 40(%rdi), %xmm12
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0],xmm12[1],xmm8[2,3],xmm12[4],xmm8[5],xmm12[6],xmm8[7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm8, %zmm24
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %ymm6
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm6[0,1,1,1]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm14 = [128,128,128,128,12,13,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm14, %ymm6, %ymm6
+; AVX512F-SLOW-NEXT:    vpandn %ymm8, %ymm15, %ymm8
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm8, %zmm26
+; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm3, %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm6
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm12, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm8, %ymm28
+; AVX512F-SLOW-NEXT:    vpshufb %xmm7, %xmm3, %xmm3
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,2,2,2]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm6[1],xmm3[2],xmm6[3],xmm3[4,5],xmm6[6],xmm3[7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm23
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm5[0,1,2,1,4,5,6,5]
+; AVX512F-SLOW-NEXT:    vprolq $16, %ymm9, %ymm6
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm6[0,1],ymm3[2],ymm6[3],ymm3[4],ymm6[5,6],ymm3[7],ymm6[8,9],ymm3[10],ymm6[11],ymm3[12],ymm6[13,14],ymm3[15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm22
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm9[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,3,2,2,6,7,6,6]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm13 = ymm3[0],ymm5[1],ymm3[2],ymm5[3],ymm3[4,5],ymm5[6],ymm3[7,8],ymm5[9],ymm3[10],ymm5[11],ymm3[12,13],ymm5[14],ymm3[15]
 ; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25,18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25]
 ; AVX512F-SLOW-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm15, %ymm4
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm16[1,1,1,2,5,5,5,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm4[0],ymm5[1],ymm4[2,3],ymm5[4],ymm4[5],ymm5[6],ymm4[7,8],ymm5[9],ymm4[10,11],ymm5[12],ymm4[13],ymm5[14],ymm4[15]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm15[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm16[3,2,3,3,7,6,7,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm15 = ymm4[0],ymm5[1],ymm4[2],ymm5[3,4],ymm4[5,6,7,8],ymm5[9],ymm4[10],ymm5[11,12],ymm4[13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm3[0,1,2,1,4,5,6,5]
-; AVX512F-SLOW-NEXT:    vprolq $16, %ymm0, %ymm5
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm5[0,1],ymm4[2],ymm5[3],ymm4[4],ymm5[5,6],ymm4[7],ymm5[8,9],ymm4[10],ymm5[11],ymm4[12],ymm5[13,14],ymm4[15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm20
+; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm11, %ymm3
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm17[1,1,1,2,5,5,5,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm3[0],ymm6[1],ymm3[2,3],ymm6[4],ymm3[5],ymm6[6],ymm3[7,8],ymm6[9],ymm3[10,11],ymm6[12],ymm3[13],ymm6[14],ymm3[15]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm11[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm17[3,2,3,3,7,6,7,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm11 = ymm3[0],ymm6[1],ymm3[2],ymm6[3,4],ymm3[5,6,7,8],ymm6[9],ymm3[10],ymm6[11,12],ymm3[13,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm4[0,1,2,1,4,5,6,5]
+; AVX512F-SLOW-NEXT:    vprolq $16, %ymm0, %ymm6
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm15 = ymm6[0,1],ymm3[2],ymm6[3],ymm3[4],ymm6[5,6],ymm3[7],ymm6[8,9],ymm3[10],ymm6[11],ymm3[12],ymm6[13,14],ymm3[15]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,2,6,7,6,6]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm0[0],ymm3[1],ymm0[2],ymm3[3],ymm0[4,5],ymm3[6],ymm0[7,8],ymm3[9],ymm0[10],ymm3[11],ymm0[12,13],ymm3[14],ymm0[15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm4[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3],ymm0[4,5],ymm3[6],ymm0[7,8],ymm3[9],ymm0[10],ymm3[11],ymm0[12,13],ymm3[14],ymm0[15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm21
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm1, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm22[1,1,1,2,5,5,5,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm0[0],ymm3[1],ymm0[2,3],ymm3[4],ymm0[5],ymm3[6],ymm0[7,8],ymm3[9],ymm0[10,11],ymm3[12],ymm0[13],ymm3[14],ymm0[15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm19[1,1,1,2,5,5,5,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm0[0],ymm3[1],ymm0[2,3],ymm3[4],ymm0[5],ymm3[6],ymm0[7,8],ymm3[9],ymm0[10,11],ymm3[12],ymm0[13],ymm3[14],ymm0[15]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm1[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm22[3,2,3,3,7,6,7,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm0[0],ymm1[1],ymm0[2],ymm1[3,4],ymm0[5,6,7,8],ymm1[9],ymm0[10],ymm1[11,12],ymm0[13,14,15]
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm12, %ymm2
-; AVX512F-SLOW-NEXT:    vpshufb %ymm12, %ymm9, %ymm1
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm30[1,1,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm14[0],ymm1[1],ymm14[2,3],ymm1[4],ymm14[5],ymm1[6],ymm14[7,8],ymm1[9],ymm14[10,11],ymm1[12],ymm14[13],ymm1[14],ymm14[15]
-; AVX512F-SLOW-NEXT:    vprolq $16, %ymm9, %ymm9
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm30[0,1,2,1,4,5,6,5]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm9[0,1],ymm14[2],ymm9[3],ymm14[4],ymm9[5,6],ymm14[7],ymm9[8,9],ymm14[10],ymm9[11],ymm14[12],ymm9[13,14],ymm14[15]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm1, %zmm23
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm6, %ymm1
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm13[3,0,3,0,7,4,7,4]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm9[0],ymm1[1],ymm9[2],ymm1[3],ymm9[4,5],ymm1[6],ymm9[7,8],ymm1[9],ymm9[10],ymm1[11],ymm9[12,13],ymm1[14],ymm9[15]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm6, %ymm6
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm13[1,1,1,2,5,5,5,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0],ymm9[1],ymm6[2,3],ymm9[4],ymm6[5],ymm9[6],ymm6[7,8],ymm9[9],ymm6[10,11],ymm9[12],ymm6[13],ymm9[14],ymm6[15]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm1, %zmm6
-; AVX512F-SLOW-NEXT:    vpbroadcastq 16(%r8), %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm18 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512F-SLOW-NEXT:    vpandnq %ymm1, %ymm18, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %ymm9
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm3
-; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm9, %ymm13
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm13, %zmm13
-; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm12, %ymm1
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm19[1,1,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm14[0],ymm1[1],ymm14[2,3],ymm1[4],ymm14[5],ymm1[6],ymm14[7,8],ymm1[9],ymm14[10,11],ymm1[12],ymm14[13],ymm1[14],ymm14[15]
-; AVX512F-SLOW-NEXT:    vprolq $16, %ymm12, %ymm12
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm19[0,1,2,1,4,5,6,5]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm12[0,1],ymm14[2],ymm12[3],ymm14[4],ymm12[5,6],ymm14[7],ymm12[8,9],ymm14[10],ymm12[11],ymm14[12],ymm12[13,14],ymm14[15]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm1, %zmm30
-; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm2, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm12[3,0,3,0,7,4,7,4]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm14[0],ymm1[1],ymm14[2],ymm1[3],ymm14[4,5],ymm1[6],ymm14[7,8],ymm1[9],ymm14[10],ymm1[11],ymm14[12,13],ymm1[14],ymm14[15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm19[3,2,3,3,7,6,7,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm0[0],ymm1[1],ymm0[2],ymm1[3,4],ymm0[5,6,7,8],ymm1[9],ymm0[10],ymm1[11,12],ymm0[13,14,15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm5
+; AVX512F-SLOW-NEXT:    vpshufb %ymm5, %ymm10, %ymm0
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm30[1,1,2,2]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
+; AVX512F-SLOW-NEXT:    vprolq $16, %ymm10, %ymm1
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm30[0,1,2,1,4,5,6,5]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3],ymm3[4],ymm1[5,6],ymm3[7],ymm1[8,9],ymm3[10],ymm1[11],ymm3[12],ymm1[13,14],ymm3[15]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm30
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm2, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm16[3,0,3,0,7,4,7,4]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm2, %ymm1
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm12[1,1,1,2,5,5,5,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5],ymm2[6],ymm1[7,8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13],ymm2[14],ymm1[15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm31 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm16[1,1,1,2,5,5,5,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5],ymm2[6],ymm1[7,8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13],ymm2[14],ymm1[15]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm9
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0]
+; AVX512F-SLOW-NEXT:    vpbroadcastq 16(%r8), %ymm0
+; AVX512F-SLOW-NEXT:    vpandnq %ymm0, %ymm16, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %ymm10
+; AVX512F-SLOW-NEXT:    vpshufb %ymm14, %ymm10, %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm14, %ymm29
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm17
+; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %ymm5, %ymm2, %ymm0
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm18[1,1,2,2]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
+; AVX512F-SLOW-NEXT:    vprolq $16, %ymm2, %ymm1
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm18[0,1,2,1,4,5,6,5]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3],ymm2[4],ymm1[5,6],ymm2[7],ymm1[8,9],ymm2[10],ymm1[11],ymm2[12],ymm1[13,14],ymm2[15]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm20
+; AVX512F-SLOW-NEXT:    vmovdqu (%rsp), %ymm2 # 32-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm2, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm3[3,0,3,0,7,4,7,4]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm2, %ymm5
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm19 = [65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
+; AVX512F-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm19, %zmm31 # 64-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    vpbroadcastq 88(%r8), %ymm1
-; AVX512F-SLOW-NEXT:    vpbroadcastq 96(%r8), %ymm14
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm1, %zmm29
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm19 = [0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm31, %zmm19, %zmm29
-; AVX512F-SLOW-NEXT:    vpternlogq $226, (%rsp), %zmm16, %zmm28 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vpbroadcastq 24(%r8), %ymm14
-; AVX512F-SLOW-NEXT:    vpbroadcastq 32(%r8), %ymm22
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm22, %zmm14, %zmm14
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm28, %zmm19, %zmm14
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm19 = ymm24[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpermq $4, {{[-0-9]+}}(%r{{[sb]}}p), %ymm22 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm22 = mem[0,1,0,0]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
-; AVX512F-SLOW-NEXT:    vpbroadcastq 80(%r8), %ymm2
-; AVX512F-SLOW-NEXT:    vpandnq %ymm2, %ymm18, %ymm2
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%r8), %ymm12
-; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm12, %ymm1
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,1,1,1]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm18 = ymm27[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm26 = ymm26[0,1,0,0]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,1,1,1]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm28 = ymm21[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,3,2,2]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,3,2]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm20[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,2]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm10[2,2,3,2]
+; AVX512F-SLOW-NEXT:    vpbroadcastq 96(%r8), %ymm2
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0]
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm21 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm25 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535]
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm2, %zmm21, %zmm10
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm2, %zmm25, %zmm17
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm22, %zmm19, %zmm2
-; AVX512F-SLOW-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # zmm19 = mem[0,1,0,1,4,5,4,5]
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm21, %zmm19
-; AVX512F-SLOW-NEXT:    vpbroadcastq 64(%r8), %ymm2
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm2, %zmm2
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm19, %zmm12, %zmm2
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm26, %zmm18, %zmm18
-; AVX512F-SLOW-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # zmm19 = mem[0,1,0,1,4,5,4,5]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm18, %zmm21, %zmm19
-; AVX512F-SLOW-NEXT:    vpbroadcastq (%r8), %ymm18
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm18, %zmm9
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm19, %zmm12, %zmm9
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm28, %zmm11
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm8, %zmm8
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm11, %zmm21, %zmm8
-; AVX512F-SLOW-NEXT:    vpbroadcastq 112(%r8), %ymm11
-; AVX512F-SLOW-NEXT:    vpbroadcastq 120(%r8), %ymm12
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm11, %zmm11
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm8, %zmm12, %zmm11
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm7, %zmm5
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm3
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm5, %zmm21, %zmm3
-; AVX512F-SLOW-NEXT:    vpbroadcastq 48(%r8), %ymm4
-; AVX512F-SLOW-NEXT:    vpbroadcastq 56(%r8), %ymm5
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm12, %zmm4
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm23, %zmm16, %zmm6
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm30, %zmm16, %zmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm3, %zmm6, %zmm13
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm3, %zmm0, %zmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm1, 384(%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm13, 64(%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm4, 256(%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm11, 576(%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm9, (%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm17, 192(%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm14, 128(%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm2, 320(%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm29, 448(%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm10, 512(%r9)
-; AVX512F-SLOW-NEXT:    addq $520, %rsp # imm = 0x208
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535]
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm31, %zmm18, %zmm1
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm25, %zmm19, %zmm27
+; AVX512F-SLOW-NEXT:    vpbroadcastq 24(%r8), %ymm2
+; AVX512F-SLOW-NEXT:    vpbroadcastq 32(%r8), %ymm25
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm25, %zmm2, %zmm2
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm27, %zmm18, %zmm2
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0]
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm18, %zmm31 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm18, %zmm24 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm18 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm18 = mem[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vpermq $4, {{[-0-9]+}}(%r{{[sb]}}p), %ymm25 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm25 = mem[0,1,0,0]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm27 = ymm28[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm28 = ymm23[0,1,0,0]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,1,1,1]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm22[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,3,2,2]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,2,3,2]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm21[2,3,2,2]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm8[2,2,3,2]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm3[1,1,1,2,5,5,5,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0],ymm8[1],ymm5[2,3],ymm8[4],ymm5[5],ymm8[6],ymm5[7,8],ymm8[9],ymm5[10,11],ymm8[12],ymm5[13],ymm8[14],ymm5[15]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%r8), %ymm5
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm5[0,1,1,1]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm3
+; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm5, %ymm5
+; AVX512F-SLOW-NEXT:    vpbroadcastq 80(%r8), %ymm29
+; AVX512F-SLOW-NEXT:    vpandnq %ymm29, %ymm16, %ymm29
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm29, %zmm5, %zmm5
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535]
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm22, %zmm31, %zmm23
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm22, %zmm24, %zmm26
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm25, %zmm18, %zmm18
+; AVX512F-SLOW-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # zmm22 = mem[0,1,0,1,4,5,4,5]
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm24 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535]
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm18, %zmm24, %zmm22
+; AVX512F-SLOW-NEXT:    vpbroadcastq 64(%r8), %ymm18
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm18, %zmm8
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535]
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm22, %zmm18, %zmm8
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm28, %zmm27, %zmm22
+; AVX512F-SLOW-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # zmm21 = mem[0,1,0,1,4,5,4,5]
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm22, %zmm24, %zmm21
+; AVX512F-SLOW-NEXT:    vpbroadcastq (%r8), %ymm22
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm22, %zmm10
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm21, %zmm18, %zmm10
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm14, %zmm13
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm12, %zmm11
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm13, %zmm24, %zmm11
+; AVX512F-SLOW-NEXT:    vpbroadcastq 112(%r8), %ymm12
+; AVX512F-SLOW-NEXT:    vpbroadcastq 120(%r8), %ymm13
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm12, %zmm12
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm11, %zmm16, %zmm12
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm15, %zmm7
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm6, %zmm4
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm7, %zmm24, %zmm4
+; AVX512F-SLOW-NEXT:    vpbroadcastq 48(%r8), %ymm6
+; AVX512F-SLOW-NEXT:    vpbroadcastq 56(%r8), %ymm7
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm6
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm4, %zmm16, %zmm6
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm30, %zmm19, %zmm9
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm20, %zmm19, %zmm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm4, %zmm9, %zmm17
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm4, %zmm0, %zmm5
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm5, 384(%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm17, 64(%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm6, 256(%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm12, 576(%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm10, (%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm26, 192(%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm2, 128(%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm8, 320(%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm1, 448(%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm23, 512(%r9)
+; AVX512F-SLOW-NEXT:    addq $488, %rsp # imm = 0x1E8
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
 ;
 ; AVX512F-FAST-LABEL: store_i16_stride5_vf64:
 ; AVX512F-FAST:       # %bb.0:
 ; AVX512F-FAST-NEXT:    subq $360, %rsp # imm = 0x168
-; AVX512F-FAST-NEXT:    vmovdqa 96(%rcx), %ymm6
+; AVX512F-FAST-NEXT:    vmovdqa 96(%rcx), %ymm8
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm6, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa64 96(%rdx), %ymm18
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm18[3,0,3,0,7,4,7,4]
+; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm8, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa64 96(%rdx), %ymm19
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm19[3,0,3,0,7,4,7,4]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
 ; AVX512F-FAST-NEXT:    vmovdqa 96(%rcx), %xmm2
 ; AVX512F-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -6030,12 +6033,12 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,0]
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 96(%rsi), %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm2, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa64 96(%rdi), %ymm23
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm23[1,1,2,2]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5],ymm1[6],ymm3[7,8],ymm1[9],ymm3[10,11],ymm1[12],ymm3[13],ymm1[14],ymm3[15]
+; AVX512F-FAST-NEXT:    vmovdqa 96(%rsi), %ymm3
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm12, %ymm3, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa64 96(%rdi), %ymm24
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm24[1,1,2,2]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5],ymm1[6],ymm2[7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13],ymm1[14],ymm2[15]
 ; AVX512F-FAST-NEXT:    vmovdqa 96(%rsi), %xmm4
 ; AVX512F-FAST-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <6,7,u,u,10,11,6,7,u,u,8,9,u,u,12,13>
@@ -6043,275 +6046,273 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vpbroadcastq 104(%rdi), %xmm5
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1],xmm4[2,3],xmm5[4],xmm4[5],xmm5[6],xmm4[7]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm3
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 96(%r8), %ymm3
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [128,128,128,128,12,13,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128]
-; AVX512F-FAST-NEXT:    vpshufb %ymm14, %ymm3, %ymm4
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm3[0,1,1,1]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535]
-; AVX512F-FAST-NEXT:    vpandn %ymm5, %ymm3, %ymm5
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %ymm4
-; AVX512F-FAST-NEXT:    vmovdqu %ymm4, (%rsp) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm4, %ymm4
-; AVX512F-FAST-NEXT:    vmovdqa64 32(%rdx), %ymm27
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm27[3,0,3,0,7,4,7,4]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7,8],ymm4[9],ymm5[10],ymm4[11],ymm5[12,13],ymm4[14],ymm5[15]
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %xmm5
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm4, %zmm2
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa 96(%r8), %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [128,128,128,128,12,13,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128]
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm2, %ymm4
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,1,1]
+; AVX512F-FAST-NEXT:    vpandn %ymm2, %ymm5, %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm5, %zmm9
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm2
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %ymm2
+; AVX512F-FAST-NEXT:    vmovdqu %ymm2, (%rsp) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm2, %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa64 32(%rdx), %ymm29
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm29[3,0,3,0,7,4,7,4]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2],ymm2[3],ymm4[4,5],ymm2[6],ymm4[7,8],ymm2[9],ymm4[10],ymm2[11],ymm4[12,13],ymm2[14],ymm4[15]
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %xmm4
+; AVX512F-FAST-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm4, %xmm4
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %xmm5
 ; AVX512F-FAST-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm5, %xmm5
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %xmm7
-; AVX512F-FAST-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[1,2,2,2]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm7[1],xmm5[2],xmm7[3],xmm5[4,5],xmm7[6],xmm5[7]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,0]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 64(%rsi), %xmm4
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,2,2,2]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1],xmm4[2],xmm5[3],xmm4[4,5],xmm5[6],xmm4[7]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,0]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm4, %zmm2
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa 64(%rsi), %xmm2
+; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm2, %xmm4
+; AVX512F-FAST-NEXT:    vpbroadcastq 72(%rdi), %xmm5
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1],xmm4[2,3],xmm5[4],xmm4[5],xmm5[6],xmm4[7]
+; AVX512F-FAST-NEXT:    vmovdqa 64(%rdi), %xmm5
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13]
+; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm2, %xmm2
+; AVX512F-FAST-NEXT:    vmovdqa %xmm5, %xmm7
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm2
+; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm4
 ; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm4, %xmm5
-; AVX512F-FAST-NEXT:    vpbroadcastq 72(%rdi), %xmm7
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm7[1],xmm5[2,3],xmm7[4],xmm5[5],xmm7[6],xmm5[7]
-; AVX512F-FAST-NEXT:    vmovdqa 64(%rdi), %xmm7
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13]
+; AVX512F-FAST-NEXT:    vpbroadcastq 8(%rdi), %xmm6
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm6[1],xmm5[2,3],xmm6[4],xmm5[5],xmm6[6],xmm5[7]
+; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm6
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3]
 ; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vmovdqa %xmm7, %xmm10
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
-; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm5
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm5, %xmm7
-; AVX512F-FAST-NEXT:    vpbroadcastq 8(%rdi), %xmm9
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0],xmm9[1],xmm7[2,3],xmm9[4],xmm7[5],xmm9[6],xmm7[7]
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm9
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm9[0],xmm5[0],xmm9[1],xmm5[1],xmm9[2],xmm5[2],xmm9[3],xmm5[3]
-; AVX512F-FAST-NEXT:    vpshufb %xmm10, %xmm5, %xmm5
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm5, %zmm5
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %ymm13
-; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm13, %ymm7
-; AVX512F-FAST-NEXT:    vmovdqa64 32(%rdi), %ymm30
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm30[1,1,2,2]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm9[0],ymm7[1],ymm9[2,3],ymm7[4],ymm9[5],ymm7[6],ymm9[7,8],ymm7[9],ymm9[10,11],ymm7[12],ymm9[13],ymm7[14],ymm9[15]
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm9
-; AVX512F-FAST-NEXT:    vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm9, %xmm1
-; AVX512F-FAST-NEXT:    vpbroadcastq 40(%rdi), %xmm9
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm9[1],xmm1[2,3],xmm9[4],xmm1[5],xmm9[6],xmm1[7]
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %ymm14
+; AVX512F-FAST-NEXT:    vpshufb %ymm12, %ymm14, %ymm5
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm6
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm6[1,1,2,2]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm7[0],ymm5[1],ymm7[2,3],ymm5[4],ymm7[5],ymm5[6],ymm7[7,8],ymm5[9],ymm7[10,11],ymm5[12],ymm7[13],ymm5[14],ymm7[15]
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm7
+; AVX512F-FAST-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm7, %xmm1
+; AVX512F-FAST-NEXT:    vpbroadcastq 40(%rdi), %xmm7
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm7[1],xmm1[2,3],xmm7[4],xmm1[5],xmm7[6],xmm1[7]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm1, %zmm16
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm18
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %ymm1
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm1[0,1,1,1]
-; AVX512F-FAST-NEXT:    vpshufb %ymm14, %ymm1, %ymm1
-; AVX512F-FAST-NEXT:    vpandn %ymm7, %ymm3, %ymm3
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm29
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm1[0,1,1,1]
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm1, %ymm1
+; AVX512F-FAST-NEXT:    vpandn %ymm5, %ymm9, %ymm5
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm5, %zmm31
 ; AVX512F-FAST-NEXT:    vmovdqa 64(%rcx), %xmm1
-; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm1, %xmm3
+; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm1, %xmm5
 ; AVX512F-FAST-NEXT:    vmovdqa 64(%rdx), %xmm7
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm7[1,2,2,2]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm9[1],xmm3[2],xmm9[3],xmm3[4,5],xmm9[6],xmm3[7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm9[1],xmm5[2],xmm9[3],xmm5[4,5],xmm9[6],xmm5[7]
 ; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
 ; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm1, %xmm1
 ; AVX512F-FAST-NEXT:    vmovdqa %xmm7, %xmm9
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [0,1,0,1,8,9,8,8]
-; AVX512F-FAST-NEXT:    vpermt2q %zmm3, %zmm7, %zmm1
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm19 = zmm4[0,1,0,1,4,5,4,5]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm31, %zmm19
+; AVX512F-FAST-NEXT:    vpermt2q %zmm5, %zmm7, %zmm1
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm20 = zmm2[0,1,0,1,4,5,4,5]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm21, %zmm20
 ; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm1
 ; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm3
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[1,2,2,2]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3],xmm0[4,5],xmm4[6],xmm0[7]
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm2
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[1,2,2,2]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm5[1],xmm0[2],xmm5[3],xmm0[4,5],xmm5[6],xmm0[7]
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
 ; AVX512F-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
 ; AVX512F-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm1
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm21 = zmm5[0,1,0,1,4,5,4,5]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm31, %zmm21
-; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm3
-; AVX512F-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa64 (%rdi), %ymm28
-; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm3, %ymm0
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm28[1,1,2,2]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm22 = zmm4[0,1,0,1,4,5,4,5]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm21, %zmm22
+; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm2
+; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa64 (%rdi), %ymm30
+; AVX512F-FAST-NEXT:    vpshufb %ymm12, %ymm2, %ymm0
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm30[1,1,2,2]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm28[0,1,2,1,4,5,6,5]
-; AVX512F-FAST-NEXT:    vprolq $16, %ymm3, %ymm3
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0,1],ymm1[2],ymm3[3],ymm1[4],ymm3[5,6],ymm1[7],ymm3[8,9],ymm1[10],ymm3[11],ymm1[12],ymm3[13,14],ymm1[15]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm30[0,1,2,1,4,5,6,5]
+; AVX512F-FAST-NEXT:    vprolq $16, %ymm2, %ymm2
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3],ymm1[4],ymm2[5,6],ymm1[7],ymm2[8,9],ymm1[10],ymm2[11],ymm1[12],ymm2[13,14],ymm1[15]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %ymm10
+; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %ymm5
 ; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm4
 ; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm4, %ymm0
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm10[3,0,3,0,7,4,7,4]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm5[3,0,3,0,7,4,7,4]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
 ; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25,18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25]
 ; AVX512F-FAST-NEXT:    # ymm7 = mem[0,1,0,1]
 ; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm4, %ymm1
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm10[1,1,1,2,5,5,5,6]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2,3],ymm3[4],ymm1[5],ymm3[6],ymm1[7,8],ymm3[9],ymm1[10,11],ymm3[12],ymm1[13],ymm3[14],ymm1[15]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm5[1,1,1,2,5,5,5,6]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5],ymm2[6],ymm1[7,8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13],ymm2[14],ymm1[15]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm25
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm26
 ; AVX512F-FAST-NEXT:    vpbroadcastq 16(%r8), %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512F-FAST-NEXT:    vpandn %ymm0, %ymm9, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm3
-; AVX512F-FAST-NEXT:    vpshufb %ymm14, %ymm3, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm3, %ymm24
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm26
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,26,27,30,31,30,31,28,29,30,31,28,29]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm23[2,3,2,3,6,7,6,7]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0]
+; AVX512F-FAST-NEXT:    vpandnq %ymm0, %ymm17, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm2
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm2, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm27
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm28
+; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [30,31,28,29,26,27,30,31,30,31,28,29,30,31,28,29,30,31,28,29,26,27,30,31,30,31,28,29,30,31,28,29]
+; AVX512F-FAST-NEXT:    # ymm11 = mem[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm3, %ymm0
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm24[2,3,2,3,6,7,6,7]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7,8],ymm1[9],ymm0[10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
-; AVX512F-FAST-NEXT:    vprolq $16, %ymm2, %ymm1
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm23[0,1,2,1,4,5,6,5]
+; AVX512F-FAST-NEXT:    vprolq $16, %ymm3, %ymm1
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm24[0,1,2,1,4,5,6,5]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3],ymm2[4],ymm1[5,6],ymm2[7],ymm1[8,9],ymm2[10],ymm1[11],ymm2[12],ymm1[13,14],ymm2[15]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [2,3,2,3,10,11,10,10]
-; AVX512F-FAST-NEXT:    vpermt2q %zmm0, %zmm20, %zmm1
-; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm11 = [26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31]
-; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm6, %ymm0
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm18[3,2,3,3,7,6,7,7]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [2,3,2,3,10,11,10,10]
+; AVX512F-FAST-NEXT:    vpermt2q %zmm0, %zmm23, %zmm1
+; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm9 = [26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31]
+; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm8, %ymm0
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm19[3,2,3,3,7,6,7,7]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3,4],ymm0[5,6,7,8],ymm2[9],ymm0[10],ymm2[11,12],ymm0[13,14,15]
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm6, %ymm2
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm18[1,1,1,2,5,5,5,6]
+; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm8, %ymm2
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm19[1,1,1,2,5,5,5,6]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5],ymm3[6],ymm2[7,8],ymm3[9],ymm2[10,11],ymm3[12],ymm2[13],ymm3[14],ymm2[15]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [2,3,2,3,10,10,11,10]
-; AVX512F-FAST-NEXT:    vpermt2q %zmm0, %zmm18, %zmm2
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm31, %zmm2
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm19 = [2,3,2,3,10,10,11,10]
+; AVX512F-FAST-NEXT:    vpermt2q %zmm0, %zmm19, %zmm2
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm21, %zmm2
 ; AVX512F-FAST-NEXT:    vpbroadcastq 112(%r8), %ymm0
 ; AVX512F-FAST-NEXT:    vpbroadcastq 120(%r8), %ymm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm12
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm17, %zmm12
-; AVX512F-FAST-NEXT:    vmovdqa 64(%rsi), %ymm6
-; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm6, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa64 64(%rdi), %ymm23
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm23[1,1,2,2]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm16
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm17, %zmm16
+; AVX512F-FAST-NEXT:    vmovdqa 64(%rsi), %ymm8
+; AVX512F-FAST-NEXT:    vpshufb %ymm12, %ymm8, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa64 64(%rdi), %ymm24
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm24[1,1,2,2]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm23[0,1,2,1,4,5,6,5]
-; AVX512F-FAST-NEXT:    vprolq $16, %ymm6, %ymm2
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm24[0,1,2,1,4,5,6,5]
+; AVX512F-FAST-NEXT:    vprolq $16, %ymm8, %ymm2
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3],ymm1[4],ymm2[5,6],ymm1[7],ymm2[8,9],ymm1[10],ymm2[11],ymm1[12],ymm2[13,14],ymm1[15]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm22
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm25
 ; AVX512F-FAST-NEXT:    vmovdqa 64(%rcx), %ymm1
 ; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm1, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa 64(%rdx), %ymm3
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm8 = ymm3[3,0,3,0,7,4,7,4]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm8[0],ymm2[1],ymm8[2],ymm2[3],ymm8[4,5],ymm2[6],ymm8[7,8],ymm2[9],ymm8[10],ymm2[11],ymm8[12,13],ymm2[14],ymm8[15]
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm1, %ymm8
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm15 = ymm3[1,1,1,2,5,5,5,6]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0],ymm15[1],ymm8[2,3],ymm15[4],ymm8[5],ymm15[6],ymm8[7,8],ymm15[9],ymm8[10,11],ymm15[12],ymm8[13],ymm15[14],ymm8[15]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm2, %zmm8
+; AVX512F-FAST-NEXT:    vmovdqa 64(%rdx), %ymm0
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm12 = ymm0[3,0,3,0,7,4,7,4]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm12[0],ymm2[1],ymm12[2],ymm2[3],ymm12[4,5],ymm2[6],ymm12[7,8],ymm2[9],ymm12[10],ymm2[11],ymm12[12,13],ymm2[14],ymm12[15]
+; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm1, %ymm12
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm15 = ymm0[1,1,1,2,5,5,5,6]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm12[0],ymm15[1],ymm12[2,3],ymm15[4],ymm12[5],ymm15[6],ymm12[7,8],ymm15[9],ymm12[10,11],ymm15[12],ymm12[13],ymm15[14],ymm12[15]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm2, %zmm12
 ; AVX512F-FAST-NEXT:    vpbroadcastq 80(%r8), %ymm2
-; AVX512F-FAST-NEXT:    vpandn %ymm2, %ymm9, %ymm9
-; AVX512F-FAST-NEXT:    vmovdqa 64(%r8), %ymm5
-; AVX512F-FAST-NEXT:    vpshufb %ymm14, %ymm5, %ymm15
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm15, %zmm15
-; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [30,31,28,29,26,27,30,31,30,31,28,29,30,31,28,29,30,31,28,29,26,27,30,31,30,31,28,29,30,31,28,29]
-; AVX512F-FAST-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm13, %ymm9
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm14 = ymm30[2,3,2,3,6,7,6,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm9 = ymm9[0],ymm14[1],ymm9[2],ymm14[3],ymm9[4,5],ymm14[6],ymm9[7,8],ymm14[9],ymm9[10],ymm14[11],ymm9[12,13],ymm14[14],ymm9[15]
-; AVX512F-FAST-NEXT:    vprolq $16, %ymm13, %ymm13
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm14 = ymm30[0,1,2,1,4,5,6,5]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm13[0,1],ymm14[2],ymm13[3],ymm14[4],ymm13[5,6],ymm14[7],ymm13[8,9],ymm14[10],ymm13[11],ymm14[12],ymm13[13,14],ymm14[15]
-; AVX512F-FAST-NEXT:    vpermt2q %zmm9, %zmm20, %zmm13
-; AVX512F-FAST-NEXT:    vmovdqu (%rsp), %ymm0 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm0, %ymm9
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm14 = ymm27[3,2,3,3,7,6,7,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm9 = ymm9[0],ymm14[1],ymm9[2],ymm14[3,4],ymm9[5,6,7,8],ymm14[9],ymm9[10],ymm14[11,12],ymm9[13,14,15]
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm0, %ymm7
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm14 = ymm27[1,1,1,2,5,5,5,6]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm14 = ymm7[0],ymm14[1],ymm7[2,3],ymm14[4],ymm7[5],ymm14[6],ymm7[7,8],ymm14[9],ymm7[10,11],ymm14[12],ymm7[13],ymm14[14],ymm7[15]
-; AVX512F-FAST-NEXT:    vpermt2q %zmm9, %zmm18, %zmm14
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm13, %zmm31, %zmm14
-; AVX512F-FAST-NEXT:    vpbroadcastq 48(%r8), %ymm7
-; AVX512F-FAST-NEXT:    vpbroadcastq 56(%r8), %ymm9
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm14, %zmm17, %zmm7
-; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; AVX512F-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm9 # 16-byte Folded Reload
-; AVX512F-FAST-NEXT:    # xmm9 = xmm9[0],mem[0],xmm9[1],mem[1],xmm9[2],mem[2],xmm9[3],mem[3]
-; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm1, %ymm1
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm0 = ymm3[3,2,3,3,7,6,7,7]
+; AVX512F-FAST-NEXT:    vpandnq %ymm2, %ymm17, %ymm15
+; AVX512F-FAST-NEXT:    vmovdqa 64(%r8), %ymm3
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm3, %ymm13
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm13, %zmm15
+; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm14, %ymm13
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm10 = ymm6[2,3,2,3,6,7,6,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm13[0],ymm10[1],ymm13[2],ymm10[3],ymm13[4,5],ymm10[6],ymm13[7,8],ymm10[9],ymm13[10],ymm10[11],ymm13[12,13],ymm10[14],ymm13[15]
+; AVX512F-FAST-NEXT:    vprolq $16, %ymm14, %ymm13
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[0,1,2,1,4,5,6,5]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm13[0,1],ymm6[2],ymm13[3],ymm6[4],ymm13[5,6],ymm6[7],ymm13[8,9],ymm6[10],ymm13[11],ymm6[12],ymm13[13,14],ymm6[15]
+; AVX512F-FAST-NEXT:    vpermt2q %zmm10, %zmm23, %zmm6
+; AVX512F-FAST-NEXT:    vmovdqu (%rsp), %ymm14 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm14, %ymm10
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm13 = ymm29[3,2,3,3,7,6,7,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0],ymm13[1],ymm10[2],ymm13[3,4],ymm10[5,6,7,8],ymm13[9],ymm10[10],ymm13[11,12],ymm10[13,14,15]
+; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm14, %ymm7
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm13 = ymm29[1,1,1,2,5,5,5,6]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm7[0],ymm13[1],ymm7[2,3],ymm13[4],ymm7[5],ymm13[6],ymm7[7,8],ymm13[9],ymm7[10,11],ymm13[12],ymm7[13],ymm13[14],ymm7[15]
+; AVX512F-FAST-NEXT:    vpermt2q %zmm10, %zmm19, %zmm7
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm6, %zmm21, %zmm7
+; AVX512F-FAST-NEXT:    vpbroadcastq 48(%r8), %ymm6
+; AVX512F-FAST-NEXT:    vpbroadcastq 56(%r8), %ymm10
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm6, %zmm6
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm7, %zmm17, %zmm6
+; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; AVX512F-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
+; AVX512F-FAST-NEXT:    # xmm7 = xmm7[0],mem[0],xmm7[1],mem[1],xmm7[2],mem[2],xmm7[3],mem[3]
+; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm1, %ymm1
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[3,2,3,3,7,6,7,7]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3,4],ymm1[5,6,7,8],ymm0[9],ymm1[10],ymm0[11,12],ymm1[13,14,15]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm9, %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [2,2,3,2,8,9,8,9]
-; AVX512F-FAST-NEXT:    vpermt2q %zmm1, %zmm9, %zmm0
+; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm7, %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [2,2,3,2,8,9,8,9]
+; AVX512F-FAST-NEXT:    vpermt2q %zmm1, %zmm7, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqa 96(%rdi), %xmm1
 ; AVX512F-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
 ; AVX512F-FAST-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
-; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm6, %ymm6
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm17
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm13 = ymm23[2,3,2,3,6,7,6,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0],ymm13[1],ymm6[2],ymm13[3],ymm6[4,5],ymm13[6],ymm6[7,8],ymm13[9],ymm6[10],ymm13[11],ymm6[12,13],ymm13[14],ymm6[15]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13]
-; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm13 = [2,3,2,2,8,9,8,9]
-; AVX512F-FAST-NEXT:    vpermt2q %zmm1, %zmm13, %zmm6
+; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm8, %ymm8
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm10 = ymm24[2,3,2,3,6,7,6,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0],ymm10[1],ymm8[2],ymm10[3],ymm8[4,5],ymm10[6],ymm8[7,8],ymm10[9],ymm8[10],ymm10[11],ymm8[12,13],ymm10[14],ymm8[15]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = [0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13]
+; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm1, %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [2,3,2,2,8,9,8,9]
+; AVX512F-FAST-NEXT:    vpermt2q %zmm1, %zmm10, %zmm8
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm1, %zmm6
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm1, %zmm8
 ; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX512F-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
 ; AVX512F-FAST-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
 ; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm4, %ymm4
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm5[0,1,1,1]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm10[3,2,3,3,7,6,7,7]
+; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm4, %ymm4
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm3[0,1,1,1]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[3,2,3,3,7,6,7,7]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2],ymm5[3,4],ymm4[5,6,7,8],ymm5[9],ymm4[10],ymm5[11,12],ymm4[13,14,15]
-; AVX512F-FAST-NEXT:    vpermt2q %zmm0, %zmm9, %zmm4
+; AVX512F-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm4
 ; AVX512F-FAST-NEXT:    vpbroadcastq 88(%r8), %ymm0
 ; AVX512F-FAST-NEXT:    vpbroadcastq 96(%r8), %ymm5
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm6, %zmm5, %zmm0
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm6
-; AVX512F-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
-; AVX512F-FAST-NEXT:    # xmm6 = xmm6[0],mem[0],xmm6[1],mem[1],xmm6[2],mem[2],xmm6[3],mem[3]
-; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm6, %xmm6
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm8, %zmm5, %zmm0
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm7
+; AVX512F-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
+; AVX512F-FAST-NEXT:    # xmm7 = xmm7[0],mem[0],xmm7[1],mem[1],xmm7[2],mem[2],xmm7[3],mem[3]
+; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm7, %xmm7
 ; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm17, %ymm9
-; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm3, %ymm9
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm24[0,1,1,1]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm11 = ymm28[2,3,2,3,6,7,6,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm9 = ymm9[0],ymm11[1],ymm9[2],ymm11[3],ymm9[4,5],ymm11[6],ymm9[7,8],ymm11[9],ymm9[10],ymm11[11],ymm9[12,13],ymm11[14],ymm9[15]
-; AVX512F-FAST-NEXT:    vpermt2q %zmm6, %zmm13, %zmm9
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm1, %zmm9
+; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm3, %ymm8
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm27[0,1,1,1]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm11 = ymm30[2,3,2,3,6,7,6,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0],ymm11[1],ymm8[2],ymm11[3],ymm8[4,5],ymm11[6],ymm8[7,8],ymm11[9],ymm8[10],ymm11[11],ymm8[12,13],ymm11[14],ymm8[15]
+; AVX512F-FAST-NEXT:    vpermt2q %zmm7, %zmm10, %zmm8
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm1, %zmm8
 ; AVX512F-FAST-NEXT:    vpbroadcastq 24(%r8), %ymm4
-; AVX512F-FAST-NEXT:    vpbroadcastq 32(%r8), %ymm6
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm4, %zmm4
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm9, %zmm5, %zmm4
+; AVX512F-FAST-NEXT:    vpbroadcastq 32(%r8), %ymm7
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm4, %zmm4
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm8, %zmm5, %zmm4
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0]
 ; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
 ; AVX512F-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm3 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm16 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm18 # 64-byte Folded Reload
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535]
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vpternlogq $248, %zmm5, %zmm3, %zmm9
-; AVX512F-FAST-NEXT:    vpternlogq $248, %zmm5, %zmm16, %zmm29
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vpternlogq $248, %zmm5, %zmm3, %zmm8
+; AVX512F-FAST-NEXT:    vpternlogq $248, %zmm5, %zmm18, %zmm31
 ; AVX512F-FAST-NEXT:    vpbroadcastq 64(%r8), %ymm5
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm5, %zmm2
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm19, %zmm5, %zmm2
-; AVX512F-FAST-NEXT:    vpbroadcastq (%r8), %ymm6
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm6, %zmm6
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm21, %zmm5, %zmm6
-; AVX512F-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm25 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm22, %zmm1, %zmm8
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm20, %zmm5, %zmm2
+; AVX512F-FAST-NEXT:    vpbroadcastq (%r8), %ymm7
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm22, %zmm5, %zmm7
+; AVX512F-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm26 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm25, %zmm1, %zmm12
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $248, %zmm1, %zmm25, %zmm26
-; AVX512F-FAST-NEXT:    vpternlogq $248, %zmm1, %zmm8, %zmm15
+; AVX512F-FAST-NEXT:    vpternlogq $248, %zmm1, %zmm26, %zmm28
+; AVX512F-FAST-NEXT:    vpternlogq $248, %zmm1, %zmm12, %zmm15
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm4, 128(%r9)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm7, 256(%r9)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm6, 256(%r9)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, 448(%r9)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm15, 384(%r9)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm12, 576(%r9)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm26, 64(%r9)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm6, (%r9)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm16, 576(%r9)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm28, 64(%r9)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm7, (%r9)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm2, 320(%r9)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm29, 192(%r9)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm9, 512(%r9)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm31, 192(%r9)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm8, 512(%r9)
 ; AVX512F-FAST-NEXT:    addq $360, %rsp # imm = 0x168
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
index c874adb7e93a444..006edc71450157a 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
@@ -5584,288 +5584,289 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX512F-ONLY-FAST-LABEL: store_i16_stride7_vf32:
 ; AVX512F-ONLY-FAST:       # %bb.0:
-; AVX512F-ONLY-FAST-NEXT:    subq $232, %rsp
+; AVX512F-ONLY-FAST-NEXT:    subq $248, %rsp
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rcx), %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm1, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm1, %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm1, %ymm10
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %ymm4
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u>
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm4, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm4, %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm18
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpor %ymm2, %ymm3, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsi), %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rsi), %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm8, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm8, (%rsp) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsi), %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rsi), %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm4, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm4, %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %ymm6
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm6, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm6, %ymm18
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm6, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm6, %ymm16
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm2, %ymm3, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm2, %ymm4, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r9), %ymm13
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm13, %ymm6
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r8), %ymm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm14, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm14, %ymm7
 ; AVX512F-ONLY-FAST-NEXT:    vporq %ymm6, %ymm7, %ymm25
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm10, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm6, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm4, %ymm5, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rcx), %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm4, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm31
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm11, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm8, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm6, %ymm5, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rcx), %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm3, %ymm29
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdx), %ymm12
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm12, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r9), %ymm15
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm15, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r8), %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm4, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r8), %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm6, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vporq %ymm0, %ymm1, %ymm21
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,u,u,u,u,14,15,12,13,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm10, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm6[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm11, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm8[3,3,3,3,7,7,7,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5],ymm1[6],ymm2[7,8,9,10],ymm1[11],ymm2[12,13],ymm1[14],ymm2[15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm10[10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm10, %ymm29
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm6[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6,7,8],ymm2[9],ymm3[10,11],ymm2[12],ymm3[13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm22 = <u,2,2,3,10,u,11,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm1, %zmm2, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm9[14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm11[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm11[10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm11, %ymm30
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm8[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2,3],ymm2[4],ymm4[5,6,7,8],ymm2[9],ymm4[10,11],ymm2[12],ymm4[13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm24 = <u,2,2,3,10,u,11,u>
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm1, %zmm2, %zmm24
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm10[14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm18[3,3,3,3,7,7,7,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6,7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rcx), %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdx), %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdx), %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm5[0],xmm10[0],xmm5[1],xmm10[1],xmm5[2],xmm10[2],xmm5[3],xmm10[3]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[0,1,2,3,0,1,2,3,6,7,4,5,6,7,4,5]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm23 = <u,2,2,3,8,u,9,u>
 ; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm1, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm8, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm18[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8,9,10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rsi), %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm3, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm5, %xmm30
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm19 = [2,1,3,3,8,8,9,9]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm2, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm14[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7,8,9],ymm3[10],ymm2[11,12],ymm3[13],ymm2[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [2,2,2,3,8,8,8,9]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r9), %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm9, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm16[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8,9,10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rsi), %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm4, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [2,1,3,3,8,8,9,9]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm0, %zmm20
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm14[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm0[0,1],ymm4[2],ymm0[3,4],ymm4[5],ymm0[6,7,8,9],ymm4[10],ymm0[11,12],ymm4[13],ymm0[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [2,2,2,3,8,8,8,9]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r9), %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r8), %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r8), %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,2,3,6,7,4,5,6,7,4,5,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm7, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm7, %xmm24
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm5, %zmm2, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vprold $16, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4],xmm1[5],xmm0[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = [4,5,2,3,4,5,6,7,8,9,10,11,10,11,8,9]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,1,2,3,6,7,4,5,6,7,4,5,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm7, %zmm4, %zmm22
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512F-ONLY-FAST-NEXT:    vprold $16, %xmm2, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3,4],xmm2[5],xmm1[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [4,5,2,3,4,5,6,7,8,9,10,11,10,11,8,9]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm7, %xmm4, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm26 = <u,0,u,1,8,8,9,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm1, %zmm0, %zmm26
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm1, %zmm26
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r9), %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r8), %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r8), %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm9[0],xmm2[0],xmm9[1],xmm2[1],xmm9[2],xmm2[2],xmm9[3],xmm2[3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm27 = [0,0,0,1,8,9,9,11]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm3, %zmm27
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm0, %zmm27
 ; AVX512F-ONLY-FAST-NEXT:    vprold $16, %ymm13, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm14[1,2,2,3,5,6,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0,1],ymm0[2],ymm3[3,4],ymm0[5],ymm3[6,7,8,9],ymm0[10],ymm3[11,12],ymm0[13],ymm3[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd {{.*#+}} ymm5 = [18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm13, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm14[0,0,2,1,4,4,6,5]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1,2],ymm7[3],ymm3[4,5],ymm7[6],ymm3[7,8,9,10],ymm7[11],ymm3[12,13],ymm7[14],ymm3[15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm14[1,2,2,3,5,6,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm4[0,1],ymm0[2],ymm4[3,4],ymm0[5],ymm4[6,7,8,9],ymm0[10],ymm4[11,12],ymm0[13],ymm4[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm13, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm13 = ymm14[0,0,2,1,4,4,6,5]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1,2],ymm13[3],ymm4[4,5],ymm13[6],ymm4[7,8,9,10],ymm13[11],ymm4[12,13],ymm13[14],ymm4[15]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [2,2,3,3,10,9,11,10]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm3, %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsi), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm3, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm3, %ymm17
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm25, %zmm0, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm30, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm4, %zmm28
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsi), %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm7, %xmm4, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm19
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm9[4],xmm2[4],xmm9[5],xmm2[5],xmm9[6],xmm2[6],xmm9[7],xmm2[7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm25, %zmm0, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm2, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = <0,0,1,1,12,13,u,15>
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm1, %zmm25
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm25
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 8(%rax), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rax), %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm3, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm1, %zmm30
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm29, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[1,1,1,1,5,5,5,5]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm6[2],ymm1[3,4],ymm6[5],ymm1[6,7,8,9],ymm6[10],ymm1[11,12],ymm6[13],ymm1[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm16
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm31, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm13[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm12[0,1,1,3,4,5,5,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm6[0,1],ymm1[2],ymm6[3,4],ymm1[5],ymm6[6,7,8,9],ymm1[10],ymm6[11,12],ymm1[13],ymm6[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm15, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm4[0,0,2,1,4,4,6,5]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1,2],ymm5[3],ymm1[4,5],ymm5[6],ymm1[7,8,9,10],ymm5[11],ymm1[12,13],ymm5[14],ymm1[15]
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 8(%rax), %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rax), %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm9, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm31
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm30, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm8[1,1,1,1,5,5,5,5]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7,8,9],ymm4[10],ymm2[11,12],ymm4[13],ymm2[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm18
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm29, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm7[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm12[0,1,1,3,4,5,5,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0,1],ymm2[2],ymm4[3,4],ymm2[5],ymm4[6,7,8,9],ymm2[10],ymm4[11,12],ymm2[13],ymm4[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm17
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm15, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm6[0,0,2,1,4,4,6,5]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3],ymm2[4,5],ymm3[6],ymm2[7,8,9,10],ymm3[11],ymm2[12,13],ymm3[14],ymm2[15]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm29 = <0,1,u,3,10,10,11,11>
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm1, %zmm21, %zmm29
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rax), %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm6, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,4,u,u,u,5,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm6, %ymm2, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm21, %zmm29
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rax), %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm8, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [0,13,4,0,0,14,5,0,0,13,4,0,0,14,5,0]
+; AVX512F-ONLY-FAST-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm8, %ymm21, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm13[14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm12[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm5[0],ymm2[1],ymm5[2,3],ymm2[4],ymm5[5,6,7,8],ymm2[9],ymm5[10,11],ymm2[12],ymm5[13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27,22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27]
-; AVX512F-ONLY-FAST-NEXT:    # ymm5 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm13, %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm16
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm7, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm7[14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm12[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6,7,8],ymm2[9],ymm3[10,11],ymm2[12],ymm3[13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27,22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27]
+; AVX512F-ONLY-FAST-NEXT:    # ymm7 = mem[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm4, %ymm13
 ; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm12 = ymm12[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm13[0,1],ymm12[2],ymm13[3,4],ymm12[5],ymm13[6,7,8,9],ymm12[10],ymm13[11,12],ymm12[13],ymm13[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [2,2,2,3,8,10,10,11]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm21, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm12 = ymm4[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm12[2],ymm2[3,4],ymm12[5],ymm2[6,7,8,9],ymm12[10],ymm2[11,12],ymm12[13],ymm2[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vprold $16, %ymm15, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[1,2,2,3,5,6,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm4[0,1],ymm12[2],ymm4[3,4],ymm12[5],ymm4[6,7,8,9],ymm12[10],ymm4[11,12],ymm12[13],ymm4[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [2,1,3,2,10,10,10,11]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm31, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm22, %zmm18, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm13, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rax), %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [30,5,0,0,31,6,0,31,30,5,0,0,31,6,0,31]
-; AVX512F-ONLY-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm15, %zmm6, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm12, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm12 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm10, %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[1,1,2,2]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm11[0],xmm10[1],xmm11[2,3],xmm10[4],xmm11[5,6],xmm10[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,2,3,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [0,0,1,1,8,8,10,9]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm11, %zmm10
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm13[0,1],ymm12[2],ymm13[3,4],ymm12[5],ymm13[6,7,8,9],ymm12[10],ymm13[11,12],ymm12[13],ymm13[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm30 = [2,2,2,3,8,10,10,11]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm30, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm13 = ymm6[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1],ymm13[2],ymm3[3,4],ymm13[5],ymm3[6,7,8,9],ymm13[10],ymm3[11,12],ymm13[13],ymm3[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vprold $16, %ymm15, %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[1,2,2,3,5,6,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0,1],ymm13[2],ymm6[3,4],ymm13[5],ymm6[6,7,8,9],ymm13[10],ymm6[11,12],ymm13[13],ymm6[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm13 = [2,1,3,2,10,10,10,11]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm13, %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm5[4],xmm10[4],xmm5[5],xmm10[5],xmm5[6],xmm10[6],xmm5[7],xmm10[7]
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm15 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm10, %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,1,2,2]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm10[1],xmm5[2,3],xmm10[4],xmm5[5,6],xmm10[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,2,3,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,0,1,1,8,8,10,9]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm2, %zmm5
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm6 # 16-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # xmm6 = xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm24, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm1[0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm6, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [0,1,1,3,8,8,9,9]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm22, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vprold $16, %xmm0, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm8[1,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm6[2],xmm2[3,4],xmm6[5],xmm2[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm11, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rcx), %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm2, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm12 = xmm11[1,1,2,2]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm12[0],xmm6[1],xmm12[2,3],xmm6[4],xmm12[5,6],xmm6[7]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm12 = xmm11[4],xmm2[4],xmm11[5],xmm2[5],xmm11[6],xmm2[6],xmm11[7],xmm2[7]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm11[0],xmm2[0],xmm11[1],xmm2[1],xmm11[2],xmm2[2],xmm11[3],xmm2[3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[0,1,2,3,0,1,2,3,6,7,4,5,6,7,4,5]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm22, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm3 # 16-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # xmm3 = xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm1[0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm11, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm3, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,1,1,3,8,8,9,9]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm4, %zmm10
+; AVX512F-ONLY-FAST-NEXT:    vprold $16, %xmm14, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm11 = xmm0[1,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm11[0,1],xmm3[2],xmm11[3,4],xmm3[5],xmm11[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm11
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm2, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rcx), %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm0, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm14 = xmm3[1,1,2,2]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm14[0],xmm2[1],xmm14[2,3],xmm2[4],xmm14[5,6],xmm2[7]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm14 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,0,1,2,3,6,7,4,5,6,7,4,5]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm4, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm24, %zmm2, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm12, %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm12, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm1[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7,8,9],ymm4[10],ymm3[11,12],ymm4[13],ymm3[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm12[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm1[0,1,1,3,4,5,5,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm7[0,1],ymm4[2],ymm7[3,4],ymm4[5],ymm7[6,7,8,9],ymm4[10],ymm7[11,12],ymm4[13],ymm7[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm13, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rax), %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [30,5,0,0,31,6,0,31,30,5,0,0,31,6,0,31]
+; AVX512F-ONLY-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm3, %zmm8, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm7
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 36(%rax), %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 40(%rax), %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm6, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm13, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm26, %zmm11, %zmm10
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 40(%rax), %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm6, %zmm6
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm10, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm1[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm5[2],ymm0[3,4],ymm5[5],ymm0[6,7,8,9],ymm5[10],ymm0[11,12],ymm5[13],ymm0[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm5 = ymm10[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm10 = ymm1[0,1,1,3,4,5,5,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm10[0,1],ymm5[2],ymm10[3,4],ymm5[5],ymm10[6,7,8,9],ymm5[10],ymm10[11,12],ymm5[13],ymm10[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm12[0,2,3,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm26, %zmm8, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm11, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd (%rax), %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 4(%rax), %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm5, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm14[0,2,3,3,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm17[0,0,1,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm16[2,2,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,1,3,2]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm31, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd (%rax), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 4(%rax), %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm0, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm27, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu (%rsp), %ymm13 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm13[10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm19[0,0,1,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm18[2,2,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm17[2,1,3,2]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm27, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm15[10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm12 = ymm1[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm12[0],ymm2[1],ymm12[2,3],ymm2[4],ymm12[5,6,7,8],ymm2[9],ymm12[10,11],ymm2[12],ymm12[13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm13 = ymm1[1,1,1,1,5,5,5,5]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm12[0,1],ymm13[2],ymm12[3,4],ymm13[5],ymm12[6,7,8,9],ymm13[10],ymm12[11,12],ymm13[13],ymm12[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm21, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <6,u,u,u,7,u,u,7>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm3, %ymm2, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm15, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [0,13,4,0,0,14,5,0,0,13,4,0,0,14,5,0]
-; AVX512F-ONLY-FAST-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm3, %zmm5, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm14 = ymm1[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm14[0],ymm0[1],ymm14[2,3],ymm0[4],ymm14[5,6,7,8],ymm0[9],ymm14[10,11],ymm0[12],ymm14[13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm15 = ymm1[1,1,1,1,5,5,5,5]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm14 = ymm14[0,1],ymm15[2],ymm14[3,4],ymm15[5],ymm14[6,7,8,9],ymm15[10],ymm14[11,12],ymm15[13],ymm14[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm30, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <6,u,u,u,7,u,u,7>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm9, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm3, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm3, %zmm21, %zmm3
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm28, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm12, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm5 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm9 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm5, %zmm11, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm23, %zmm18, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm25, %zmm30
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm30
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm5, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm8, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm29, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 32(%rax), %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm2, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm20, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm19, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm14, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm4 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm9 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm8, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm23, %zmm2, %zmm20
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm25, %zmm31
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm31
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm2, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm4, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm29, %zmm16
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm16
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 32(%rax), %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm22, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm20, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, 128(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, 320(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, (%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, 320(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, 256(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, 192(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, 384(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, 64(%rax)
-; AVX512F-ONLY-FAST-NEXT:    addq $232, %rsp
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, 192(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, 384(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, 64(%rax)
+; AVX512F-ONLY-FAST-NEXT:    addq $248, %rsp
 ; AVX512F-ONLY-FAST-NEXT:    vzeroupper
 ; AVX512F-ONLY-FAST-NEXT:    retq
 ;
@@ -6206,288 +6207,289 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX512DQ-FAST-LABEL: store_i16_stride7_vf32:
 ; AVX512DQ-FAST:       # %bb.0:
-; AVX512DQ-FAST-NEXT:    subq $232, %rsp
+; AVX512DQ-FAST-NEXT:    subq $248, %rsp
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%rcx), %ymm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm1, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm1, %ymm9
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm1, %ymm10
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %ymm4
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u>
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm4, %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm4, %ymm11
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm4, %ymm18
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vpor %ymm2, %ymm3, %ymm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rsi), %ymm8
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rsi), %ymm10
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm8, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm8, (%rsp) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rsi), %ymm4
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rsi), %ymm11
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm4, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm4, %ymm9
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %ymm6
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm6, %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm6, %ymm18
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm6, %ymm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm6, %ymm16
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpor %ymm2, %ymm3, %ymm2
+; AVX512DQ-FAST-NEXT:    vpor %ymm2, %ymm4, %ymm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%r9), %ymm13
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm13, %ymm6
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %ymm14
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm14, %ymm7
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm14, %ymm7
 ; AVX512DQ-FAST-NEXT:    vporq %ymm6, %ymm7, %ymm25
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm10, %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %ymm6
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm6, %ymm5
-; AVX512DQ-FAST-NEXT:    vpor %ymm4, %ymm5, %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rcx), %ymm4
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm4, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm4, %ymm31
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm11, %ymm6
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %ymm8
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm8, %ymm5
+; AVX512DQ-FAST-NEXT:    vpor %ymm6, %ymm5, %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rcx), %ymm3
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm3, %ymm29
 ; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdx), %ymm12
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm12, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %ymm15
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm15, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r8), %ymm4
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm4, %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r8), %ymm6
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm6, %ymm1
 ; AVX512DQ-FAST-NEXT:    vporq %ymm0, %ymm1, %ymm21
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,u,u,u,u,14,15,12,13,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm10, %ymm1
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm6[3,3,3,3,7,7,7,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm11, %ymm1
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm8[3,3,3,3,7,7,7,7]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5],ymm1[6],ymm2[7,8,9,10],ymm1[11],ymm2[12,13],ymm1[14],ymm2[15]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm10[10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm10, %ymm29
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm6[2,2,2,2,6,6,6,6]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6,7,8],ymm2[9],ymm3[10,11],ymm2[12],ymm3[13,14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm22 = <u,2,2,3,10,u,11,u>
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm1, %zmm2, %zmm22
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm9[14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm11[3,3,3,3,7,7,7,7]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm11[10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm11, %ymm30
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm8[2,2,2,2,6,6,6,6]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2,3],ymm2[4],ymm4[5,6,7,8],ymm2[9],ymm4[10,11],ymm2[12],ymm4[13,14,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm24 = <u,2,2,3,10,u,11,u>
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm1, %zmm2, %zmm24
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm10[14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm18[3,3,3,3,7,7,7,7]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6,7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13,14,15]
 ; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rcx), %xmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdx), %xmm11
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3]
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdx), %xmm5
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm5[0],xmm10[0],xmm5[1],xmm10[1],xmm5[2],xmm10[2],xmm5[3],xmm10[3]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[0,1,2,3,0,1,2,3,6,7,4,5,6,7,4,5]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm23 = <u,2,2,3,8,u,9,u>
 ; AVX512DQ-FAST-NEXT:    vpermi2q %zmm2, %zmm1, %zmm23
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm8, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm18[3,3,3,3,7,7,7,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8,9,10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rsi), %xmm1
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm3, %xmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm5, %xmm30
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm19 = [2,1,3,3,8,8,9,9]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm3, %zmm2, %zmm19
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm14[3,3,3,3,7,7,7,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7,8,9],ymm3[10],ymm2[11,12],ymm3[13],ymm2[14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [2,2,2,3,8,8,8,9]
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm9, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm16[3,3,3,3,7,7,7,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8,9,10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rsi), %xmm2
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm4, %xmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [2,1,3,3,8,8,9,9]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm4, %zmm0, %zmm20
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm14[3,3,3,3,7,7,7,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm0[0,1],ymm4[2],ymm0[3,4],ymm4[5],ymm0[6,7,8,9],ymm4[10],ymm0[11,12],ymm4[13],ymm0[14,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [2,2,2,3,8,8,8,9]
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %xmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r8), %xmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
 ; AVX512DQ-FAST-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r8), %xmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,2,3,6,7,4,5,6,7,4,5,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm7, %xmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm7, %xmm24
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm5, %zmm2, %zmm20
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX512DQ-FAST-NEXT:    vprold $16, %xmm1, %xmm1
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4],xmm1[5],xmm0[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = [4,5,2,3,4,5,6,7,8,9,10,11,10,11,8,9]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,1,2,3,6,7,4,5,6,7,4,5,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm7
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm7, %zmm4, %zmm22
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512DQ-FAST-NEXT:    vprold $16, %xmm2, %xmm2
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3,4],xmm2[5],xmm1[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [4,5,2,3,4,5,6,7,8,9,10,11,10,11,8,9]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm4, %xmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm26 = <u,0,u,1,8,8,9,u>
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm1, %zmm0, %zmm26
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm2, %zmm1, %zmm26
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%r9), %xmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %xmm1
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm3
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
+; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %xmm9
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm9[0],xmm2[0],xmm9[1],xmm2[1],xmm9[2],xmm2[2],xmm9[3],xmm2[3]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm27 = [0,0,0,1,8,9,9,11]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm0, %zmm3, %zmm27
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm4, %zmm0, %zmm27
 ; AVX512DQ-FAST-NEXT:    vprold $16, %ymm13, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm14[1,2,2,3,5,6,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0,1],ymm0[2],ymm3[3,4],ymm0[5],ymm3[6,7,8,9],ymm0[10],ymm3[11,12],ymm0[13],ymm3[14,15]
-; AVX512DQ-FAST-NEXT:    vpbroadcastd {{.*#+}} ymm5 = [18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm13, %ymm3
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm14[0,0,2,1,4,4,6,5]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1,2],ymm7[3],ymm3[4,5],ymm7[6],ymm3[7,8,9,10],ymm7[11],ymm3[12,13],ymm7[14],ymm3[15]
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm14[1,2,2,3,5,6,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm4[0,1],ymm0[2],ymm4[3,4],ymm0[5],ymm4[6,7,8,9],ymm0[10],ymm4[11,12],ymm0[13],ymm4[14,15]
+; AVX512DQ-FAST-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm13, %ymm4
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm13 = ymm14[0,0,2,1,4,4,6,5]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1,2],ymm13[3],ymm4[4,5],ymm13[6],ymm4[7,8,9,10],ymm13[11],ymm4[12,13],ymm13[14],ymm4[15]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [2,2,3,3,10,9,11,10]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm0, %zmm3, %zmm28
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %xmm8
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rsi), %xmm0
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm3, %xmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm3, %ymm17
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm25, %zmm0, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm30, %xmm9
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm0, %zmm4, %zmm28
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rsi), %xmm14
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm4, %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm19
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm9[4],xmm2[4],xmm9[5],xmm2[5],xmm9[6],xmm2[6],xmm9[7],xmm2[7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm25, %zmm0, %zmm4
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm2, %xmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = <0,0,1,1,12,13,u,15>
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm2, %zmm1, %zmm25
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm25
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FAST-NEXT:    vpbroadcastd 8(%rax), %ymm1
-; AVX512DQ-FAST-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rax), %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm3, %ymm7
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm1, %zmm30
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm29, %ymm1
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[1,1,1,1,5,5,5,5]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm6[2],ymm1[3,4],ymm6[5],ymm1[6,7,8,9],ymm6[10],ymm1[11,12],ymm6[13],ymm1[14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm16
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm31, %ymm13
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm13[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm12[0,1,1,3,4,5,5,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm6[0,1],ymm1[2],ymm6[3,4],ymm1[5],ymm6[6,7,8,9],ymm1[10],ymm6[11,12],ymm1[13],ymm6[14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm15, %ymm1
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm4[0,0,2,1,4,4,6,5]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1,2],ymm5[3],ymm1[4,5],ymm5[6],ymm1[7,8,9,10],ymm5[11],ymm1[12,13],ymm5[14],ymm1[15]
+; AVX512DQ-FAST-NEXT:    vpbroadcastd 8(%rax), %ymm2
+; AVX512DQ-FAST-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rax), %ymm9
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm9, %ymm4
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm31
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm30, %ymm2
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm8[1,1,1,1,5,5,5,5]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7,8,9],ymm4[10],ymm2[11,12],ymm4[13],ymm2[14,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm18
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm29, %ymm7
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm7[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm12[0,1,1,3,4,5,5,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0,1],ymm2[2],ymm4[3,4],ymm2[5],ymm4[6,7,8,9],ymm2[10],ymm4[11,12],ymm2[13],ymm4[14,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm17
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm15, %ymm2
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm6[0,0,2,1,4,4,6,5]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3],ymm2[4,5],ymm3[6],ymm2[7,8,9,10],ymm3[11],ymm2[12,13],ymm3[14],ymm2[15]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm29 = <0,1,u,3,10,10,11,11>
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm1, %zmm21, %zmm29
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rax), %ymm6
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm6, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,4,u,u,u,5,u>
-; AVX512DQ-FAST-NEXT:    vpermd %ymm6, %ymm2, %ymm2
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm2, %zmm21, %zmm29
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rax), %ymm8
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm8, %ymm1
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x8 {{.*#+}} zmm21 = [0,13,4,0,0,14,5,0,0,13,4,0,0,14,5,0]
+; AVX512DQ-FAST-NEXT:    # zmm21 = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm8, %ymm21, %ymm2
 ; AVX512DQ-FAST-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm14
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm13[14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm12[3,3,3,3,7,7,7,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm5[0],ymm2[1],ymm5[2,3],ymm2[4],ymm5[5,6,7,8],ymm2[9],ymm5[10,11],ymm2[12],ymm5[13,14,15]
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27,22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27]
-; AVX512DQ-FAST-NEXT:    # ymm5 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm13, %ymm13
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm16
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm7, %ymm4
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm7[14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm12[3,3,3,3,7,7,7,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6,7,8],ymm2[9],ymm3[10,11],ymm2[12],ymm3[13,14,15]
+; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27,22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27]
+; AVX512DQ-FAST-NEXT:    # ymm7 = mem[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm4, %ymm13
 ; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm12 = ymm12[2,2,2,2,6,6,6,6]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm13[0,1],ymm12[2],ymm13[3,4],ymm12[5],ymm13[6,7,8,9],ymm12[10],ymm13[11,12],ymm12[13],ymm13[14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [2,2,2,3,8,10,10,11]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm21, %zmm13
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm12 = ymm4[3,3,3,3,7,7,7,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm12[2],ymm2[3,4],ymm12[5],ymm2[6,7,8,9],ymm12[10],ymm2[11,12],ymm12[13],ymm2[14,15]
-; AVX512DQ-FAST-NEXT:    vprold $16, %ymm15, %ymm12
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[1,2,2,3,5,6,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm4[0,1],ymm12[2],ymm4[3,4],ymm12[5],ymm4[6,7,8,9],ymm12[10],ymm4[11,12],ymm12[13],ymm4[14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [2,1,3,2,10,10,10,11]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm31, %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm22, %zmm18, %zmm13
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm13, %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rax), %zmm15
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x8 {{.*#+}} zmm4 = [30,5,0,0,31,6,0,31,30,5,0,0,31,6,0,31]
-; AVX512DQ-FAST-NEXT:    # zmm4 = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermi2d %zmm15, %zmm6, %zmm4
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm12, %zmm4
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
-; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm12 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm10, %xmm10
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[1,1,2,2]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm11[0],xmm10[1],xmm11[2,3],xmm10[4],xmm11[5,6],xmm10[7]
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,2,3,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [0,0,1,1,8,8,10,9]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm11, %zmm10
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm13[0,1],ymm12[2],ymm13[3,4],ymm12[5],ymm13[6,7,8,9],ymm12[10],ymm13[11,12],ymm12[13],ymm13[14,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm30 = [2,2,2,3,8,10,10,11]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm30, %zmm12
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm13 = ymm6[3,3,3,3,7,7,7,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1],ymm13[2],ymm3[3,4],ymm13[5],ymm3[6,7,8,9],ymm13[10],ymm3[11,12],ymm13[13],ymm3[14,15]
+; AVX512DQ-FAST-NEXT:    vprold $16, %ymm15, %ymm13
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[1,2,2,3,5,6,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0,1],ymm13[2],ymm6[3,4],ymm13[5],ymm6[6,7,8,9],ymm13[10],ymm6[11,12],ymm13[13],ymm6[14,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm13 = [2,1,3,2,10,10,10,11]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm13, %zmm6
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm5[4],xmm10[4],xmm5[5],xmm10[5],xmm5[6],xmm10[6],xmm5[7],xmm10[7]
+; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm15 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm15, %xmm10, %xmm10
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,1,2,2]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm10[1],xmm5[2,3],xmm10[4],xmm5[5,6],xmm10[7]
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,2,3,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,0,1,1,8,8,10,9]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm2, %zmm5
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm6 # 16-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # xmm6 = xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm24, %xmm1
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm1[0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm6, %xmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [0,1,1,3,8,8,9,9]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm22, %zmm13
-; AVX512DQ-FAST-NEXT:    vprold $16, %xmm0, %xmm6
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm8[1,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm6[2],xmm2[3,4],xmm6[5],xmm2[6,7]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm11, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rcx), %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm2, %xmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %xmm11
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm12 = xmm11[1,1,2,2]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm12[0],xmm6[1],xmm12[2,3],xmm6[4],xmm12[5,6],xmm6[7]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm12 = xmm11[4],xmm2[4],xmm11[5],xmm2[5],xmm11[6],xmm2[6],xmm11[7],xmm2[7]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm11[0],xmm2[0],xmm11[1],xmm2[1],xmm11[2],xmm2[2],xmm11[3],xmm2[3]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[0,1,2,3,0,1,2,3,6,7,4,5,6,7,4,5]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm22, %zmm2
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm3 # 16-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # xmm3 = xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm1[0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm11, %xmm1
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm3, %xmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,1,1,3,8,8,9,9]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm4, %zmm10
+; AVX512DQ-FAST-NEXT:    vprold $16, %xmm14, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm11 = xmm0[1,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm11[0,1],xmm3[2],xmm11[3,4],xmm3[5],xmm11[6,7]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm11
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm2, %zmm11
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rcx), %xmm0
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm15, %xmm0, %xmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm14 = xmm3[1,1,2,2]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm14[0],xmm2[1],xmm14[2,3],xmm2[4],xmm14[5,6],xmm2[7]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm14 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,0,1,2,3,6,7,4,5,6,7,4,5]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm4, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm24, %zmm2, %zmm12
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm12, %zmm6
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm12, %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm1[2,2,2,2,6,6,6,6]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7,8,9],ymm4[10],ymm3[11,12],ymm4[13],ymm3[14,15]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm12[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm1[0,1,1,3,4,5,5,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm7[0,1],ymm4[2],ymm7[3,4],ymm4[5],ymm7[6,7,8,9],ymm4[10],ymm7[11,12],ymm4[13],ymm7[14,15]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm13, %zmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rax), %zmm3
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x8 {{.*#+}} zmm7 = [30,5,0,0,31,6,0,31,30,5,0,0,31,6,0,31]
+; AVX512DQ-FAST-NEXT:    # zmm7 = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermi2d %zmm3, %zmm8, %zmm7
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm7
 ; AVX512DQ-FAST-NEXT:    vpbroadcastd 36(%rax), %ymm6
-; AVX512DQ-FAST-NEXT:    vpbroadcastd 40(%rax), %ymm11
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm6, %zmm6
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm13, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm26, %zmm11, %zmm10
+; AVX512DQ-FAST-NEXT:    vpbroadcastd 40(%rax), %ymm8
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm6, %zmm6
 ; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm6
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm10, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm1[2,2,2,2,6,6,6,6]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm5[2],ymm0[3,4],ymm5[5],ymm0[6,7,8,9],ymm5[10],ymm0[11,12],ymm5[13],ymm0[14,15]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm5 = ymm10[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm10 = ymm1[0,1,1,3,4,5,5,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm10[0,1],ymm5[2],ymm10[3,4],ymm5[5],ymm10[6,7,8,9],ymm5[10],ymm10[11,12],ymm5[13],ymm10[14,15]
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm12[0,2,3,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm26, %zmm8, %zmm5
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm6
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm11, %zmm0
+; AVX512DQ-FAST-NEXT:    vpbroadcastd (%rax), %ymm5
+; AVX512DQ-FAST-NEXT:    vpbroadcastd 4(%rax), %ymm10
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm5, %zmm5
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm14[0,2,3,3,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,2,1]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm17[0,0,1,3]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm16[2,2,2,3]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,1,3,2]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm31, %zmm5
-; AVX512DQ-FAST-NEXT:    vpbroadcastd (%rax), %ymm0
-; AVX512DQ-FAST-NEXT:    vpbroadcastd 4(%rax), %ymm12
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm0, %zmm0
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm27, %zmm0
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu (%rsp), %ymm13 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm13[10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm19[0,0,1,3]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm18[2,2,2,3]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm17[2,1,3,2]
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm27, %zmm5
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm15[10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm12 = ymm1[2,2,2,2,6,6,6,6]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm12[0],ymm2[1],ymm12[2,3],ymm2[4],ymm12[5,6,7,8],ymm2[9],ymm12[10,11],ymm2[12],ymm12[13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm13 = ymm1[1,1,1,1,5,5,5,5]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm12[0,1],ymm13[2],ymm12[3,4],ymm13[5],ymm12[6,7,8,9],ymm13[10],ymm12[11,12],ymm13[13],ymm12[14,15]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm21, %zmm12
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <6,u,u,u,7,u,u,7>
-; AVX512DQ-FAST-NEXT:    vpermd %ymm3, %ymm2, %ymm2
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm15, %zmm3
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x8 {{.*#+}} zmm5 = [0,13,4,0,0,14,5,0,0,13,4,0,0,14,5,0]
-; AVX512DQ-FAST-NEXT:    # zmm5 = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm3, %zmm5, %zmm3
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm14 = ymm1[2,2,2,2,6,6,6,6]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm14[0],ymm0[1],ymm14[2,3],ymm0[4],ymm14[5,6,7,8],ymm0[9],ymm14[10,11],ymm0[12],ymm14[13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm15 = ymm1[1,1,1,1,5,5,5,5]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm14 = ymm14[0,1],ymm15[2],ymm14[3,4],ymm15[5],ymm14[6,7,8,9],ymm15[10],ymm14[11,12],ymm15[13],ymm14[14,15]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm30, %zmm14
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm14
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <6,u,u,u,7,u,u,7>
+; AVX512DQ-FAST-NEXT:    vpermd %ymm9, %ymm0, %ymm0
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm3, %zmm3
+; AVX512DQ-FAST-NEXT:    vpermd %zmm3, %zmm21, %zmm3
 ; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm28, %zmm3
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm12, %zmm3
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm5 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm9 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm5, %zmm11, %zmm9
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm23, %zmm18, %zmm19
-; AVX512DQ-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm25, %zmm30
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm30
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm5, %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm8, %zmm7
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm7
-; AVX512DQ-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm29, %zmm14
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm14
-; AVX512DQ-FAST-NEXT:    vpbroadcastd 32(%rax), %ymm5
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm2, %zmm2
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm20, %zmm2
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm19, %zmm2
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm14, %zmm3
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm4 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm9 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm8, %zmm9
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm23, %zmm2, %zmm20
+; AVX512DQ-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm25, %zmm31
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm31
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm2, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm4, %zmm4
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm4
+; AVX512DQ-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm29, %zmm16
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm16
+; AVX512DQ-FAST-NEXT:    vpbroadcastd 32(%rax), %ymm2
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm22, %zmm0
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm20, %zmm0
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, 128(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, (%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm14, 320(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, (%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, 320(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, 256(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, 192(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, 384(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm30, 64(%rax)
-; AVX512DQ-FAST-NEXT:    addq $232, %rsp
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, 192(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, 384(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm31, 64(%rax)
+; AVX512DQ-FAST-NEXT:    addq $248, %rsp
 ; AVX512DQ-FAST-NEXT:    vzeroupper
 ; AVX512DQ-FAST-NEXT:    retq
 ;
@@ -11575,49 +11577,50 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX512F-ONLY-SLOW-LABEL: store_i16_stride7_vf64:
 ; AVX512F-ONLY-SLOW:       # %bb.0:
-; AVX512F-ONLY-SLOW-NEXT:    subq $2168, %rsp # imm = 0x878
+; AVX512F-ONLY-SLOW-NEXT:    subq $2200, %rsp # imm = 0x898
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rcx), %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rdx), %ymm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rdx), %ymm9
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rsi), %ymm9
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rsi), %ymm8
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm2, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm16
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm19
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = <u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm11, %ymm6, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vporq %ymm1, %ymm2, %ymm18
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm14 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm14, %ymm9, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm11, %ymm9, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vporq %ymm1, %ymm2, %ymm16
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm12, %ymm8, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = <12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19>
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm13, %ymm7, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vporq %ymm1, %ymm2, %ymm17
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%r9), %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm3, %ymm2, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm3, %ymm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm21
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm3, %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm20
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%r8), %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm15 = <u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u>
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm15, %ymm3, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm23
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm24
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm1, %ymm2, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rcx), %ymm10
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm10, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdx), %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm11, %ymm4, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rcx), %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm2, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm18
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdx), %ymm6
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm11, %ymm6, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm1, %ymm2, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rsi), %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm14, %ymm5, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm13, %ymm3, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm12, %ymm5, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm13, %ymm4, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm1, %ymm2, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r9), %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm8, %ymm1, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm8, %ymm12
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm10, %ymm1, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm10, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r8), %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm15, %ymm2, %ymm2
@@ -11632,558 +11635,567 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm1, %ymm2, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rsi), %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm14, %ymm2, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm24
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %ymm8
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm13, %ymm8, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm8, %ymm19
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm12, %ymm2, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm23
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm13, %ymm10, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm10, %ymm25
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm1, %ymm2, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm8
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm8, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm11, %ymm2, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm2, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm11, %ymm1, %ymm11
+; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm0, %ymm11, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm14, %ymm1, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm12, %ymm0, %ymm12
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm11
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm13, %ymm11, %ymm13
-; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm0, %ymm13, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm12, %ymm13, %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%r8), %ymm13
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm15, %ymm13, %ymm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%r9), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm12, %ymm0, %ymm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm12, %ymm20
-; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm15, %ymm14, %ymm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vprold $16, %ymm0, %ymm14
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm15 = ymm13[1,2,2,3,5,6,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm14 = ymm15[0,1],ymm14[2],ymm15[3,4],ymm14[5],ymm15[6,7,8,9],ymm14[10],ymm15[11,12],ymm14[13],ymm15[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm15 = ymm0[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm15 = ymm15[2,2,2,3,6,6,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm13[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm15[0,1],ymm12[2],ymm15[3,4],ymm12[5],ymm15[6,7,8,9],ymm12[10],ymm15[11,12],ymm12[13],ymm15[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm15 = [2,1,3,2,10,10,10,11]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm12, %zmm14, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27,22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27]
-; AVX512F-ONLY-SLOW-NEXT:    # ymm14 = mem[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm15
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm14, %ymm15, %ymm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm14, %ymm16
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm6[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm12[0,1],ymm14[2],ymm12[3,4],ymm14[5],ymm12[6,7,8,9],ymm14[10],ymm12[11,12],ymm14[13],ymm12[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm12 = ymm15[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm12[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm6[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm14[0],ymm12[1],ymm14[2,3],ymm12[4],ymm14[5,6,7,8],ymm12[9],ymm14[10,11],ymm12[12],ymm14[13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm12 = ymm9[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm12[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm7[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm14[0],ymm12[1],ymm14[2,3],ymm12[4],ymm14[5,6,7,8],ymm12[9],ymm14[10,11],ymm12[12],ymm14[13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm12, %ymm25
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm12 = ymm9[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm12[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm7[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm14[0,1,2],ymm12[3],ymm14[4,5],ymm12[6],ymm14[7,8,9,10],ymm12[11],ymm14[12,13],ymm12[14],ymm14[15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[0,1,1,3,4,5,5,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm12 = ymm15[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm12[0,0,0,0,4,4,4,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0,1],ymm12[2],ymm6[3,4],ymm12[5],ymm6[6,7,8,9],ymm12[10],ymm6[11,12],ymm12[13],ymm6[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,1,3,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm15, %ymm13, %ymm12
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%r9), %ymm15
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm3, %ymm15, %ymm14
+; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm14, %ymm12, %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vprold $16, %ymm15, %ymm12
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm13[1,2,2,3,5,6,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm14[0,1],ymm12[2],ymm14[3,4],ymm12[5],ymm14[6,7,8,9],ymm12[10],ymm14[11,12],ymm12[13],ymm14[14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm14 = ymm15[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm14[2,2,2,3,6,6,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm13[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm14[0,1],ymm10[2],ymm14[3,4],ymm10[5],ymm14[6,7,8,9],ymm10[10],ymm14[11,12],ymm10[13],ymm14[14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [2,1,3,2,10,10,10,11]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm10, %zmm12, %zmm14
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27,22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27]
+; AVX512F-ONLY-SLOW-NEXT:    # ymm12 = mem[0,1,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm19, %ymm14
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm12, %ymm14, %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm12, %ymm19
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm9[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0,1],ymm12[2],ymm10[3,4],ymm12[5],ymm10[6,7,8,9],ymm12[10],ymm10[11,12],ymm12[13],ymm10[14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm10 = ymm14[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm10[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm9[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm12[0],ymm10[1],ymm12[2,3],ymm10[4],ymm12[5,6,7,8],ymm10[9],ymm12[10,11],ymm10[12],ymm12[13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm10 = ymm8[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm10[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm7[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm12[0],ymm10[1],ymm12[2,3],ymm10[4],ymm12[5,6,7,8],ymm10[9],ymm12[10,11],ymm10[12],ymm12[13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm10 = ymm8[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm10[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm7[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm12[0,1,2],ymm10[3],ymm12[4,5],ymm10[6],ymm12[7,8,9,10],ymm10[11],ymm12[12,13],ymm10[14],ymm12[15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm9[0,1,1,3,4,5,5,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm10 = ymm14[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm10[0,0,0,0,4,4,4,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm9[0,1],ymm10[2],ymm9[3,4],ymm10[5],ymm9[6,7,8,9],ymm10[10],ymm9[11,12],ymm10[13],ymm9[14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,1,3,2]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[1,1,1,1,5,5,5,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm9[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm9[0,0,2,1,4,4,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm9[0,1],ymm7[2],ymm9[3,4],ymm7[5],ymm9[6,7,8,9],ymm7[10],ymm9[11,12],ymm7[13],ymm9[14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm8[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm8[0,0,2,1,4,4,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm8[0,1],ymm7[2],ymm8[3,4],ymm7[5],ymm8[6,7,8,9],ymm7[10],ymm8[11,12],ymm7[13],ymm8[14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm18, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm16, %zmm8
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm17, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm7
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%r8), %ymm12
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm12[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm12[14,15,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm12[16,17,u,u,u,u],zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $248, %ymm9, %ymm7, %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%r9), %ymm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm14
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm14, %ymm15, %ymm14
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm6, %ymm14
-; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm7, %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm12[0,0,2,1,4,4,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,1,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm6, %ymm7
-; AVX512F-ONLY-SLOW-NEXT:    vprold $16, %ymm15, %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm7, %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm14[0,1,2,3],zmm6[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = ymm12[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm12[14,15,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm12[16,17,u,u,u,u],zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $248, %ymm17, %ymm7, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%r9), %ymm14
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm3, %ymm14, %ymm9
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $248, %ymm10, %ymm8, %ymm9
+; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm7, %ymm7
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm12[0,0,2,1,4,4,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,1,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %ymm7, %ymm10, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vprold $16, %ymm14, %ymm7
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %ymm8, %ymm10, %ymm7
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm16
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm7
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm9[0,1,2,3],zmm7[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm18 = [6,5,0,0,7,6,0,7,6,5,0,0,7,6,0,7]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rax), %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermd %zmm6, %zmm18, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm6[0,1,1,3,4,5,5,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm15 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm15, %ymm6, %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpandn %ymm14, %ymm9, %ymm14
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm6, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 72(%rax), %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vpandnq %ymm6, %ymm17, %ymm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rax), %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm15, %ymm6, %ymm12
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm14, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 8(%rax), %ymm12
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [6,5,0,0,7,6,0,7,6,5,0,0,7,6,0,7]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rax), %ymm7
+; AVX512F-ONLY-SLOW-NEXT:    vpermd %zmm7, %zmm8, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm7[0,1,1,3,4,5,5,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm10, %ymm7, %ymm7
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpandnq %ymm9, %ymm17, %ymm9
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535]
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 72(%rax), %ymm7
+; AVX512F-ONLY-SLOW-NEXT:    vpandn %ymm7, %ymm14, %ymm9
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rax), %ymm7
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm10, %ymm7, %ymm12
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm9, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 8(%rax), %ymm9
+; AVX512F-ONLY-SLOW-NEXT:    vpandn %ymm9, %ymm14, %ymm9
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rax), %ymm14
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm10, %ymm14, %ymm12
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm14, %ymm22
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm9, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm2[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm9[0,0,0,0,4,4,4,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm1[0,1,1,3,4,5,5,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm12[0,1],ymm9[2],ymm12[3,4],ymm9[5],ymm12[6,7,8,9],ymm9[10],ymm12[11,12],ymm9[13],ymm12[14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm0[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm9[0,0,2,1,4,4,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm11[1,1,1,1,5,5,5,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm9[0,1],ymm12[2],ymm9[3,4],ymm12[5],ymm9[6,7,8,9],ymm12[10],ymm9[11,12],ymm12[13],ymm9[14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rax), %ymm9
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm10, %ymm9, %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm9[0,1,1,3,4,5,5,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,2,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpandnq %ymm12, %ymm17, %ymm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rax), %ymm7
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm15, %ymm7, %ymm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm7, %ymm22
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm12, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm12 = ymm8[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm10, %zmm10
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm13[0,0,2,1,4,4,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm12 = ymm15[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm12[0,0,0,0,4,4,4,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm2[0,1,1,3,4,5,5,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm14[0,1],ymm12[2],ymm14[3,4],ymm12[5],ymm14[6,7,8,9],ymm12[10],ymm14[11,12],ymm12[13],ymm14[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm12 = ymm1[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm12[0,0,2,1,4,4,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm11[1,1,1,1,5,5,5,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm12[0,1],ymm14[2],ymm12[3,4],ymm14[5],ymm12[6,7,8,9],ymm14[10],ymm12[11,12],ymm14[13],ymm12[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rax), %ymm12
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm12[0,1,1,3,4,5,5,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpandn %ymm14, %ymm9, %ymm9
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm15, %ymm12, %ymm14
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm14, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm13[0,0,2,1,4,4,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2],ymm9[3],ymm0[4,5],ymm9[6],ymm0[7,8,9,10],ymm9[11],ymm0[12,13],ymm9[14],ymm0[15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm7
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm7, %ymm8, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm2[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm9[2],ymm0[3,4],ymm9[5],ymm0[6,7,8,9],ymm9[10],ymm0[11,12],ymm9[13],ymm0[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm2[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm8[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm12[0,1,2],ymm10[3],ymm12[4,5],ymm10[6],ymm12[7,8,9,10],ymm10[11],ymm12[12,13],ymm10[14],ymm12[15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm19, %ymm13
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm13, %ymm2, %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm1[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0,1],ymm12[2],ymm10[3,4],ymm12[5],ymm10[6,7,8,9],ymm12[10],ymm10[11,12],ymm12[13],ymm10[14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4],ymm0[5,6,7,8],ymm2[9],ymm0[10,11],ymm2[12],ymm0[13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm1[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm11[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6,7,8],ymm0[9],ymm2[10,11],ymm0[12],ymm2[13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm31
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm11[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6,7,8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm31
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm0[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7,8,9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm11[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6,7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm11[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8,9,10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermd %zmm12, %zmm18, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpermd %zmm9, %zmm8, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm5[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,0,2,1,4,4,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm3[1,1,1,1,5,5,5,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm4[1,1,1,1,5,5,5,5]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm5[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm3[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm4[2,2,2,2,6,6,6,6]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6,7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm10[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm2[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm4[0,1,1,3,4,5,5,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm6[0,1,1,3,4,5,5,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7,8,9],ymm0[10],ymm1[11,12],ymm0[13],ymm1[14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm7, %ymm10, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm4[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm13, %ymm2, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm6[2,2,2,2,6,6,6,6]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm9
-; AVX512F-ONLY-SLOW-NEXT:    vprold $16, %ymm21, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm23[1,2,2,3,5,6,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vprold $16, %ymm20, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm24[1,2,2,3,5,6,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7,8,9],ymm0[10],ymm1[11,12],ymm0[13],ymm1[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm9[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm8[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,0,0,0,4,4,4,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm23[0,0,2,1,4,4,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm24[0,0,2,1,4,4,6,5]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7,8,9,10],ymm2[11],ymm1[12,13],ymm2[14],ymm1[15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [2,2,3,3,10,9,11,10]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm7, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm15 = [0,5,4,0,0,6,5,0,0,5,4,0,0,6,5,0]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermd 64(%rax), %zmm15, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm14, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [2,2,3,3,10,9,11,10]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [0,5,4,0,0,6,5,0,0,5,4,0,0,6,5,0]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermd 64(%rax), %zmm11, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm17, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm4[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm10[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm6[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm3[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6,7,8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm3[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm4[3,3,3,3,7,7,7,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm5[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7,8,9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm23[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm9[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm24[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm8[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[2,2,2,3,6,6,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7,8,9],ymm0[10],ymm1[11,12],ymm0[13],ymm1[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [4,5,4,5,4,5,6,7,16,17,16,17,16,17,17,19]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7,8,9],ymm0[10],ymm1[11,12],ymm0[13],ymm1[14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [4,5,4,5,4,5,6,7,16,17,16,17,16,17,17,19]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%r9), %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%r8), %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm3[0,1,3,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm1, %zmm11, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm6[2,3,3,3,6,7,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,3,2]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%r8), %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm3[0,1,3,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm4, %zmm12, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm7[2,3,3,3,6,7,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,1,3,2]
 ; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 96(%rax), %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm4, %zmm1, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rsi), %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vprold $16, %xmm4, %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm5[1,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm9[0,1],xmm6[2],xmm9[3,4],xmm6[5],xmm9[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm23
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rcx), %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rdx), %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq {{.*#+}} xmm13 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm13, %xmm4, %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm5[1,1,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm9[0],xmm6[1],xmm9[2,3],xmm6[4],xmm9[5,6],xmm6[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,7,6]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm5, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm15
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rsi), %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm4
+; AVX512F-ONLY-SLOW-NEXT:    vprold $16, %xmm2, %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm4[1,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1],xmm5[2],xmm6[3,4],xmm5[5],xmm6[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm18
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rcx), %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rdx), %xmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq {{.*#+}} xmm6 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm6, %xmm2, %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm6, %xmm7
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm4[1,1,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0],xmm5[1],xmm6[2,3],xmm5[4],xmm6[5,6],xmm5[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm3[0,1,2,3,4,5,7,6]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm0, %xmm2, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm30 = <16,18,19,19,19,19,u,u,0,1,0,1,2,3,2,3>
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm3, %zmm30, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 100(%rax), %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 104(%rax), %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm28, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rcx), %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm29
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm13, %xmm2, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2,3],xmm2[4],xmm3[5,6],xmm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm4[0,1,3,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <16,16,17,17,17,17,u,u,0,1,0,1,2,3,2,3>
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm3, %zmm5, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm0, %xmm1, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm28 = <16,18,19,19,19,19,u,u,0,1,0,1,2,3,2,3>
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm2, %zmm28, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 100(%rax), %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 104(%rax), %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm16, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rcx), %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm30
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm7, %xmm1, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm7, %xmm8
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2,3],xmm1[4],xmm2[5,6],xmm1[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[0,1,3,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <16,16,17,17,17,17,u,u,0,1,0,1,2,3,2,3>
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm2, %zmm3, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rsi), %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm0, %xmm6, %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,0,1,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm9 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm9, %xmm27
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm0, %xmm5, %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,1,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm26
 ; AVX512F-ONLY-SLOW-NEXT:    vprold $16, %xmm4, %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2],xmm3[3,4],xmm4[5],xmm3[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm6, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm3, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2],xmm2[3,4],xmm4[5],xmm2[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm5, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm2, %zmm4
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%r9), %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%r9), %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%r8), %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm8 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm2[0,1,2,3,4,5,7,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,3,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm16
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm1[0,1,2,3,4,5,7,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,2,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm6 = <0,1,0,1,0,1,1,3,16,18,19,19,19,19,u,u>
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm4, %zmm6, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm4, %zmm6, %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 64(%rax), %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 68(%rax), %ymm9
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm4, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 68(%rax), %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm9, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rcx), %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %xmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm10[4],xmm2[4],xmm10[5],xmm2[5],xmm10[6],xmm2[6],xmm10[7],xmm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm26
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm10[0],xmm2[0],xmm10[1],xmm2[1],xmm10[2],xmm2[2],xmm10[3],xmm2[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm13, %xmm2, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[1,1,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm10[0],xmm2[1],xmm10[2,3],xmm2[4],xmm10[5,6],xmm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm12[0,1,3,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm10, %zmm5, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rsi), %xmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm5[0],xmm10[0],xmm5[1],xmm10[1],xmm5[2],xmm10[2],xmm5[3],xmm10[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm0, %xmm12, %xmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,0,1,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm10[4],xmm5[4],xmm10[5],xmm5[5],xmm10[6],xmm5[6],xmm10[7],xmm5[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm21
-; AVX512F-ONLY-SLOW-NEXT:    vprold $16, %xmm10, %xmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm10[2],xmm5[3,4],xmm10[5],xmm5[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm12, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm3, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm9, %zmm4
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r9), %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r8), %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm2[0,1,2,3,4,5,7,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,3,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm3, %zmm6, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd (%rax), %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 4(%rax), %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm3, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm9, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rcx), %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm19
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm8, %xmm1, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm8, %xmm29
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,1,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm5[0],xmm1[1],xmm5[2,3],xmm1[4],xmm5[5,6],xmm1[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm7[0,1,3,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm5, %zmm3, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %xmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rsi), %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm0, %xmm7, %xmm7
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,0,1,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm14 = xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
+; AVX512F-ONLY-SLOW-NEXT:    vprold $16, %xmm5, %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm5[2],xmm3[3,4],xmm5[5],xmm3[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm7, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm2, %zmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm10
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm10[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,0,2,1,4,4,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm19[1,1,1,1,5,5,5,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm5[2],ymm2[3,4],ymm5[5],ymm2[6,7,8,9],ymm5[10],ymm2[11,12],ymm5[13],ymm2[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm20
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vprold $16, %ymm3, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm9[1,2,2,3,5,6,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm5[0,1],ymm2[2],ymm5[3,4],ymm2[5],ymm5[6,7,8,9],ymm2[10],ymm5[11,12],ymm2[13],ymm5[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm3[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[0,0,0,0,4,4,4,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm9[0,0,2,1,4,4,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1,2],ymm6[3],ymm5[4,5],ymm6[6],ymm5[7,8,9,10],ymm6[11],ymm5[12,13],ymm6[14],ymm5[15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm7, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm10[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm19[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm6[0],ymm2[1],ymm6[2,3],ymm2[4],ymm6[5,6,7,8],ymm2[9],ymm6[10,11],ymm2[12],ymm6[13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm16
-; AVX512F-ONLY-SLOW-NEXT:    vpermd (%rax), %zmm15, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm5, %zmm14, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r9), %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r8), %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm1[0,1,2,3,4,5,7,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm2, %zmm6, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd (%rax), %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 4(%rax), %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm9, %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm6[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm7
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm7[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,0,2,1,4,4,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm25[1,1,1,1,5,5,5,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm20
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm7[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm25[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6,7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm27
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vprold $16, %ymm3, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm5[1,2,2,3,5,6,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7,8,9],ymm1[10],ymm2[11,12],ymm1[13],ymm2[14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm3[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,0,0,0,4,4,4,4]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm7[0,1,1,3,4,5,5,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm5[0,1],ymm2[2],ymm5[3,4],ymm2[5],ymm5[6,7,8,9],ymm2[10],ymm5[11,12],ymm2[13],ymm5[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm17
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm7[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm5[2],ymm2[3,4],ymm5[5],ymm2[6,7,8,9],ymm5[10],ymm2[11,12],ymm5[13],ymm2[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm18
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm7[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm5 = ymm6[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm5[1],ymm2[2,3],ymm5[4],ymm2[5,6,7,8],ymm5[9],ymm2[10,11],ymm5[12],ymm2[13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm19[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm5 = ymm10[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1,2],ymm5[3],ymm2[4,5],ymm5[6],ymm2[7,8,9,10],ymm5[11],ymm2[12,13],ymm5[14],ymm2[15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm9[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm5 = ymm3[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[2,2,2,3,6,6,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm5[0,1],ymm2[2],ymm5[3,4],ymm2[5],ymm5[6,7,8,9],ymm2[10],ymm5[11,12],ymm2[13],ymm5[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%r9), %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%r8), %xmm14
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm14[0],xmm6[0],xmm14[1],xmm6[1],xmm14[2],xmm6[2],xmm14[3],xmm6[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm5[0,1,3,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm12, %zmm11, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm5[0,0,2,1,4,4,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1,2],ymm6[3],ymm2[4,5],ymm6[6],ymm2[7,8,9,10],ymm6[11],ymm2[12,13],ymm6[14],ymm2[15]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm10, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm8[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,0,0,0,4,4,4,4]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm10[0,1,1,3,4,5,5,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm6[0,1],ymm1[2],ymm6[3,4],ymm1[5],ymm6[6,7,8,9],ymm1[10],ymm6[11,12],ymm1[13],ymm6[14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm24
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm13, %ymm8, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm10[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm6[2],ymm1[3,4],ymm6[5],ymm1[6,7,8,9],ymm6[10],ymm1[11,12],ymm6[13],ymm1[14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm21
+; AVX512F-ONLY-SLOW-NEXT:    vpermd (%rax), %zmm11, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm17, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm10[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm8[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6,7,8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm25[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm7[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7,8,9,10],ymm2[11],ymm1[12,13],ymm2[14],ymm1[15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm5[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm3[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[2,2,2,3,6,6,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7,8,9],ymm1[10],ymm2[11,12],ymm1[13],ymm2[14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%r9), %xmm7
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%r8), %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm2[0],xmm7[0],xmm2[1],xmm7[1],xmm2[2],xmm7[2],xmm2[3],xmm7[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm6[0,1,3,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm11, %zmm12, %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm22[2,3,3,3,6,7,7,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,1,3,2]
 ; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 32(%rax), %ymm12
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm11, %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm1, %zmm22
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm11, %zmm25
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm15, %zmm25
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vprold $16, %xmm2, %xmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm1[1,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm15[0,1],xmm12[2],xmm15[3,4],xmm12[5],xmm15[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm12
+; AVX512F-ONLY-SLOW-NEXT:    vprold $16, %xmm12, %xmm15
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm1[1,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm13[0,1],xmm15[2],xmm13[3,4],xmm15[5],xmm13[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm13 = xmm1[0],xmm12[0],xmm1[1],xmm12[1],xmm1[2],xmm12[2],xmm1[3],xmm12[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm12[4],xmm1[4],xmm12[5],xmm1[5],xmm12[6],xmm1[6],xmm12[7],xmm1[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm14[4],xmm6[4],xmm14[5],xmm6[5],xmm14[6],xmm6[6],xmm14[7],xmm6[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm0, %xmm8, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm7[4],xmm2[5],xmm7[5],xmm2[6],xmm7[6],xmm2[7],xmm7[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm0, %xmm1, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm4
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm4
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm0, %xmm12, %xmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm0, %xmm13, %xmm4
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm24 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm24 = mem[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm19 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm19 = mem[0,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm25 = ymm25[0,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm8 = mem[2,1,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm5[0,1,2,3,4,5,7,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm1, %zmm30, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 36(%rax), %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 40(%rax), %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm30
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm28, %zmm30
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm13, %xmm0, %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm14
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm14[1,1,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm15[0],xmm5[1],xmm15[2,3],xmm5[4],xmm15[5,6],xmm5[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm14[0],xmm0[0],xmm14[1],xmm0[1],xmm14[2],xmm0[2],xmm14[3],xmm0[3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm0, %xmm2, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm23 = mem[2,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm22 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm22 = mem[0,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm18 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm18 = mem[0,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm17 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm17 = mem[2,1,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm6[0,1,2,3,4,5,7,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm0, %zmm28, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 36(%rax), %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 40(%rax), %ymm6
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm28
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm28
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm0, %xmm6, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm0[1,1,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm7[0],xmm2[1],xmm7[2,3],xmm2[4],xmm7[5,6],xmm2[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm4[0,1,2,3,5,5,7,6,8,9,10,11,13,13,15,14]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[3,3,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd $233, {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm23 = mem[1,2,2,3,5,6,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm4[2,1,2,3,6,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm13 = ymm13[0,0,3,3,4,5,6,7,8,8,11,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm28 = ymm13[2,2,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm2[0,0,1,1]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm0[0,2,3,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm2[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm0[2,1,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5,5,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,0,1,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm3[0,0,1,1]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm0[0,2,3,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm3[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm0[2,1,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,5,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm5[0,0,1,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm5 = mem[2,1,3,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm10 = mem[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq $250, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm11 = mem[2,2,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermpd $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm0 = mem[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm29 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm29 = mem[0,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm29 = ymm0[3,3,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd $233, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm2 = mem[1,2,2,3,5,6,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm4[2,1,2,3,6,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm10 = ymm10[0,0,3,3,4,5,6,7,8,8,11,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm10[2,2,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,1,1]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm30, %xmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm4[0,2,3,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm4[2,1,2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,5,5,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,1,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,1,1]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm4[0,2,3,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[0,0,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm14[2,1,2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,5,5,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,0,1,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm4 = mem[2,1,3,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm5 = mem[2,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq $250, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm6 = mem[2,2,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermpd $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm7 = mem[2,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovups %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm31 = ymm31[0,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm30 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm30 = mem[0,2,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm26 # 32-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    # ymm26 = mem[2,1,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm21 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm21 = mem[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm15 = mem[0,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm4 = mem[2,1,3,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm12 = mem[2,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm19 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm19 = mem[2,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm11 = mem[0,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm16 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm16 = mem[2,1,3,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm7 = mem[2,2,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm20 = ymm20[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm16[0,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm17 = ymm17[2,1,3,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm18[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm19, %zmm24, %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm25, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm27[0,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm24[2,1,3,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm21[2,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm22, %zmm23, %zmm22
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm17, %zmm18, %zmm23
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm27 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm24, %zmm27, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm23[2,1,3,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm8
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm28
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm8 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm13[0,1,2,3],zmm8[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm13 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm8 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm23 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm23 = zmm2[0,1,2,3],zmm23[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm23 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm13 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm7 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm24 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm13, %zmm24, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm8, %zmm9, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm6 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm8 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm6, %zmm24, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm23, %zmm9, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm2, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm2, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm18
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm22, %zmm27, %zmm23
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,1,3,2]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %ymm23, %ymm17, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %ymm2, %ymm17, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm1[0,1,2,3],zmm2[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm2 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm1 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm1[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm3 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm10 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm8 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm10, %zmm22, %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm17, %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm15, %zmm2 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm10 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm22, %zmm10
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm3, %zmm17, %zmm10
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm1, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm18
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm18
+; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm23, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogd $226, 124(%r8){1to8}, %ymm1, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm19 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm23 = zmm2[0,1,2,3],zmm5[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm23 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogd $226, 124(%r8){1to8}, %ymm2, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm28[0,1,2,3],zmm0[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %ymm2, %ymm3, %ymm29
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm21 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm24 = zmm1[0,1,2,3],zmm2[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm24 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm29, %zmm0, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm2[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm1, %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm28 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm1, %zmm23
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm29 # 64-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm13 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm9 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm21, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm4, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm17 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm6 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm19, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm16, %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm3, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm20, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm17, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm3, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm3, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm20, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm14, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm3, %zmm4
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm0, %zmm19
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm0, %zmm20
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm5, %zmm0, %zmm21
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm4, %zmm0, %zmm20
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm29, %zmm0, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm26, %zmm31, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm27, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm31, %zmm0, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm26, %zmm30, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm27, %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    # ymm0 = mem[0,2,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw $180, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
@@ -12193,754 +12205,769 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    # ymm3 = mem[2,1,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm5 = mem[0,0,1,1]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm3, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm5 = mem[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # xmm6 = mem[2,1,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,5,5,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,0,1,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm4 = mem[0,0,1,1]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm4 = mem[0,0,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # xmm5 = mem[2,1,2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,5,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,1,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm26 # 32-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    # ymm26 = mem[0,0,1,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw $248, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # xmm10 = mem[0,2,3,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm11 = mem[0,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw $180, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # xmm12 = mem[0,1,3,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[0,0,1,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm14 = mem[2,1,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm15 = mem[0,0,1,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm16 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm16 = mem[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # xmm2 = mem[2,1,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,5,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,1,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm17 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm17 = mem[0,0,1,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw $248, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # xmm4 = mem[0,2,3,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw $248, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # xmm7 = mem[0,2,3,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,0,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm9 = mem[0,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw $180, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # xmm11 = mem[0,1,3,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[0,0,1,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm12 = mem[2,1,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm13 = mem[0,0,1,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm14 = mem[0,0,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # xmm15 = mem[2,1,2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,4,5,5,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[0,0,1,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm16 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm16 = mem[0,0,1,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw $248, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # xmm1 = mem[0,2,3,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm27, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm12[0,1,1,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm11, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm14, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm27, %zmm11
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm11[0,1,1,3]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm9, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm12, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm27, %zmm9
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm0, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm11, %zmm0, %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm5, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm26, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm24, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm16, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm17, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm24, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm0, %zmm11
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm9, %zmm0, %zmm25
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm26, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm22, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm14, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm16, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm22, %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm0, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm0, %zmm30
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm18, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm28, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm0, %zmm28
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm18, %zmm24
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, 320(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, 256(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, 192(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, 128(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, 64(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, (%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, 448(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm24, 320(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm28, 256(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, 192(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, 128(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, 64(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, (%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, 448(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, 704(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, 640(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, 576(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, 512(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, 640(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, 576(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, 512(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, 384(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, 768(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, 832(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    addq $2168, %rsp # imm = 0x878
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, 768(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, 832(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    addq $2200, %rsp # imm = 0x898
 ; AVX512F-ONLY-SLOW-NEXT:    vzeroupper
 ; AVX512F-ONLY-SLOW-NEXT:    retq
 ;
 ; AVX512F-ONLY-FAST-LABEL: store_i16_stride7_vf64:
 ; AVX512F-ONLY-FAST:       # %bb.0:
-; AVX512F-ONLY-FAST-NEXT:    subq $1432, %rsp # imm = 0x598
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rcx), %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm4, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rdx), %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm5, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vporq %ymm0, %ymm1, %ymm16
+; AVX512F-ONLY-FAST-NEXT:    subq $1496, %rsp # imm = 0x5D8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rcx), %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = [128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm15, %ymm0, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm20
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rdx), %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm9, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm0, %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vporq %ymm2, %ymm3, %ymm16
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rsi), %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm15, %ymm6, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm6, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rdi), %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm7, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vporq %ymm0, %ymm1, %ymm19
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = <12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm7, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vporq %ymm2, %ymm3, %ymm17
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%r9), %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%r8), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vporq %ymm0, %ymm1, %ymm22
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm0, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%r8), %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm0, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vporq %ymm2, %ymm4, %ymm18
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rcx), %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm15, %ymm0, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdx), %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdx), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm0, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm8, %ymm12, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rsi), %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm15, %ymm0, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm0, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm30
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm13, %ymm9, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r9), %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm1, %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm28
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm0, %ymm13, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm0, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r8), %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r9), %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm0, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vporq %ymm9, %ymm13, %ymm17
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r8), %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm1, %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vporq %ymm0, %ymm13, %ymm19
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rcx), %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm0, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm15, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm10, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm1, %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm0, %ymm13, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm0, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm9, %ymm13, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsi), %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm15, %ymm0, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm1, %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm0, %ymm13, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm0, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm9, %ymm13, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rcx), %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm3, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdx), %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm2, %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm9, %ymm11, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rsi), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm15, %ymm0, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm1, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm9, %ymm8, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r8), %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm11, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r9), %ymm15
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm15, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vporq %ymm9, %ymm8, %ymm18
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm8 = ymm4[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm9 = ymm5[0,1,1,3,4,5,5,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm9[0,1],ymm8[2],ymm9[3,4],ymm8[5],ymm9[6,7,8,9],ymm8[10],ymm9[11,12],ymm8[13],ymm9[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,1,3,2]
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
-; AVX512F-ONLY-FAST-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm6, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm12, %ymm31
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm12 = ymm7[1,1,1,1,5,5,5,5]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm9 = ymm9[0,1],ymm12[2],ymm9[3,4],ymm12[5],ymm9[6,7,8,9],ymm12[10],ymm9[11,12],ymm12[13],ymm9[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm16, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm19, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%r8), %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm8[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm8[14,15,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm8[16,17,u,u,u,u],zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %ymm13, %ymm9, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm13, %ymm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%r9), %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm13, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm12, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vextracti64x4 $1, %zmm9, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,4,u,u,u,5,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm8, %ymm12, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vprold $16, %ymm13, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,2,2]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm12, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm10[0,1,2,3],zmm9[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = [0,1,4,5,4,5,5,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rax), %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm9, %ymm10, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vpandn %ymm10, %ymm14, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm13, %ymm9, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm12, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,u,u,u,u,u,u,u,u,u,u,u,14,15,12,13,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm6, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm12, %ymm19
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm12 = ymm7[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm12[0,1,2],ymm10[3],ymm12[4,5],ymm10[6],ymm12[7,8,9,10],ymm10[11],ymm12[12,13],ymm10[14],ymm12[15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm6, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm12, %ymm23
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm7[0],ymm6[1],ymm7[2,3],ymm6[4],ymm7[5,6,7,8],ymm6[9],ymm7[10,11],ymm6[12],ymm7[13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = [0,2,2,3,10,9,11,11]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm25, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm6, %ymm4, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm6, %ymm26
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm12 = ymm5[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm12[0],ymm10[1],ymm12[2,3],ymm10[4],ymm12[5,6,7,8],ymm10[9],ymm12[10,11],ymm10[12],ymm12[13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27,22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27]
-; AVX512F-ONLY-FAST-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm6, %ymm4, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm6, %ymm29
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7,8,9],ymm5[10],ymm4[11,12],ymm5[13],ymm4[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [2,2,2,3,8,10,10,11]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm20, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm7, %zmm5, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <5,u,u,u,6,u,u,6>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm8, %ymm4, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rax), %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [30,5,0,0,31,6,0,31,30,5,0,0,31,6,0,31]
-; AVX512F-ONLY-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm28, %zmm9, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 72(%rax), %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpandn %ymm4, %ymm7, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm7, %ymm16
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rax), %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm13, %ymm14
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm13, %ymm5, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm5, %ymm27
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm4, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm22, %zmm0, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%r9), %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%r8), %xmm12
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm12[4],xmm5[4],xmm12[5],xmm5[5],xmm12[6],xmm5[6],xmm12[7],xmm5[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm5, %xmm21
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm7, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm13, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm13 = [0,0,1,1,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm13, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %zmm4, %zmm7, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 8(%rax), %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpandnq %ymm7, %ymm16, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rax), %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rcx), %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm15, %ymm10, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdx), %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm15, %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm0, %ymm13, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rsi), %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm8, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm4, %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm0, %ymm11, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r8), %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm13, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r9), %ymm14
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm14, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm3, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vporq %ymm5, %ymm0, %ymm21
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm2[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm11 = ymm9[0,1,1,3,4,5,5,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm11[0,1],ymm0[2],ymm11[3,4],ymm0[5],ymm11[6,7,8,9],ymm0[10],ymm11[11,12],ymm0[13],ymm11[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,3,2]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm7[1,1,1,1,5,5,5,5]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm11[0,1],ymm3[2],ymm11[3,4],ymm3[5],ymm11[6,7,8,9],ymm3[10],ymm11[11,12],ymm3[13],ymm11[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm16, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm17, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%r8), %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm11[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm11[14,15,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm11[16,17,u,u,u,u],zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %ymm25, %ymm3, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%r9), %ymm5
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm5, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm17, %zmm0, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r9), %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r8), %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm6, (%rsp) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm14 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm14, %xmm14
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm13, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %zmm4, %zmm14, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd {{.*#+}} ymm9 = [18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm15, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm11[0,0,2,1,4,4,6,5]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1,2],ymm7[3],ymm4[4,5],ymm7[6],ymm4[7,8,9,10],ymm7[11],ymm4[12,13],ymm7[14],ymm4[15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <0,1,u,3,10,10,11,11>
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm18, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm5, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %ymm12, %ymm0, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vextracti64x4 $1, %zmm3, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,4,u,u,u,5,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm11, %ymm3, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %ymm0, %ymm17, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm22
+; AVX512F-ONLY-FAST-NEXT:    vprold $16, %ymm5, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,2]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %ymm3, %ymm5, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm0[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,4,5,4,5,5,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rax), %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm0, %ymm1, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpandnq %ymm1, %ymm25, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm0, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm5, %ymm20
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,u,u,u,u,u,u,u,u,14,15,12,13,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm6, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm3, %ymm26
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm7[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3],ymm3[4,5],ymm1[6],ymm3[7,8,9,10],ymm1[11],ymm3[12,13],ymm1[14],ymm3[15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm6, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm5, %ymm29
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm7[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm6[0],ymm3[1],ymm6[2,3],ymm3[4],ymm6[5,6,7,8],ymm3[9],ymm6[10,11],ymm3[12],ymm6[13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [0,2,2,3,10,9,11,11]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm6, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm2, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm7, %ymm27
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm9[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm7[0],ymm1[1],ymm7[2,3],ymm1[4],ymm7[5,6,7,8],ymm1[9],ymm7[10,11],ymm1[12],ymm7[13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27,22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27]
+; AVX512F-ONLY-FAST-NEXT:    # ymm5 = mem[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm2, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm5, %ymm16
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm9 = ymm9[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm7[0,1],ymm9[2],ymm7[3,4],ymm9[5],ymm7[6,7,8,9],ymm9[10],ymm7[11,12],ymm9[13],ymm7[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [2,2,2,3,8,10,10,11]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm17, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm1, %zmm7
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm19, %ymm14
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm0, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm24
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <5,u,u,u,6,u,u,6>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm11, %ymm1, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm1[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm7[0,1,2],ymm4[3],ymm7[4,5],ymm4[6],ymm7[7,8,9,10],ymm4[11],ymm7[12,13],ymm4[14],ymm7[15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm0, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm13 = ymm1[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm13[0],ymm7[1],ymm13[2,3],ymm7[4],ymm13[5,6,7,8],ymm7[9],ymm13[10,11],ymm7[12],ymm13[13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm25, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm26, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm3, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm2[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm6[0],ymm4[1],ymm6[2,3],ymm4[4],ymm6[5,6,7,8],ymm4[9],ymm6[10,11],ymm4[12],ymm6[13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rax), %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [30,5,0,0,31,6,0,31,30,5,0,0,31,6,0,31]
+; AVX512F-ONLY-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm12, %zmm0, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 72(%rax), %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpandnq %ymm0, %ymm23, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rax), %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm7, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm7, %ymm30
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm18, %zmm0, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%r9), %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%r8), %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm2, %xmm20
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm3, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [0,0,1,1,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm18, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %zmm23, %zmm3, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 8(%rax), %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpandnq %ymm0, %ymm23, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rax), %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm1, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm19, %zmm0, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r9), %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r8), %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm3, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm18, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %zmm23, %zmm3, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd {{.*#+}} ymm7 = [18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm14, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm13[0,0,2,1,4,4,6,5]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3],ymm0[4,5],ymm3[6],ymm0[7,8,9,10],ymm3[11],ymm0[12,13],ymm3[14],ymm0[15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <0,1,u,3,10,10,11,11>
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm21, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm26, %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm8, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm4[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3],ymm3[4,5],ymm0[6],ymm3[7,8,9,10],ymm0[11],ymm3[12,13],ymm0[14],ymm3[15]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm29, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm3, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm13 = ymm2[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm6[0,1],ymm13[2],ymm6[3,4],ymm13[5],ymm6[6,7,8,9],ymm13[10],ymm6[11,12],ymm13[13],ymm6[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm20, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm7, %zmm10, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm11[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm6[2],ymm4[3,4],ymm6[5],ymm4[6,7,8,9],ymm6[10],ymm4[11,12],ymm6[13],ymm4[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vprold $16, %ymm15, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm11[1,2,2,3,5,6,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm7[0,1],ymm6[2],ymm7[3,4],ymm6[5],ymm7[6,7,8,9],ymm6[10],ymm7[11,12],ymm6[13],ymm7[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [2,1,3,2,10,10,10,11]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm17, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm8, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm4[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2,3],ymm3[4],ymm5[5,6,7,8],ymm3[9],ymm5[10,11],ymm3[12],ymm5[13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm6, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm27, %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm10, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm15[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm5[0],ymm0[1],ymm5[2,3],ymm0[4],ymm5[5,6,7,8],ymm0[9],ymm5[10,11],ymm0[12],ymm5[13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm16, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm10, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm15[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm5[0,1],ymm6[2],ymm5[3,4],ymm6[5],ymm5[6,7,8,9],ymm6[10],ymm5[11,12],ymm6[13],ymm5[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm17, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, %zmm16
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm24, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm15 = [26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
+; AVX512F-ONLY-FAST-NEXT:    # ymm15 = mem[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm15, %ymm14, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm13[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm3[2],ymm0[3,4],ymm3[5],ymm0[6,7,8,9],ymm3[10],ymm0[11,12],ymm3[13],ymm0[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vprold $16, %ymm14, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm13[1,2,2,3,5,6,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm5[0,1],ymm3[2],ymm5[3,4],ymm3[5],ymm5[6,7,8,9],ymm3[10],ymm5[11,12],ymm3[13],ymm5[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [2,1,3,2,10,10,10,11]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm18, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rax), %zmm19
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [14,21,0,0,15,22,0,15,14,21,0,0,15,22,0,15]
+; AVX512F-ONLY-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rax), %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm2, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm30[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm6[0],ymm4[1],ymm6[2,3],ymm4[4],ymm6[5,6,7,8],ymm4[9],ymm6[10,11],ymm4[12],ymm6[13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm31, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm2, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm2, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm30[1,1,1,1,5,5,5,5]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0,1],ymm7[2],ymm6[3,4],ymm7[5],ymm6[6,7,8,9],ymm7[10],ymm6[11,12],ymm7[13],ymm6[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm20, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm29, %ymm31
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm1[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm7[2],ymm4[3,4],ymm7[5],ymm4[6,7,8,9],ymm7[10],ymm4[11,12],ymm7[13],ymm4[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm7 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm11 = ymm1[0,1,1,3,4,5,5,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm11[0,1],ymm7[2],ymm11[3,4],ymm7[5],ymm11[6,7,8,9],ymm7[10],ymm11[11,12],ymm7[13],ymm11[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm17, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm6, %zmm23, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vprold $16, %ymm15, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm19 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm19[1,2,2,3,5,6,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm6[0,1],ymm4[2],ymm6[3,4],ymm4[5],ymm6[6,7,8,9],ymm4[10],ymm6[11,12],ymm4[13],ymm6[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm15, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm11 = ymm19[0,0,2,1,4,4,6,5]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0,1,2],ymm11[3],ymm6[4,5],ymm11[6],ymm6[7,8,9,10],ymm11[11],ymm6[12,13],ymm11[14],ymm6[15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = [2,2,3,3,10,9,11,10]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm25, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm27, %zmm28, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [0,13,4,0,0,14,5,0,0,13,4,0,0,14,5,0]
-; AVX512F-ONLY-FAST-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm4, %zmm29, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm6, %zmm28, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm0, %zmm19, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm22, %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm0, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm1[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5,6,7,8],ymm3[9],ymm2[10,11],ymm3[12],ymm2[13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rcx), %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm13, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm28[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0],ymm0[1],ymm3[2,3],ymm0[4],ymm3[5,6,7,8],ymm0[9],ymm3[10,11],ymm0[12],ymm3[13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm28[1,1,1,1,5,5,5,5]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1],ymm5[2],ymm3[3,4],ymm5[5],ymm3[6,7,8,9],ymm5[10],ymm3[11,12],ymm5[13],ymm3[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm17, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu (%rsp), %ymm1 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm1, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm2[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm5[2],ymm0[3,4],ymm5[5],ymm0[6,7,8,9],ymm5[10],ymm0[11,12],ymm5[13],ymm0[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm5 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm2[0,1,1,3,4,5,5,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm2, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm6[0,1],ymm5[2],ymm6[3,4],ymm5[5],ymm6[6,7,8,9],ymm5[10],ymm6[11,12],ymm5[13],ymm6[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm18, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vprold $16, %ymm8, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm2[1,2,2,3,5,6,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0,1],ymm0[2],ymm3[3,4],ymm0[5],ymm3[6,7,8,9],ymm0[10],ymm3[11,12],ymm0[13],ymm3[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm8, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm2[0,0,2,1,4,4,6,5]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm2, %ymm14
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1,2],ymm6[3],ymm3[4,5],ymm6[6],ymm3[7,8,9,10],ymm6[11],ymm3[12,13],ymm6[14],ymm3[15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [2,2,3,3,10,9,11,10]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm2, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm30, %zmm12, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,13,4,0,0,14,5,0,0,13,4,0,0,14,5,0]
+; AVX512F-ONLY-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm0, %zmm2, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm25, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm1, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm4[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6,7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rcx), %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rdx), %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rdx), %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm11[0],xmm0[0],xmm11[1],xmm0[1],xmm11[2],xmm0[2],xmm11[3],xmm0[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,0,1,2,3,6,7,4,5,6,7,4,5]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm3, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,2,2,3,8,9,9,11]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm0, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm5, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm0 = ymm30[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm0[0,1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7,8,9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rcx), %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdx), %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm3 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm1, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm3, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm7[1,1,2,2]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm3[0],xmm0[1],xmm3[2,3],xmm0[4],xmm3[5,6],xmm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm7[4],xmm1[4],xmm7[5],xmm1[5],xmm7[6],xmm1[6],xmm7[7],xmm1[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,1,1,3,8,8,9,9]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm3, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rsi), %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vprold $16, %xmm3, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm6 = xmm0[1,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0,1],xmm4[2],xmm6[3,4],xmm4[5],xmm6[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm3, %xmm29
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,0,1,2,3,6,7,4,5,6,7,4,5]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm2, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [0,2,2,3,8,9,9,11]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm31, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm13, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm28[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3],ymm2[4,5],ymm0[6],ymm2[7,8,9,10],ymm0[11],ymm2[12,13],ymm0[14],ymm2[15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rsi), %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rdi), %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm4, %xmm26
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm3, %xmm24
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm26 = [0,0,1,1,8,8,10,9]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm26, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm4, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm21, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,1,2,3,6,7,4,5,6,7,4,5,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm3, %xmm12
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [0,0,0,1,8,9,9,11]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm21, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rcx), %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm1, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm8 = xmm3[1,1,2,2]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm8[0],xmm7[1],xmm8[2,3],xmm7[4],xmm8[5,6],xmm7[7]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm8 = xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm2, %xmm24
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm13, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsi), %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vprold $16, %xmm8, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm2[1,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1],xmm3[2],xmm7[3,4],xmm3[5],xmm7[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm9, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm3, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm26, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm4, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 64(%rax), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 68(%rax), %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm1, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm1, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsp), %xmm0 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm0, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm6, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm21, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd (%rax), %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 4(%rax), %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm4, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm1, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rsi), %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rdi), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm9[0],xmm0[1],xmm9[1],xmm0[2],xmm9[2],xmm0[3],xmm9[3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [2,1,3,3,8,8,9,9]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm2, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm19[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [2,1,3,3,8,8,9,9]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm3, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm16, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm15, %ymm8, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm14[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rcx), %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdx), %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm4 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm2, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm4, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm6 = xmm1[1,1,2,2]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0],xmm5[1],xmm6[2,3],xmm5[4],xmm6[5,6],xmm5[7]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm7, %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm7, %xmm1, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,1,1,3,8,8,9,9]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm1, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm4, (%rsp) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rsi), %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vprold $16, %xmm6, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm4[1,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm1[2],xmm5[3,4],xmm1[5],xmm5[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm9, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm27 = [0,0,1,1,8,8,10,9]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm27, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm5, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm20, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [0,1,2,3,6,7,4,5,6,7,4,5,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = [0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm2, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm7, %xmm2, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [0,0,0,1,8,9,9,11]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm16, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rcx), %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm4, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm8 = xmm9[1,1,2,2]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm8[0],xmm6[1],xmm8[2,3],xmm6[4],xmm8[5,6],xmm6[7]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm9[0],xmm4[0],xmm9[1],xmm4[1],xmm9[2],xmm4[2],xmm9[3],xmm4[3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm8, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm14, %xmm21
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm11, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsi), %xmm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vprold $16, %xmm11, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm4[1,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm9[0,1],xmm6[2],xmm9[3,4],xmm6[5],xmm9[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm4[0],xmm11[0],xmm4[1],xmm11[1],xmm4[2],xmm11[2],xmm4[3],xmm11[3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm9, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm3, %xmm28
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm27, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm8, %zmm5, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 64(%rax), %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 68(%rax), %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm5, %zmm23
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm4, %zmm23
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm14, %zmm2, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm2, %zmm13
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm2, %zmm23
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm1, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm10, %xmm22
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm7, %xmm1, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm7, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm16, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd (%rax), %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 4(%rax), %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm5, %zmm20
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm4, %zmm20
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm9, %zmm2, %zmm20
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm4[10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm20 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm20[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6,7,8],ymm2[9],ymm3[10,11],ymm2[12],ymm3[13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
-; AVX512F-ONLY-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm4, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm16
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm20[1,1,1,1,5,5,5,5]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7,8,9],ymm4[10],ymm3[11,12],ymm4[13],ymm3[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [2,2,2,3,8,10,10,11]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm3, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm31, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm15, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm7[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7,8,9],ymm3[10],ymm2[11,12],ymm3[13],ymm2[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm15[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm7[0,1,1,3,4,5,5,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm7, %ymm21
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm14 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7,8,9],ymm3[10],ymm4[11,12],ymm3[13],ymm4[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm17, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [2,2,2,3,8,8,8,9]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%r9), %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%r8), %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm4, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm10, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm6, %zmm23, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vprold $16, %ymm8, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm4[10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm17 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm17[1,2,2,3,5,6,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm7[0,1],ymm6[2],ymm7[3,4],ymm6[5],ymm7[6,7,8,9],ymm6[10],ymm7[11,12],ymm6[13],ymm7[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm8, %ymm19
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm8 = ymm17[0,0,2,1,4,4,6,5]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm7[0,1,2],ymm8[3],ymm7[4,5],ymm8[6],ymm7[7,8,9,10],ymm8[11],ymm7[12,13],ymm8[14],ymm7[15]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm25, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rax), %zmm25
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm23, %zmm25, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm6, %zmm29, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm7, %zmm28, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm28 = [6,7,3,3,7,7,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm27, %ymm28, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 96(%rax), %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm31, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm30 = [0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm30, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vprold $16, %xmm9, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm0[0,1],xmm1[2],xmm0[3,4],xmm1[5],xmm0[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm14, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rcx), %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm17[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6,7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm17[1,1,1,1,5,5,5,5]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm5[2],ymm2[3,4],ymm5[5],ymm2[6,7,8,9],ymm5[10],ymm2[11,12],ymm5[13],ymm2[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [2,2,2,3,8,10,10,11]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm5, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm7[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm5[2],ymm1[3,4],ymm5[5],ymm1[6,7,8,9],ymm5[10],ymm1[11,12],ymm5[13],ymm1[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm5 = ymm9[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm7[0,1,1,3,4,5,5,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm7, %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm6[0,1],ymm5[2],ymm6[3,4],ymm5[5],ymm6[6,7,8,9],ymm5[10],ymm6[11,12],ymm5[13],ymm6[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm18, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vprold $16, %ymm5, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm16 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm16[1,2,2,3,5,6,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7,8,9],ymm1[10],ymm2[11,12],ymm1[13],ymm2[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm5, %ymm18
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm16[0,0,2,1,4,4,6,5]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1,2],ymm5[3],ymm2[4,5],ymm5[6],ymm2[7,8,9,10],ymm5[11],ymm2[12,13],ymm5[14],ymm2[15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [2,2,3,3,10,9,11,10]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm5, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [2,2,2,3,8,8,8,9]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%r9), %xmm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%r8), %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm10, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm5, %zmm13
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm19, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [0,13,4,0,0,14,5,0,0,13,4,0,0,14,5,0]
+; AVX512F-ONLY-FAST-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm1, %zmm5, %zmm19
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm25, %zmm19
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm19
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rcx), %xmm12
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdx), %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm8[0],xmm14[0],xmm8[1],xmm14[1],xmm8[2],xmm14[2],xmm8[3],xmm14[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm24, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm15[14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm9 = ymm21[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm9[0],ymm1[1],ymm9[2,3],ymm1[4],ymm9[5,6,7,8],ymm1[9],ymm9[10,11],ymm1[12],ymm9[13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,2,2,3,8,9,9,11]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm5[4,5,2,3,4,5,6,7,8,9,10,11,10,11,8,9]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [0,0,2,1,8,8,9,11]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm22, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm11[4],xmm0[4],xmm11[5],xmm0[5],xmm11[6],xmm0[6],xmm11[7],xmm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm8[0],xmm12[0],xmm8[1],xmm12[1],xmm8[2],xmm12[2],xmm8[3],xmm12[3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm21, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm9[14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm9 = ymm15[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm15 = ymm9[0],ymm2[1],ymm9[2,3],ymm2[4],ymm9[5,6,7,8],ymm2[9],ymm9[10,11],ymm2[12],ymm9[13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm31, %zmm15
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm31 = [6,7,3,3,7,7,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm30, %ymm31, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 96(%rax), %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm21
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm30 = [65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm13, %zmm30, %zmm21
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm21
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm24, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm26, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; AVX512F-ONLY-FAST-NEXT:    vprold $16, %xmm26, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm24[1,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm2[0,1],xmm1[2],xmm2[3,4],xmm1[5],xmm2[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,14,15,12,13,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm17[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5],ymm1[6],ymm2[7,8,9,10],ymm1[11],ymm2[12,13],ymm1[14],ymm2[15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rsi), %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm13 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm28, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm13, %xmm13
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [2,1,3,3,8,8,9,9]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm2, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[4,5,2,3,4,5,6,7,8,9,10,11,10,11,8,9]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [0,0,2,1,8,8,9,11]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm17, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm29, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7]
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm24 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm24, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[1,1,2,2]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm11[0],xmm0[1],xmm11[2,3],xmm0[4],xmm11[5,6],xmm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,2,3,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm26, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm26 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm7, %zmm26, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25,22,23,22,23,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm5[2,2,2,2]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm2, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm29 = [0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm29, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm4, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm27 = [0,1,1,3,8,8,9,9]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm27, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm16, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,14,15,12,13,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29]
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm20[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm11 = ymm4[0,1,2],ymm2[3],ymm4[4,5],ymm2[6],ymm4[7,8,9,10],ymm2[11],ymm4[12,13],ymm2[14],ymm4[15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rsi), %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm4, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [2,1,3,3,8,8,9,9]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm0, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 100(%rax), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 104(%rax), %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm21, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm19, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm17[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm3[2],ymm0[3,4],ymm3[5],ymm0[6,7,8,9],ymm3[10],ymm0[11,12],ymm3[13],ymm0[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r9), %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm24, %xmm13
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm0, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[1,1,2,2]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm9[0],xmm0[1],xmm9[2,3],xmm0[4],xmm9[5,6],xmm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,2,3,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm27, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm25, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %ymm3, %ymm30, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vextracti64x4 $1, %zmm3, %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm2, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm22, %xmm13
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm10, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [0,1,1,3,8,8,9,9]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm28, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm6[0,1,2,3,5,5,7,6,8,9,10,11,13,13,15,14]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm26 = ymm2[3,3,3,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25,22,23,22,23,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm29 = ymm2[2,2,2,2]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm15, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 100(%rax), %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 104(%rax), %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm2, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm22, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm18, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm16[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm3[0,1],ymm6[2],ymm3[3,4],ymm6[5],ymm3[6,7,8,9],ymm6[10],ymm3[11,12],ymm6[13],ymm3[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r9), %xmm15
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r8), %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm6[0,1,2,3,6,7,4,5,6,7,4,5,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [2,2,2,3,8,8,8,9]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm17, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm23, %ymm28, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 32(%rax), %ymm20
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm20, %zmm9, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm31, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm11, %zmm30, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm0, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm5[4],xmm2[4],xmm5[5],xmm2[5],xmm5[6],xmm2[6],xmm5[7],xmm2[7]
-; AVX512F-ONLY-FAST-NEXT:    vprold $16, %xmm5, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2],xmm2[3,4],xmm5[5],xmm2[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = [4,5,2,3,4,5,6,7,8,9,10,11,10,11,8,9]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm22, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm24, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm14, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm8[4],xmm14[4],xmm8[5],xmm14[5],xmm8[6],xmm14[6],xmm8[7],xmm14[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [2,2,2,3,8,8,8,9]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm16, %zmm10
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm14, %ymm31, %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 32(%rax), %ymm18
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm18, %zmm9, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm10, %zmm30, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm1, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; AVX512F-ONLY-FAST-NEXT:    vprold $16, %xmm4, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0,1],xmm4[2],xmm5[3,4],xmm4[5],xmm5[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = [4,5,2,3,4,5,6,7,8,9,10,11,10,11,8,9]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm0, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm17, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm24, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm12, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm8[4],xmm12[4],xmm8[5],xmm12[5],xmm8[6],xmm12[6],xmm8[7],xmm12[7]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[1,1,2,2]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm8[0],xmm1[1],xmm8[2,3],xmm1[4],xmm8[5,6],xmm1[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm8[0],xmm0[1],xmm8[2,3],xmm0[4],xmm8[5,6],xmm0[7]
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,2,3,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,0,1,1,8,8,10,9]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm8, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm26, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm29, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm6, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm27, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 36(%rax), %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 40(%rax), %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm21, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm0, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm11, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm27, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm25, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm3[4],xmm15[4],xmm3[5],xmm15[5],xmm3[6],xmm15[6],xmm3[7],xmm15[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm6, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm28, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 36(%rax), %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 40(%rax), %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm3, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm22, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm1, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %ymm7, %ymm0, %ymm29
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogd $226, 124(%r8){1to8}, %ymm0, %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %ymm11, %ymm0, %ymm26
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm26, %zmm0, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm29[0,1,2,3],zmm1[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm12
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm3[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512F-ONLY-FAST-NEXT:    vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm4 = mem[0,1,1,3,4,5,5,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7,8,9],ymm3[10],ymm4[11,12],ymm3[13],ymm4[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
-; AVX512F-ONLY-FAST-NEXT:    vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm5 = mem[1,1,1,1,5,5,5,5]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7,8,9],ymm5[10],ymm4[11,12],ymm5[13],ymm4[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rax), %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [14,21,0,0,15,22,0,15,14,21,0,0,15,22,0,15]
-; AVX512F-ONLY-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm5, %zmm25, %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd (%rsp), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # xmm1 = xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # xmm4 = xmm4[4],mem[4],xmm4[5],mem[5],xmm4[6],mem[6],xmm4[7],mem[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # xmm5 = xmm5[4],mem[4],xmm5[5],mem[5],xmm5[6],mem[6],xmm5[7],mem[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm1, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm5, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm6 = ymm6[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm7 = mem[0,1,1,3,4,5,5,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm7[0,1],ymm6[2],ymm7[3,4],ymm6[5],ymm7[6,7,8,9],ymm6[10],ymm7[11,12],ymm6[13],ymm7[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm8 = mem[1,1,1,1,5,5,5,5]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm7[0,1],ymm8[2],ymm7[3,4],ymm8[5],ymm7[6,7,8,9],ymm8[10],ymm7[11,12],ymm8[13],ymm7[14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [0,1,4,5,4,5,5,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm5, %ymm8, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[12,13],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[16,17],zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpandn %ymm8, %ymm9, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm5, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm6 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vextracti64x4 $1, %zmm11, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogd $226, 124(%r8){1to8}, %ymm11, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} ymm11 = ymm15[0,1,2,3,5,5,7,6,8,9,10,11,13,13,15,14]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[3,3,3,3]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm7[0,1,2,3],zmm9[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw $248, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # xmm9 = mem[0,2,3,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,0,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm10, %ymm8, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[12,13],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[16,17],zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpandn %ymm8, %ymm11, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm10, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw $248, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # xmm10 = mem[0,2,3,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,2,1]
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,1,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw $248, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # xmm11 = mem[0,2,3,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,1,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,1,3,2]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm7, %zmm8, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm7 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,2,3,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,1,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,1,3,2]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm11, %zmm13
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm10 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm7, %zmm26, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm7 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm7, %zmm26, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm1 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm0 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm7, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm7, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm7 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm5 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm10, %zmm25, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm5 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm25, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm1 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm5 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm4, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm4 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm8 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm8
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, 320(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, 256(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, 192(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, 128(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, 448(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, 704(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, 640(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm2, 576(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, 384(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, 64(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, 320(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, 256(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, 192(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, 128(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, (%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, 448(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, 704(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, 640(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 576(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, 384(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, 64(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 512(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, 832(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, 768(%rax)
-; AVX512F-ONLY-FAST-NEXT:    addq $1432, %rsp # imm = 0x598
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, 832(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, 768(%rax)
+; AVX512F-ONLY-FAST-NEXT:    addq $1496, %rsp # imm = 0x5D8
 ; AVX512F-ONLY-FAST-NEXT:    vzeroupper
 ; AVX512F-ONLY-FAST-NEXT:    retq
 ;
 ; AVX512DQ-SLOW-LABEL: store_i16_stride7_vf64:
 ; AVX512DQ-SLOW:       # %bb.0:
-; AVX512DQ-SLOW-NEXT:    subq $2168, %rsp # imm = 0x878
+; AVX512DQ-SLOW-NEXT:    subq $2200, %rsp # imm = 0x898
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rcx), %ymm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rdx), %ymm6
+; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rdx), %ymm9
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rsi), %ymm9
+; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rsi), %ymm8
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128]
 ; AVX512DQ-SLOW-NEXT:    vpshufb %ymm0, %ymm2, %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm16
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm19
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = <u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u>
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm11, %ymm6, %ymm2
-; AVX512DQ-SLOW-NEXT:    vporq %ymm1, %ymm2, %ymm18
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm14 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm14, %ymm9, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm11, %ymm9, %ymm2
+; AVX512DQ-SLOW-NEXT:    vporq %ymm1, %ymm2, %ymm16
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm12, %ymm8, %ymm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = <12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19>
 ; AVX512DQ-SLOW-NEXT:    vpshufb %ymm13, %ymm7, %ymm2
 ; AVX512DQ-SLOW-NEXT:    vporq %ymm1, %ymm2, %ymm17
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%r9), %ymm2
 ; AVX512DQ-SLOW-NEXT:    vpshufb %ymm3, %ymm2, %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm3, %ymm8
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm21
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm3, %ymm10
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm20
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%r8), %ymm3
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm15 = <u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u>
 ; AVX512DQ-SLOW-NEXT:    vpshufb %ymm15, %ymm3, %ymm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm23
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm24
 ; AVX512DQ-SLOW-NEXT:    vpor %ymm1, %ymm2, %ymm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rcx), %ymm10
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm0, %ymm10, %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdx), %ymm4
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm11, %ymm4, %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rcx), %ymm2
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm0, %ymm2, %ymm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm18
+; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdx), %ymm6
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm11, %ymm6, %ymm2
 ; AVX512DQ-SLOW-NEXT:    vpor %ymm1, %ymm2, %ymm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rsi), %ymm5
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm14, %ymm5, %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm3
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm13, %ymm3, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm12, %ymm5, %ymm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm4
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm13, %ymm4, %ymm2
 ; AVX512DQ-SLOW-NEXT:    vpor %ymm1, %ymm2, %ymm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa (%r9), %ymm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm8, %ymm1, %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm8, %ymm12
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm10, %ymm1, %ymm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm10, %ymm3
 ; AVX512DQ-SLOW-NEXT:    vmovdqa (%r8), %ymm2
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vpshufb %ymm15, %ymm2, %ymm2
@@ -12955,558 +12982,567 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-SLOW-NEXT:    vpor %ymm1, %ymm2, %ymm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa (%rsi), %ymm2
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm14, %ymm2, %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm24
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %ymm8
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm13, %ymm8, %ymm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm8, %ymm19
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm12, %ymm2, %ymm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm23
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %ymm10
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm13, %ymm10, %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm10, %ymm25
 ; AVX512DQ-SLOW-NEXT:    vpor %ymm1, %ymm2, %ymm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm8
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm0, %ymm8, %ymm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm2
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm11, %ymm2, %ymm1
-; AVX512DQ-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm2
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm0, %ymm2, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm1
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm11, %ymm1, %ymm11
+; AVX512DQ-SLOW-NEXT:    vpor %ymm0, %ymm11, %ymm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm1
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm14, %ymm1, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm0
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm12, %ymm0, %ymm12
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm11
 ; AVX512DQ-SLOW-NEXT:    vpshufb %ymm13, %ymm11, %ymm13
-; AVX512DQ-SLOW-NEXT:    vpor %ymm0, %ymm13, %ymm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpor %ymm12, %ymm13, %ymm10
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%r8), %ymm13
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm15, %ymm13, %ymm14
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%r9), %ymm0
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm12, %ymm0, %ymm15
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm12, %ymm20
-; AVX512DQ-SLOW-NEXT:    vpor %ymm15, %ymm14, %ymm12
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vprold $16, %ymm0, %ymm14
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm15 = ymm13[1,2,2,3,5,6,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm14 = ymm15[0,1],ymm14[2],ymm15[3,4],ymm14[5],ymm15[6,7,8,9],ymm14[10],ymm15[11,12],ymm14[13],ymm15[14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm15 = ymm0[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm15 = ymm15[2,2,2,3,6,6,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm13[3,3,3,3,7,7,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm15[0,1],ymm12[2],ymm15[3,4],ymm12[5],ymm15[6,7,8,9],ymm12[10],ymm15[11,12],ymm12[13],ymm15[14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm15 = [2,1,3,2,10,10,10,11]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm12, %zmm14, %zmm15
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27,22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27]
-; AVX512DQ-SLOW-NEXT:    # ymm14 = mem[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm15
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm14, %ymm15, %ymm12
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm14, %ymm16
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm6[2,2,2,2,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm12[0,1],ymm14[2],ymm12[3,4],ymm14[5],ymm12[6,7,8,9],ymm14[10],ymm12[11,12],ymm14[13],ymm12[14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm12 = ymm15[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm12[2,2,2,2,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm6[3,3,3,3,7,7,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm14[0],ymm12[1],ymm14[2,3],ymm12[4],ymm14[5,6,7,8],ymm12[9],ymm14[10,11],ymm12[12],ymm14[13,14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm12 = ymm9[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm12[2,2,2,2,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm7[2,2,2,2,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm14[0],ymm12[1],ymm14[2,3],ymm12[4],ymm14[5,6,7,8],ymm12[9],ymm14[10,11],ymm12[12],ymm14[13,14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm12, %ymm25
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm12 = ymm9[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm12[2,2,2,2,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm7[3,3,3,3,7,7,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm14[0,1,2],ymm12[3],ymm14[4,5],ymm12[6],ymm14[7,8,9,10],ymm12[11],ymm14[12,13],ymm12[14],ymm14[15]
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[0,1,1,3,4,5,5,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm12 = ymm15[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm12[0,0,0,0,4,4,4,4]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0,1],ymm12[2],ymm6[3,4],ymm12[5],ymm6[6,7,8,9],ymm12[10],ymm6[11,12],ymm12[13],ymm6[14,15]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,1,3,2]
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm15, %ymm13, %ymm12
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%r9), %ymm15
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm3, %ymm15, %ymm14
+; AVX512DQ-SLOW-NEXT:    vpor %ymm14, %ymm12, %ymm10
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vprold $16, %ymm15, %ymm12
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm13[1,2,2,3,5,6,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm14[0,1],ymm12[2],ymm14[3,4],ymm12[5],ymm14[6,7,8,9],ymm12[10],ymm14[11,12],ymm12[13],ymm14[14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm14 = ymm15[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm14[2,2,2,3,6,6,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm13[3,3,3,3,7,7,7,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm14[0,1],ymm10[2],ymm14[3,4],ymm10[5],ymm14[6,7,8,9],ymm10[10],ymm14[11,12],ymm10[13],ymm14[14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [2,1,3,2,10,10,10,11]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm10, %zmm12, %zmm14
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27,22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27]
+; AVX512DQ-SLOW-NEXT:    # ymm12 = mem[0,1,0,1]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm19, %ymm14
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm12, %ymm14, %ymm10
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm12, %ymm19
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm9[2,2,2,2,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0,1],ymm12[2],ymm10[3,4],ymm12[5],ymm10[6,7,8,9],ymm12[10],ymm10[11,12],ymm12[13],ymm10[14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm10 = ymm14[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm10[2,2,2,2,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm9[3,3,3,3,7,7,7,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm12[0],ymm10[1],ymm12[2,3],ymm10[4],ymm12[5,6,7,8],ymm10[9],ymm12[10,11],ymm10[12],ymm12[13,14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm10 = ymm8[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm10[2,2,2,2,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm7[2,2,2,2,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm12[0],ymm10[1],ymm12[2,3],ymm10[4],ymm12[5,6,7,8],ymm10[9],ymm12[10,11],ymm10[12],ymm12[13,14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm10 = ymm8[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm10[2,2,2,2,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm7[3,3,3,3,7,7,7,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm12[0,1,2],ymm10[3],ymm12[4,5],ymm10[6],ymm12[7,8,9,10],ymm10[11],ymm12[12,13],ymm10[14],ymm12[15]
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm9[0,1,1,3,4,5,5,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm10 = ymm14[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm10[0,0,0,0,4,4,4,4]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm9[0,1],ymm10[2],ymm9[3,4],ymm10[5],ymm9[6,7,8,9],ymm10[10],ymm9[11,12],ymm10[13],ymm9[14,15]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,1,3,2]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[1,1,1,1,5,5,5,5]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm9[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm9[0,0,2,1,4,4,6,5]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm9[0,1],ymm7[2],ymm9[3,4],ymm7[5],ymm9[6,7,8,9],ymm7[10],ymm9[11,12],ymm7[13],ymm9[14,15]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm8[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm8[0,0,2,1,4,4,6,5]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm8[0,1],ymm7[2],ymm8[3,4],ymm7[5],ymm8[6,7,8,9],ymm7[10],ymm8[11,12],ymm7[13],ymm8[14,15]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm18, %zmm6
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm16, %zmm8
 ; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm17, %zmm7
-; AVX512DQ-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm7
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%r8), %ymm12
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm12[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm12[14,15,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm12[16,17,u,u,u,u],zero,zero
-; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm9, %ymm7, %ymm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%r9), %ymm15
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm14
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm14, %ymm15, %ymm14
-; AVX512DQ-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm6, %ymm14
-; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm7, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm12[0,0,2,1,4,4,6,5]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,1,3,3]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm6, %ymm7
-; AVX512DQ-SLOW-NEXT:    vprold $16, %ymm15, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,2]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm7, %ymm6
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm6
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm14[0,1,2,3],zmm6[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = ymm12[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm12[14,15,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm12[16,17,u,u,u,u],zero,zero
+; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm17, %ymm7, %ymm8
+; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%r9), %ymm14
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm3, %ymm14, %ymm9
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm10, %ymm8, %ymm9
+; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm7, %ymm7
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm12[0,0,2,1,4,4,6,5]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,1,3,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm7, %ymm10, %ymm8
+; AVX512DQ-SLOW-NEXT:    vprold $16, %ymm14, %ymm7
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,2]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm8, %ymm10, %ymm7
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm16
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm7
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm9[0,1,2,3],zmm7[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x8 {{.*#+}} zmm18 = [6,5,0,0,7,6,0,7,6,5,0,0,7,6,0,7]
-; AVX512DQ-SLOW-NEXT:    # zmm18 = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rax), %ymm6
-; AVX512DQ-SLOW-NEXT:    vpermd %zmm6, %zmm18, %zmm12
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm6[0,1,1,3,4,5,5,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm15 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm15, %ymm6, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpandn %ymm14, %ymm9, %ymm14
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm6, %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpbroadcastd 72(%rax), %ymm6
-; AVX512DQ-SLOW-NEXT:    vpandnq %ymm6, %ymm17, %ymm14
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rax), %ymm6
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm15, %ymm6, %ymm12
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm14, %zmm12
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpbroadcastd 8(%rax), %ymm12
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x8 {{.*#+}} zmm8 = [6,5,0,0,7,6,0,7,6,5,0,0,7,6,0,7]
+; AVX512DQ-SLOW-NEXT:    # zmm8 = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rax), %ymm7
+; AVX512DQ-SLOW-NEXT:    vpermd %zmm7, %zmm8, %zmm9
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm7[0,1,1,3,4,5,5,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm10, %ymm7, %ymm7
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpandnq %ymm9, %ymm17, %ymm9
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535]
+; AVX512DQ-SLOW-NEXT:    vpbroadcastd 72(%rax), %ymm7
+; AVX512DQ-SLOW-NEXT:    vpandn %ymm7, %ymm14, %ymm9
+; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rax), %ymm7
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm10, %ymm7, %ymm12
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm9, %zmm9
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpbroadcastd 8(%rax), %ymm9
+; AVX512DQ-SLOW-NEXT:    vpandn %ymm9, %ymm14, %ymm9
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rax), %ymm14
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm10, %ymm14, %ymm12
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm14, %ymm22
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm9, %zmm9
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm2[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm9[0,0,0,0,4,4,4,4]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm1[0,1,1,3,4,5,5,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm12[0,1],ymm9[2],ymm12[3,4],ymm9[5],ymm12[6,7,8,9],ymm9[10],ymm12[11,12],ymm9[13],ymm12[14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm0[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm9[0,0,2,1,4,4,6,5]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm11[1,1,1,1,5,5,5,5]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm9[0,1],ymm12[2],ymm9[3,4],ymm12[5],ymm9[6,7,8,9],ymm12[10],ymm9[11,12],ymm12[13],ymm9[14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rax), %ymm9
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm10, %ymm9, %ymm10
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm9[0,1,1,3,4,5,5,7]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,2,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpandnq %ymm12, %ymm17, %ymm12
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rax), %ymm7
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm15, %ymm7, %ymm14
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm7, %ymm22
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm12, %zmm12
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm12 = ymm8[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm10, %zmm10
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm13[0,0,2,1,4,4,6,5]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm12 = ymm15[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm12[0,0,0,0,4,4,4,4]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm2[0,1,1,3,4,5,5,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm14[0,1],ymm12[2],ymm14[3,4],ymm12[5],ymm14[6,7,8,9],ymm12[10],ymm14[11,12],ymm12[13],ymm14[14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm12 = ymm1[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm12[0,0,2,1,4,4,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm11[1,1,1,1,5,5,5,5]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm12[0,1],ymm14[2],ymm12[3,4],ymm14[5],ymm12[6,7,8,9],ymm14[10],ymm12[11,12],ymm14[13],ymm12[14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rax), %ymm12
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm12[0,1,1,3,4,5,5,7]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpandn %ymm14, %ymm9, %ymm9
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm15, %ymm12, %ymm14
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm14, %zmm9
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm13[0,0,2,1,4,4,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2],ymm9[3],ymm0[4,5],ymm9[6],ymm0[7,8,9,10],ymm9[11],ymm0[12,13],ymm9[14],ymm0[15]
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm7
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm7, %ymm8, %ymm0
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm2[2,2,2,2,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm9[2],ymm0[3,4],ymm9[5],ymm0[6,7,8,9],ymm9[10],ymm0[11,12],ymm9[13],ymm0[14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm2[3,3,3,3,7,7,7,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm8[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm12[0,1,2],ymm10[3],ymm12[4,5],ymm10[6],ymm12[7,8,9,10],ymm10[11],ymm12[12,13],ymm10[14],ymm12[15]
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm19, %ymm13
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm13, %ymm2, %ymm10
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm1[2,2,2,2,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0,1],ymm12[2],ymm10[3,4],ymm12[5],ymm10[6,7,8,9],ymm12[10],ymm10[11,12],ymm12[13],ymm10[14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[3,3,3,3,7,7,7,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[2,2,2,2,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4],ymm0[5,6,7,8],ymm2[9],ymm0[10,11],ymm2[12],ymm0[13,14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm1[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,2,2,2,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm11[2,2,2,2,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6,7,8],ymm0[9],ymm2[10,11],ymm0[12],ymm2[13,14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm31
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm11[3,3,3,3,7,7,7,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6,7,8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13,14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm31
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm0[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7,8,9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm11[2,2,2,2,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6,7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13,14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm11[3,3,3,3,7,7,7,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,2,2,2,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8,9,10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermd %zmm12, %zmm18, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpermd %zmm9, %zmm8, %zmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm5[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,0,2,1,4,4,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm3[1,1,1,1,5,5,5,5]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm4[1,1,1,1,5,5,5,5]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm5[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,2,2,2,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm3[2,2,2,2,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm4[2,2,2,2,6,6,6,6]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6,7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13,14,15]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm10[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm2[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm4[0,1,1,3,4,5,5,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm6[0,1,1,3,4,5,5,7]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7,8,9],ymm0[10],ymm1[11,12],ymm0[13],ymm1[14,15]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm7, %ymm10, %ymm0
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm4[2,2,2,2,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm13, %ymm2, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm3
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm6[2,2,2,2,6,6,6,6]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm9
-; AVX512DQ-SLOW-NEXT:    vprold $16, %ymm21, %ymm0
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm23[1,2,2,3,5,6,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm8
+; AVX512DQ-SLOW-NEXT:    vprold $16, %ymm20, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm24[1,2,2,3,5,6,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7,8,9],ymm0[10],ymm1[11,12],ymm0[13],ymm1[14,15]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm9[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm8[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,0,0,0,4,4,4,4]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm23[0,0,2,1,4,4,6,5]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm24[0,0,2,1,4,4,6,5]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7,8,9,10],ymm2[11],ymm1[12,13],ymm2[14],ymm1[15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [2,2,3,3,10,9,11,10]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm7, %zmm1
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x8 {{.*#+}} zmm15 = [0,5,4,0,0,6,5,0,0,5,4,0,0,6,5,0]
-; AVX512DQ-SLOW-NEXT:    # zmm15 = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermd 64(%rax), %zmm15, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm14, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [2,2,3,3,10,9,11,10]
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x8 {{.*#+}} zmm11 = [0,5,4,0,0,6,5,0,0,5,4,0,0,6,5,0]
+; AVX512DQ-SLOW-NEXT:    # zmm11 = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpermd 64(%rax), %zmm11, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm17, %zmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm4[3,3,3,3,7,7,7,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm10[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm6[3,3,3,3,7,7,7,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm3[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6,7,8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13,14,15]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm3[3,3,3,3,7,7,7,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm4[3,3,3,3,7,7,7,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm5[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7,8,9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm23[3,3,3,3,7,7,7,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm9[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm24[3,3,3,3,7,7,7,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm8[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[2,2,2,3,6,6,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7,8,9],ymm0[10],ymm1[11,12],ymm0[13],ymm1[14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [4,5,4,5,4,5,6,7,16,17,16,17,16,17,17,19]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7,8,9],ymm0[10],ymm1[11,12],ymm0[13],ymm1[14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [4,5,4,5,4,5,6,7,16,17,16,17,16,17,17,19]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%r9), %xmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%r8), %xmm2
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm3[0,1,3,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm1, %zmm11, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm6[2,3,3,3,6,7,7,7]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,3,2]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%r8), %xmm1
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm3[0,1,3,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm4, %zmm12, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm7[2,3,3,3,6,7,7,7]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,1,3,2]
 ; AVX512DQ-SLOW-NEXT:    vpbroadcastd 96(%rax), %ymm5
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm4, %zmm1, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rsi), %xmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm5
-; AVX512DQ-SLOW-NEXT:    vprold $16, %xmm4, %xmm6
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm5[1,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm9[0,1],xmm6[2],xmm9[3,4],xmm6[5],xmm9[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm23
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rcx), %xmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rdx), %xmm5
-; AVX512DQ-SLOW-NEXT:    vpbroadcastq {{.*#+}} xmm13 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm13, %xmm4, %xmm6
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm5[1,1,2,2]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm9[0],xmm6[1],xmm9[2,3],xmm6[4],xmm9[5,6],xmm6[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,7,6]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm5, %zmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm15
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rsi), %xmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm4
+; AVX512DQ-SLOW-NEXT:    vprold $16, %xmm2, %xmm5
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm4[1,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1],xmm5[2],xmm6[3,4],xmm5[5],xmm6[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm18
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rcx), %xmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rdx), %xmm4
+; AVX512DQ-SLOW-NEXT:    vpbroadcastq {{.*#+}} xmm6 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm6, %xmm2, %xmm5
+; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm6, %xmm7
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm4[1,1,2,2]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0],xmm5[1],xmm6[2,3],xmm5[4],xmm6[5,6],xmm5[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm3[0,1,2,3,4,5,7,6]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm0, %xmm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm30 = <16,18,19,19,19,19,u,u,0,1,0,1,2,3,2,3>
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm3, %zmm30, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpbroadcastd 100(%rax), %ymm3
-; AVX512DQ-SLOW-NEXT:    vpbroadcastd 104(%rax), %ymm4
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm28, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rcx), %xmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm3
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm29
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm13, %xmm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,2,2]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2,3],xmm2[4],xmm3[5,6],xmm2[7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm4[0,1,3,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <16,16,17,17,17,17,u,u,0,1,0,1,2,3,2,3>
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm3, %zmm5, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm3
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm0, %xmm1, %xmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm28 = <16,18,19,19,19,19,u,u,0,1,0,1,2,3,2,3>
+; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm2, %zmm28, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpbroadcastd 100(%rax), %ymm2
+; AVX512DQ-SLOW-NEXT:    vpbroadcastd 104(%rax), %ymm3
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm16, %zmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rcx), %xmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm2
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm30
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm7, %xmm1, %xmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm7, %xmm8
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,2]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2,3],xmm1[4],xmm2[5,6],xmm1[7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[0,1,3,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <16,16,17,17,17,17,u,u,0,1,0,1,2,3,2,3>
+; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm2, %zmm3, %zmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm2
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rsi), %xmm4
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm0, %xmm6, %xmm6
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,0,1,1]
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm9 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm9, %xmm27
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm0, %xmm5, %xmm5
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,1,1]
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm26
 ; AVX512DQ-SLOW-NEXT:    vprold $16, %xmm4, %xmm4
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2],xmm3[3,4],xmm4[5],xmm3[6,7]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm6, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm3, %zmm4
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2],xmm2[3,4],xmm4[5],xmm2[6,7]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,2,1]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm5, %zmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm2, %zmm4
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%r9), %xmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%r9), %xmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%r8), %xmm4
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm8 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm2[0,1,2,3,4,5,7,6]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,3,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm16
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm1[0,1,2,3,4,5,7,6]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,2,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm6 = <0,1,0,1,0,1,1,3,16,18,19,19,19,19,u,u>
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm4, %zmm6, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm4, %zmm6, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vpbroadcastd 64(%rax), %ymm4
-; AVX512DQ-SLOW-NEXT:    vpbroadcastd 68(%rax), %ymm9
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm4, %zmm4
+; AVX512DQ-SLOW-NEXT:    vpbroadcastd 68(%rax), %ymm5
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm9, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rcx), %xmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdx), %xmm10
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm10[4],xmm2[4],xmm10[5],xmm2[5],xmm10[6],xmm2[6],xmm10[7],xmm2[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm26
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm10[0],xmm2[0],xmm10[1],xmm2[1],xmm10[2],xmm2[2],xmm10[3],xmm2[3]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm13, %xmm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[1,1,2,2]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm10[0],xmm2[1],xmm10[2,3],xmm2[4],xmm10[5,6],xmm2[7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm12[0,1,3,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm10, %zmm5, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %xmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rsi), %xmm10
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm5[0],xmm10[0],xmm5[1],xmm10[1],xmm5[2],xmm10[2],xmm5[3],xmm10[3]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm0, %xmm12, %xmm12
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,0,1,1]
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm10[4],xmm5[4],xmm10[5],xmm5[5],xmm10[6],xmm5[6],xmm10[7],xmm5[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm21
-; AVX512DQ-SLOW-NEXT:    vprold $16, %xmm10, %xmm10
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm10[2],xmm5[3,4],xmm10[5],xmm5[6,7]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm12, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm3, %zmm4
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm9, %zmm4
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%r9), %xmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%r8), %xmm3
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm2[0,1,2,3,4,5,7,6]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,3,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm3, %zmm6, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpbroadcastd (%rax), %ymm3
-; AVX512DQ-SLOW-NEXT:    vpbroadcastd 4(%rax), %ymm5
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm3, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm9, %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rcx), %xmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdx), %xmm5
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm19
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm8, %xmm1, %xmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm8, %xmm29
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,1,2,2]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm5[0],xmm1[1],xmm5[2,3],xmm1[4],xmm5[5,6],xmm1[7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm7[0,1,3,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm5, %zmm3, %zmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %xmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rsi), %xmm5
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm0, %xmm7, %xmm7
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,0,1,1]
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm14 = xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
+; AVX512DQ-SLOW-NEXT:    vprold $16, %xmm5, %xmm5
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm5[2],xmm3[3,4],xmm5[5],xmm3[6,7]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,2,1]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm7, %zmm3
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm2, %zmm3
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm10
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm10[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,0,2,1,4,4,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm19[1,1,1,1,5,5,5,5]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm5[2],ymm2[3,4],ymm5[5],ymm2[6,7,8,9],ymm5[10],ymm2[11,12],ymm5[13],ymm2[14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm20
-; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vprold $16, %ymm3, %ymm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm9[1,2,2,3,5,6,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm5[0,1],ymm2[2],ymm5[3,4],ymm2[5],ymm5[6,7,8,9],ymm2[10],ymm5[11,12],ymm2[13],ymm5[14,15]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm3[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[0,0,0,0,4,4,4,4]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm9[0,0,2,1,4,4,6,5]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1,2],ymm6[3],ymm5[4,5],ymm6[6],ymm5[7,8,9,10],ymm6[11],ymm5[12,13],ymm6[14],ymm5[15]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm7, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm10[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[2,2,2,2,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm19[2,2,2,2,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm6[0],ymm2[1],ymm6[2,3],ymm2[4],ymm6[5,6,7,8],ymm2[9],ymm6[10,11],ymm2[12],ymm6[13,14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm16
-; AVX512DQ-SLOW-NEXT:    vpermd (%rax), %zmm15, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm5, %zmm14, %zmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%r9), %xmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%r8), %xmm2
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm1[0,1,2,3,4,5,7,6]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm2, %zmm6, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpbroadcastd (%rax), %ymm2
+; AVX512DQ-SLOW-NEXT:    vpbroadcastd 4(%rax), %ymm3
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm9, %zmm2
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm6[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm7
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm7[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,0,2,1,4,4,6,5]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm25[1,1,1,1,5,5,5,5]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm20
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm7[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm25[2,2,2,2,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6,7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13,14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm27
+; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vprold $16, %ymm3, %ymm1
+; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm5[1,2,2,3,5,6,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7,8,9],ymm1[10],ymm2[11,12],ymm1[13],ymm2[14,15]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm3[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,0,0,0,4,4,4,4]
-; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm7[0,1,1,3,4,5,5,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm5[0,1],ymm2[2],ymm5[3,4],ymm2[5],ymm5[6,7,8,9],ymm2[10],ymm5[11,12],ymm2[13],ymm5[14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm17
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm7[2,2,2,2,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm5[2],ymm2[3,4],ymm5[5],ymm2[6,7,8,9],ymm5[10],ymm2[11,12],ymm5[13],ymm2[14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm18
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm7[3,3,3,3,7,7,7,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm5 = ymm6[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[2,2,2,2,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm5[1],ymm2[2,3],ymm5[4],ymm2[5,6,7,8],ymm5[9],ymm2[10,11],ymm5[12],ymm2[13,14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm19[3,3,3,3,7,7,7,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm5 = ymm10[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[2,2,2,2,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1,2],ymm5[3],ymm2[4,5],ymm5[6],ymm2[7,8,9,10],ymm5[11],ymm2[12,13],ymm5[14],ymm2[15]
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm9[3,3,3,3,7,7,7,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm5 = ymm3[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[2,2,2,3,6,6,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm5[0,1],ymm2[2],ymm5[3,4],ymm2[5],ymm5[6,7,8,9],ymm2[10],ymm5[11,12],ymm2[13],ymm5[14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%r9), %xmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%r8), %xmm14
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm14[0],xmm6[0],xmm14[1],xmm6[1],xmm14[2],xmm6[2],xmm14[3],xmm6[3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm5[0,1,3,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm12, %zmm11, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm5[0,0,2,1,4,4,6,5]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1,2],ymm6[3],ymm2[4,5],ymm6[6],ymm2[7,8,9,10],ymm6[11],ymm2[12,13],ymm6[14],ymm2[15]
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm10, %zmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm8[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,0,0,0,4,4,4,4]
+; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm10[0,1,1,3,4,5,5,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm6[0,1],ymm1[2],ymm6[3,4],ymm1[5],ymm6[6,7,8,9],ymm1[10],ymm6[11,12],ymm1[13],ymm6[14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm24
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm13, %ymm8, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm10[2,2,2,2,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm6[2],ymm1[3,4],ymm6[5],ymm1[6,7,8,9],ymm6[10],ymm1[11,12],ymm6[13],ymm1[14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm21
+; AVX512DQ-SLOW-NEXT:    vpermd (%rax), %zmm11, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm17, %zmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm10[3,3,3,3,7,7,7,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm8[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[2,2,2,2,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6,7,8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13,14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm25[3,3,3,3,7,7,7,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm7[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[2,2,2,2,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7,8,9,10],ymm2[11],ymm1[12,13],ymm2[14],ymm1[15]
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm5[3,3,3,3,7,7,7,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm3[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[2,2,2,3,6,6,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7,8,9],ymm1[10],ymm2[11,12],ymm1[13],ymm2[14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%r9), %xmm7
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%r8), %xmm2
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm2[0],xmm7[0],xmm2[1],xmm7[1],xmm2[2],xmm7[2],xmm2[3],xmm7[3]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm6[0,1,3,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm11, %zmm12, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm22[2,3,3,3,6,7,7,7]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,1,3,2]
 ; AVX512DQ-SLOW-NEXT:    vpbroadcastd 32(%rax), %ymm12
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm11, %zmm22
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm1, %zmm22
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm11, %zmm25
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm15, %zmm25
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm2
-; AVX512DQ-SLOW-NEXT:    vprold $16, %xmm2, %xmm12
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm1[1,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm15[0,1],xmm12[2],xmm15[3,4],xmm12[5],xmm15[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm12
+; AVX512DQ-SLOW-NEXT:    vprold $16, %xmm12, %xmm15
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm1[1,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm13[0,1],xmm15[2],xmm13[3,4],xmm15[5],xmm13[6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm13 = xmm1[0],xmm12[0],xmm1[1],xmm12[1],xmm1[2],xmm12[2],xmm1[3],xmm12[3]
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm12[4],xmm1[4],xmm12[5],xmm1[5],xmm12[6],xmm1[6],xmm12[7],xmm1[7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm14[4],xmm6[4],xmm14[5],xmm6[5],xmm14[6],xmm6[6],xmm14[7],xmm6[7]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm0, %xmm8, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm7[4],xmm2[5],xmm7[5],xmm2[6],xmm7[6],xmm2[7],xmm7[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm1
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm0, %xmm1, %xmm1
 ; AVX512DQ-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm4
 ; AVX512DQ-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm4
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm0, %xmm12, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm0, %xmm13, %xmm4
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
-; AVX512DQ-SLOW-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm24 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm24 = mem[2,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm19 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm19 = mem[0,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm25 = ymm25[0,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermq $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm8 = mem[2,1,3,3]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm5[0,1,2,3,4,5,7,6]
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm1, %zmm30, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpbroadcastd 36(%rax), %ymm1
-; AVX512DQ-SLOW-NEXT:    vpbroadcastd 40(%rax), %ymm5
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm30
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm28, %zmm30
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm0
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm13, %xmm0, %xmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm14
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm14[1,1,2,2]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm15[0],xmm5[1],xmm15[2,3],xmm5[4],xmm15[5,6],xmm5[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm14[0],xmm0[0],xmm14[1],xmm0[1],xmm14[2],xmm0[2],xmm14[3],xmm0[3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm0, %xmm2, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm23 = mem[2,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm22 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm22 = mem[0,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm18 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm18 = mem[0,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermq $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm17 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm17 = mem[2,1,3,3]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm6[0,1,2,3,4,5,7,6]
+; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm0, %zmm28, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpbroadcastd 36(%rax), %ymm0
+; AVX512DQ-SLOW-NEXT:    vpbroadcastd 40(%rax), %ymm6
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm28
+; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm28
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm6
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm0, %xmm6, %xmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm0[1,1,2,2]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm7[0],xmm2[1],xmm7[2,3],xmm2[4],xmm7[5,6],xmm2[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
+; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm4[0,1,2,3,5,5,7,6,8,9,10,11,13,13,15,14]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[3,3,3,3]
-; AVX512DQ-SLOW-NEXT:    vpshufd $233, {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm23 = mem[1,2,2,3,5,6,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm4[2,1,2,3,6,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm13 = ymm13[0,0,3,3,4,5,6,7,8,8,11,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm28 = ymm13[2,2,2,2]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm2[0,0,1,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm0
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm0[0,2,3,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm2[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm0
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm0[2,1,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5,5,4]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,0,1,3]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm3[0,0,1,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm0
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm0[0,2,3,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm3[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm0
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm0[2,1,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,5,4]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm5[0,0,1,3]
-; AVX512DQ-SLOW-NEXT:    vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm5 = mem[2,1,3,2]
-; AVX512DQ-SLOW-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm10 = mem[2,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermq $250, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm11 = mem[2,2,3,3]
-; AVX512DQ-SLOW-NEXT:    vpermpd $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm0 = mem[2,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm29 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm29 = mem[0,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm29 = ymm0[3,3,3,3]
+; AVX512DQ-SLOW-NEXT:    vpshufd $233, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm2 = mem[1,2,2,3,5,6,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm4[2,1,2,3,6,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm10 = ymm10[0,0,3,3,4,5,6,7,8,8,11,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm10[2,2,2,2]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,1,1]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm30, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm4[0,2,3,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,2,1]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm4[2,1,2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,5,5,4]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,1,3]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,1,1]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm4[0,2,3,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[0,0,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm14[2,1,2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,5,5,4]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,0,1,3]
+; AVX512DQ-SLOW-NEXT:    vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm4 = mem[2,1,3,2]
+; AVX512DQ-SLOW-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm5 = mem[2,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermq $250, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm6 = mem[2,2,3,3]
+; AVX512DQ-SLOW-NEXT:    vpermpd $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm7 = mem[2,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vmovups %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm31 = ymm31[0,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm30 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm30 = mem[0,2,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpermq $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm26 # 32-byte Folded Reload
 ; AVX512DQ-SLOW-NEXT:    # ymm26 = mem[2,1,3,3]
-; AVX512DQ-SLOW-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm21 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm21 = mem[2,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm15 = mem[0,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm4 = mem[2,1,3,2]
-; AVX512DQ-SLOW-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm12 = mem[2,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm19 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm19 = mem[2,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm11 = mem[0,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm16 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm16 = mem[2,1,3,2]
+; AVX512DQ-SLOW-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm7 = mem[2,2,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm20 = ymm20[2,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm16[0,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm17 = ymm17[2,1,3,2]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm18[2,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm19, %zmm24, %zmm24
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm25, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm27[0,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm24[2,1,3,2]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm21[2,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm22, %zmm23, %zmm22
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm17, %zmm18, %zmm23
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm27 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm24, %zmm27, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm23[2,1,3,2]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm8
-; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm28
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm8 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm13[0,1,2,3],zmm8[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm13 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm8 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm23 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm23 = zmm2[0,1,2,3],zmm23[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm23 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm13 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm7 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm24 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm13, %zmm24, %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm8, %zmm9, %zmm7
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm6 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm8 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm6, %zmm24, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm23, %zmm9, %zmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm2, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm2, %zmm18
-; AVX512DQ-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm18
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm22, %zmm27, %zmm23
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,1,3,2]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm23, %ymm17, %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm2, %ymm17, %ymm0
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm1[0,1,2,3],zmm2[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm2 # 64-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm1 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm1[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm3 # 64-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm10 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm8 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm10, %zmm22, %zmm8
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm17, %zmm8
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm15, %zmm2 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm10 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm22, %zmm10
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm3, %zmm17, %zmm10
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm1, %zmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm18
+; AVX512DQ-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm18
+; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm23, %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512DQ-SLOW-NEXT:    vpternlogd $226, 124(%r8){1to8}, %ymm1, %ymm2
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm19 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm23 = zmm2[0,1,2,3],zmm5[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm23 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogd $226, 124(%r8){1to8}, %ymm2, %ymm0
-; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm28[0,1,2,3],zmm0[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm2, %ymm3, %ymm29
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm21 # 64-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm24 = zmm1[0,1,2,3],zmm2[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm24 # 64-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm29, %zmm0, %zmm2
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm2[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm1, %zmm25
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm28 # 64-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm1, %zmm23
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm29 # 64-byte Folded Reload
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm13 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm9 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm21, %zmm0
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm4, %zmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm17 # 64-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm6 # 64-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm19, %zmm0
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm16, %zmm2
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm3, %zmm1
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm20, %zmm0
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm17, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm3, %zmm5
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm3, %zmm2
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm20, %zmm0
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm14, %zmm4
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm3, %zmm4
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0]
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm0, %zmm19
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm0, %zmm20
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm5, %zmm0, %zmm21
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm4, %zmm0, %zmm20
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm29, %zmm0, %zmm0
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm26, %zmm31, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm27, %zmm1
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm31, %zmm0, %zmm0
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm26, %zmm30, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm27, %zmm2
 ; AVX512DQ-SLOW-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
 ; AVX512DQ-SLOW-NEXT:    # ymm0 = mem[0,2,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw $180, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
@@ -13516,706 +13552,720 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
 ; AVX512DQ-SLOW-NEXT:    vpermq $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
 ; AVX512DQ-SLOW-NEXT:    # ymm3 = mem[2,1,3,3]
-; AVX512DQ-SLOW-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm5 = mem[0,0,1,1]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm3, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm5 = mem[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshuflw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # xmm6 = mem[2,1,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,5,5,4]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,0,1,3]
+; AVX512DQ-SLOW-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm4 = mem[0,0,1,1]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
+; AVX512DQ-SLOW-NEXT:    vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm4 = mem[0,0,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshuflw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # xmm5 = mem[2,1,2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,5,4]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,1,3]
 ; AVX512DQ-SLOW-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm26 # 32-byte Folded Reload
 ; AVX512DQ-SLOW-NEXT:    # ymm26 = mem[0,0,1,1]
-; AVX512DQ-SLOW-NEXT:    vpshuflw $248, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # xmm10 = mem[0,2,3,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm11 = mem[0,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw $180, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # xmm12 = mem[0,1,3,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[0,0,1,1]
-; AVX512DQ-SLOW-NEXT:    vpermq $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm14 = mem[2,1,3,3]
-; AVX512DQ-SLOW-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm15 = mem[0,0,1,1]
-; AVX512DQ-SLOW-NEXT:    vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm16 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm16 = mem[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshuflw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # xmm2 = mem[2,1,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,5,4]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,1,3]
-; AVX512DQ-SLOW-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm17 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm17 = mem[0,0,1,1]
-; AVX512DQ-SLOW-NEXT:    vpshuflw $248, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # xmm4 = mem[0,2,3,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshuflw $248, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # xmm7 = mem[0,2,3,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,0,2,1]
+; AVX512DQ-SLOW-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm9 = mem[0,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshuflw $180, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # xmm11 = mem[0,1,3,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[0,0,1,1]
+; AVX512DQ-SLOW-NEXT:    vpermq $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm12 = mem[2,1,3,3]
+; AVX512DQ-SLOW-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm13 = mem[0,0,1,1]
+; AVX512DQ-SLOW-NEXT:    vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm14 = mem[0,0,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshuflw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # xmm15 = mem[2,1,2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,4,5,5,4]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[0,0,1,3]
+; AVX512DQ-SLOW-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm16 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm16 = mem[0,0,1,1]
+; AVX512DQ-SLOW-NEXT:    vpshuflw $248, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # xmm1 = mem[0,2,3,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1]
 ; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm27, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm12[0,1,1,3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm11, %zmm0
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm14, %zmm11
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm27, %zmm11
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm11[0,1,1,3]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm9, %zmm0
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm12, %zmm9
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm27, %zmm9
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm0, %zmm12
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm11, %zmm0, %zmm22
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm5, %zmm0
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm26, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm24, %zmm3
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm16, %zmm0
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm17, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm24, %zmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm0, %zmm11
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm9, %zmm0, %zmm25
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm0
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm26, %zmm3
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm22, %zmm3
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm14, %zmm0
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm16, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm22, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm0, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm0, %zmm30
-; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm18, %zmm23
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm28, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm0, %zmm28
+; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm18, %zmm24
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
 ; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm23, 320(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm30, 256(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm22, 192(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm21, 128(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, 64(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, (%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm13, 448(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm24, 320(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm28, 256(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, 192(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm20, 128(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, 64(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, (%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, 448(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, 704(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, 640(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm20, 576(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, 512(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, 640(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm19, 576(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, 512(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, 384(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm19, 768(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, 832(%rax)
-; AVX512DQ-SLOW-NEXT:    addq $2168, %rsp # imm = 0x878
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm21, 768(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm23, 832(%rax)
+; AVX512DQ-SLOW-NEXT:    addq $2200, %rsp # imm = 0x898
 ; AVX512DQ-SLOW-NEXT:    vzeroupper
 ; AVX512DQ-SLOW-NEXT:    retq
 ;
 ; AVX512DQ-FAST-LABEL: store_i16_stride7_vf64:
 ; AVX512DQ-FAST:       # %bb.0:
-; AVX512DQ-FAST-NEXT:    subq $1432, %rsp # imm = 0x598
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rcx), %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm11, %ymm4, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdx), %ymm5
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm5, %ymm1
-; AVX512DQ-FAST-NEXT:    vporq %ymm0, %ymm1, %ymm16
+; AVX512DQ-FAST-NEXT:    subq $1496, %rsp # imm = 0x5D8
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rcx), %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = [128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm0, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm20
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdx), %ymm9
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm9, %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm0, %ymm10
+; AVX512DQ-FAST-NEXT:    vporq %ymm2, %ymm3, %ymm16
 ; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rsi), %ymm6
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm6, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm6, %ymm2
 ; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdi), %ymm7
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm7, %ymm1
-; AVX512DQ-FAST-NEXT:    vporq %ymm0, %ymm1, %ymm19
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = <12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm11, %ymm7, %ymm3
+; AVX512DQ-FAST-NEXT:    vporq %ymm2, %ymm3, %ymm17
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
 ; AVX512DQ-FAST-NEXT:    vmovdqa 64(%r9), %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%r8), %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm1, %ymm1
-; AVX512DQ-FAST-NEXT:    vporq %ymm0, %ymm1, %ymm22
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm0, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%r8), %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm0, %ymm4
+; AVX512DQ-FAST-NEXT:    vporq %ymm2, %ymm4, %ymm18
 ; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rcx), %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm0, %ymm8
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdx), %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm11, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdx), %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm1, %ymm1
-; AVX512DQ-FAST-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm0, %ymm12
+; AVX512DQ-FAST-NEXT:    vpor %ymm8, %ymm12, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rsi), %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm0, %ymm13
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdi), %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm0, %ymm9
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm30
-; AVX512DQ-FAST-NEXT:    vpor %ymm13, %ymm9, %ymm9
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa (%r9), %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm0, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdi), %ymm1
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm11, %ymm1, %ymm13
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm28
+; AVX512DQ-FAST-NEXT:    vpor %ymm0, %ymm13, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm0, %ymm9
-; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa (%r9), %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm0, %ymm13
-; AVX512DQ-FAST-NEXT:    vporq %ymm9, %ymm13, %ymm17
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm1, %ymm13
+; AVX512DQ-FAST-NEXT:    vporq %ymm0, %ymm13, %ymm19
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%rcx), %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm11, %ymm0, %ymm9
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm0, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm10, %ymm2
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm1, %ymm13
+; AVX512DQ-FAST-NEXT:    vpor %ymm0, %ymm13, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm0, %ymm13
-; AVX512DQ-FAST-NEXT:    vpor %ymm9, %ymm13, %ymm9
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%rsi), %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm0, %ymm9
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm0, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm11, %ymm1, %ymm13
+; AVX512DQ-FAST-NEXT:    vpor %ymm0, %ymm13, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm0, %ymm13
-; AVX512DQ-FAST-NEXT:    vpor %ymm9, %ymm13, %ymm9
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rcx), %ymm3
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm11, %ymm3, %ymm9
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdx), %ymm2
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm2, %ymm11
-; AVX512DQ-FAST-NEXT:    vpor %ymm9, %ymm11, %ymm9
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rsi), %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm0, %ymm9
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %ymm1
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm1, %ymm8
-; AVX512DQ-FAST-NEXT:    vpor %ymm9, %ymm8, %ymm8
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r8), %ymm11
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm11, %ymm8
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %ymm15
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm15, %ymm9
-; AVX512DQ-FAST-NEXT:    vporq %ymm9, %ymm8, %ymm18
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm8 = ymm4[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm9 = ymm5[0,1,1,3,4,5,5,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm9[0,1],ymm8[2],ymm9[3,4],ymm8[5],ymm9[6,7,8,9],ymm8[10],ymm9[11,12],ymm8[13],ymm9[14,15]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,1,3,2]
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
-; AVX512DQ-FAST-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm6, %ymm9
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm12, %ymm31
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm12 = ymm7[1,1,1,1,5,5,5,5]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm9 = ymm9[0,1],ymm12[2],ymm9[3,4],ymm12[5],ymm9[6,7,8,9],ymm12[10],ymm9[11,12],ymm12[13],ymm9[14,15]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm16, %zmm8
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm19, %zmm9
-; AVX512DQ-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%r8), %ymm8
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm8[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm8[14,15,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm8[16,17,u,u,u,u],zero,zero
-; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm13, %ymm9, %ymm12
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm13, %ymm14
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%r9), %ymm13
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm13, %ymm10
-; AVX512DQ-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm12, %ymm10
-; AVX512DQ-FAST-NEXT:    vextracti64x4 $1, %zmm9, %ymm9
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,4,u,u,u,5,u,u>
-; AVX512DQ-FAST-NEXT:    vpermd %ymm8, %ymm12, %ymm12
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm12
-; AVX512DQ-FAST-NEXT:    vprold $16, %ymm13, %ymm9
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,2,2]
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm12, %ymm9
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm9
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm10[0,1,2,3],zmm9[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = [0,1,4,5,4,5,5,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rax), %ymm9
-; AVX512DQ-FAST-NEXT:    vpermd %ymm9, %ymm10, %ymm10
-; AVX512DQ-FAST-NEXT:    vpandn %ymm10, %ymm14, %ymm10
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm13, %ymm9, %ymm12
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm12, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,u,u,u,u,u,u,u,u,u,u,u,14,15,12,13,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm6, %ymm10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm12, %ymm19
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm12 = ymm7[3,3,3,3,7,7,7,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm12[0,1,2],ymm10[3],ymm12[4,5],ymm10[6],ymm12[7,8,9,10],ymm10[11],ymm12[12,13],ymm10[14],ymm12[15]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm6, %ymm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm12, %ymm23
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[2,2,2,2,6,6,6,6]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm7[0],ymm6[1],ymm7[2,3],ymm6[4],ymm7[5,6,7,8],ymm6[9],ymm7[10,11],ymm6[12],ymm7[13,14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = [0,2,2,3,10,9,11,11]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm10, %zmm25, %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm4, %ymm10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm6, %ymm26
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm12 = ymm5[3,3,3,3,7,7,7,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm12[0],ymm10[1],ymm12[2,3],ymm10[4],ymm12[5,6,7,8],ymm10[9],ymm12[10,11],ymm10[12],ymm12[13,14,15]
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27,22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27]
-; AVX512DQ-FAST-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm4, %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm6, %ymm29
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[2,2,2,2,6,6,6,6]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7,8,9],ymm5[10],ymm4[11,12],ymm5[13],ymm4[14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [2,2,2,3,8,10,10,11]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm10, %zmm20, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm7, %zmm5, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <5,u,u,u,6,u,u,6>
-; AVX512DQ-FAST-NEXT:    vpermd %ymm8, %ymm4, %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rax), %zmm28
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x8 {{.*#+}} zmm4 = [30,5,0,0,31,6,0,31,30,5,0,0,31,6,0,31]
-; AVX512DQ-FAST-NEXT:    # zmm4 = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermi2d %zmm28, %zmm9, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpbroadcastd 72(%rax), %ymm4
-; AVX512DQ-FAST-NEXT:    vpandn %ymm4, %ymm7, %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm7, %ymm16
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rax), %ymm5
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm13, %ymm14
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm13, %ymm5, %ymm7
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm27
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm4, %zmm9
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm22, %zmm0, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%r9), %xmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%r8), %xmm12
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm12[4],xmm5[4],xmm12[5],xmm5[5],xmm12[6],xmm5[6],xmm12[7],xmm5[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm5, %xmm21
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm7, %xmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm13, %xmm8
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm13 = [0,0,1,1,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm13, %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $248, %zmm4, %zmm7, %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpbroadcastd 8(%rax), %ymm7
-; AVX512DQ-FAST-NEXT:    vpandnq %ymm7, %ymm16, %ymm7
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rax), %ymm5
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rcx), %ymm10
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm10, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdx), %ymm15
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm15, %ymm13
+; AVX512DQ-FAST-NEXT:    vpor %ymm0, %ymm13, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rsi), %ymm8
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm8, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %ymm4
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm11, %ymm4, %ymm11
+; AVX512DQ-FAST-NEXT:    vpor %ymm0, %ymm11, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r8), %ymm13
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm13, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %ymm14
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm14, %ymm5
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm3, %ymm12
+; AVX512DQ-FAST-NEXT:    vporq %ymm5, %ymm0, %ymm21
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm2
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm2[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm11 = ymm9[0,1,1,3,4,5,5,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm11[0,1],ymm0[2],ymm11[3,4],ymm0[5],ymm11[6,7,8,9],ymm0[10],ymm11[11,12],ymm0[13],ymm11[14,15]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,3,2]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm7[1,1,1,1,5,5,5,5]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm11[0,1],ymm3[2],ymm11[3,4],ymm3[5],ymm11[6,7,8,9],ymm3[10],ymm11[11,12],ymm3[13],ymm11[14,15]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm16, %zmm0
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm17, %zmm3
+; AVX512DQ-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535]
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%r8), %ymm11
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm11[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm11[14,15,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm11[16,17,u,u,u,u],zero,zero
+; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm25, %ymm3, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%r9), %ymm5
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm5, %ymm9
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm16
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm17, %zmm0, %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa (%r9), %xmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %xmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm6, (%rsp) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm14 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm14, %xmm14
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm13, %zmm14
-; AVX512DQ-FAST-NEXT:    vpternlogq $248, %zmm4, %zmm14, %zmm16
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpbroadcastd {{.*#+}} ymm9 = [18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm15, %ymm4
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm11[0,0,2,1,4,4,6,5]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1,2],ymm7[3],ymm4[4,5],ymm7[6],ymm4[7,8,9,10],ymm7[11],ymm4[12,13],ymm7[14],ymm4[15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <0,1,u,3,10,10,11,11>
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm4, %zmm18, %zmm7
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm5, %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm12, %ymm0, %ymm1
+; AVX512DQ-FAST-NEXT:    vextracti64x4 $1, %zmm3, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,4,u,u,u,5,u,u>
+; AVX512DQ-FAST-NEXT:    vpermd %ymm11, %ymm3, %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %ymm0, %ymm17, %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm17, %zmm22
+; AVX512DQ-FAST-NEXT:    vprold $16, %ymm5, %ymm0
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,2]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %ymm3, %ymm5, %ymm0
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm0[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,4,5,4,5,5,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rax), %ymm0
+; AVX512DQ-FAST-NEXT:    vpermd %ymm0, %ymm1, %ymm1
+; AVX512DQ-FAST-NEXT:    vpandnq %ymm1, %ymm25, %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm0, %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm20
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,u,u,u,u,u,u,u,u,14,15,12,13,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm6, %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm3, %ymm26
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm7[3,3,3,3,7,7,7,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3],ymm3[4,5],ymm1[6],ymm3[7,8,9,10],ymm1[11],ymm3[12,13],ymm1[14],ymm3[15]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm6, %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm29
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm7[2,2,2,2,6,6,6,6]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm6[0],ymm3[1],ymm6[2,3],ymm3[4],ymm6[5,6,7,8],ymm3[9],ymm6[10,11],ymm3[12],ymm6[13,14,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [0,2,2,3,10,9,11,11]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm6, %zmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm2, %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm7, %ymm27
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm9[3,3,3,3,7,7,7,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm7[0],ymm1[1],ymm7[2,3],ymm1[4],ymm7[5,6,7,8],ymm1[9],ymm7[10,11],ymm1[12],ymm7[13,14,15]
+; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27,22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27]
+; AVX512DQ-FAST-NEXT:    # ymm5 = mem[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm2, %ymm7
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm16
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm9 = ymm9[2,2,2,2,6,6,6,6]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm7[0,1],ymm9[2],ymm7[3,4],ymm9[5],ymm7[6,7,8,9],ymm9[10],ymm7[11,12],ymm9[13],ymm7[14,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [2,2,2,3,8,10,10,11]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm17, %zmm7
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm1, %zmm7
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm19, %ymm14
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm0, %ymm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm24
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <5,u,u,u,6,u,u,6>
+; AVX512DQ-FAST-NEXT:    vpermd %ymm11, %ymm1, %ymm1
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm1[3,3,3,3,7,7,7,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm7[0,1,2],ymm4[3],ymm7[4,5],ymm4[6],ymm7[7,8,9,10],ymm4[11],ymm7[12,13],ymm4[14],ymm7[15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm23, %ymm5
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm0, %ymm7
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm13 = ymm1[2,2,2,2,6,6,6,6]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm13[0],ymm7[1],ymm13[2,3],ymm7[4],ymm13[5,6,7,8],ymm7[9],ymm13[10,11],ymm7[12],ymm13[13,14,15]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm25, %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm26, %ymm8
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm3, %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm2[3,3,3,3,7,7,7,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm6[0],ymm4[1],ymm6[2,3],ymm4[4],ymm6[5,6,7,8],ymm4[9],ymm6[10,11],ymm4[12],ymm6[13,14,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rax), %zmm12
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x8 {{.*#+}} zmm3 = [30,5,0,0,31,6,0,31,30,5,0,0,31,6,0,31]
+; AVX512DQ-FAST-NEXT:    # zmm3 = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermi2d %zmm12, %zmm0, %zmm3
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535]
+; AVX512DQ-FAST-NEXT:    vpbroadcastd 72(%rax), %ymm0
+; AVX512DQ-FAST-NEXT:    vpandnq %ymm0, %ymm23, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rax), %ymm7
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm5
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm7, %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm7, %ymm30
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm11
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm18, %zmm0, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%r9), %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%r8), %xmm2
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm2, %xmm20
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm3, %xmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [0,0,1,1,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm18, %zmm3
+; AVX512DQ-FAST-NEXT:    vpternlogq $248, %zmm23, %zmm3, %zmm11
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpbroadcastd 8(%rax), %ymm0
+; AVX512DQ-FAST-NEXT:    vpandnq %ymm0, %ymm23, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rax), %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm1, %ymm3
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm5
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm19, %zmm0, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa (%r9), %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %xmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm3, %xmm3
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm18, %zmm3
+; AVX512DQ-FAST-NEXT:    vpternlogq $248, %zmm23, %zmm3, %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpbroadcastd {{.*#+}} ymm7 = [18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm14, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm13[0,0,2,1,4,4,6,5]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3],ymm0[4,5],ymm3[6],ymm0[7,8,9,10],ymm3[11],ymm0[12,13],ymm3[14],ymm0[15]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <0,1,u,3,10,10,11,11>
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm0, %zmm21, %zmm3
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm26, %ymm9
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm8, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm4[3,3,3,3,7,7,7,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3],ymm3[4,5],ymm0[6],ymm3[7,8,9,10],ymm0[11],ymm3[12,13],ymm0[14],ymm3[15]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm29, %ymm1
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm3, %ymm6
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm13 = ymm2[2,2,2,2,6,6,6,6]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm6[0,1],ymm13[2],ymm6[3,4],ymm13[5],ymm6[6,7,8,9],ymm13[10],ymm6[11,12],ymm13[13],ymm6[14,15]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm20, %zmm0
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm7, %zmm10, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm11[3,3,3,3,7,7,7,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm6[2],ymm4[3,4],ymm6[5],ymm4[6,7,8,9],ymm6[10],ymm4[11,12],ymm6[13],ymm4[14,15]
-; AVX512DQ-FAST-NEXT:    vprold $16, %ymm15, %ymm6
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm11[1,2,2,3,5,6,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm7[0,1],ymm6[2],ymm7[3,4],ymm6[5],ymm7[6,7,8,9],ymm6[10],ymm7[11,12],ymm6[13],ymm7[14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [2,1,3,2,10,10,10,11]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm17, %zmm0
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm8, %ymm3
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm4[2,2,2,2,6,6,6,6]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2,3],ymm3[4],ymm5[5,6,7,8],ymm3[9],ymm5[10,11],ymm3[12],ymm5[13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm6, %zmm3
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm27, %ymm11
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm11, %ymm10, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm15[3,3,3,3,7,7,7,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm5[0],ymm0[1],ymm5[2,3],ymm0[4],ymm5[5,6,7,8],ymm0[9],ymm5[10,11],ymm0[12],ymm5[13,14,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm16, %ymm4
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm10, %ymm5
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm15[2,2,2,2,6,6,6,6]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm5[0,1],ymm6[2],ymm5[3,4],ymm6[5],ymm5[6,7,8,9],ymm6[10],ymm5[11,12],ymm6[13],ymm5[14,15]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm17, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm24, %zmm16
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm24, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm15 = [26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
+; AVX512DQ-FAST-NEXT:    # ymm15 = mem[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm14, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm13[3,3,3,3,7,7,7,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm3[2],ymm0[3,4],ymm3[5],ymm0[6,7,8,9],ymm3[10],ymm0[11,12],ymm3[13],ymm0[14,15]
+; AVX512DQ-FAST-NEXT:    vprold $16, %ymm14, %ymm3
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm13[1,2,2,3,5,6,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm5[0,1],ymm3[2],ymm5[3,4],ymm3[5],ymm5[6,7,8,9],ymm3[10],ymm5[11,12],ymm3[13],ymm5[14,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [2,1,3,2,10,10,10,11]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm18, %zmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rax), %zmm19
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x8 {{.*#+}} zmm2 = [14,21,0,0,15,22,0,15,14,21,0,0,15,22,0,15]
+; AVX512DQ-FAST-NEXT:    # zmm2 = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rax), %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm2, %ymm4
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm30[2,2,2,2,6,6,6,6]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm6[0],ymm4[1],ymm6[2,3],ymm4[4],ymm6[5,6,7,8],ymm4[9],ymm6[10,11],ymm4[12],ymm6[13,14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm31, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm2, %ymm6
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm2, %ymm5
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm30[1,1,1,1,5,5,5,5]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0,1],ymm7[2],ymm6[3,4],ymm7[5],ymm6[6,7,8,9],ymm7[10],ymm6[11,12],ymm7[13],ymm6[14,15]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm20, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm29, %ymm31
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm1[2,2,2,2,6,6,6,6]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm7[2],ymm4[3,4],ymm7[5],ymm4[6,7,8,9],ymm7[10],ymm4[11,12],ymm7[13],ymm4[14,15]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm7 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm11 = ymm1[0,1,1,3,4,5,5,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm11[0,1],ymm7[2],ymm11[3,4],ymm7[5],ymm11[6,7,8,9],ymm7[10],ymm11[11,12],ymm7[13],ymm11[14,15]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm17, %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm6, %zmm23, %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vprold $16, %ymm15, %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm19 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm19[1,2,2,3,5,6,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm6[0,1],ymm4[2],ymm6[3,4],ymm4[5],ymm6[6,7,8,9],ymm4[10],ymm6[11,12],ymm4[13],ymm6[14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm15, %ymm6
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm11 = ymm19[0,0,2,1,4,4,6,5]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0,1,2],ymm11[3],ymm6[4,5],ymm11[6],ymm6[7,8,9,10],ymm11[11],ymm6[12,13],ymm11[14],ymm6[15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = [2,2,3,3,10,9,11,10]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm25, %zmm6
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm27, %zmm28, %zmm4
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x8 {{.*#+}} zmm29 = [0,13,4,0,0,14,5,0,0,13,4,0,0,14,5,0]
-; AVX512DQ-FAST-NEXT:    # zmm29 = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm4, %zmm29, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm6, %zmm28, %zmm2
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm2
+; AVX512DQ-FAST-NEXT:    vpermi2d %zmm0, %zmm19, %zmm2
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm22, %zmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm0, %ymm3
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm1[3,3,3,3,7,7,7,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5,6,7,8],ymm3[9],ymm2[10,11],ymm3[12],ymm2[13,14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rcx), %xmm0
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm13, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm28[2,2,2,2,6,6,6,6]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0],ymm0[1],ymm3[2,3],ymm0[4],ymm3[5,6,7,8],ymm0[9],ymm3[10,11],ymm0[12],ymm3[13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm28[1,1,1,1,5,5,5,5]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1],ymm5[2],ymm3[3,4],ymm5[5],ymm3[6,7,8,9],ymm5[10],ymm3[11,12],ymm5[13],ymm3[14,15]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm17, %zmm3
+; AVX512DQ-FAST-NEXT:    vmovdqu (%rsp), %ymm1 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm1, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm2[2,2,2,2,6,6,6,6]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm5[2],ymm0[3,4],ymm5[5],ymm0[6,7,8,9],ymm5[10],ymm0[11,12],ymm5[13],ymm0[14,15]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm5 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm2[0,1,1,3,4,5,5,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm2, %ymm4
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm6[0,1],ymm5[2],ymm6[3,4],ymm5[5],ymm6[6,7,8,9],ymm5[10],ymm6[11,12],ymm5[13],ymm6[14,15]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm18, %zmm5
+; AVX512DQ-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vprold $16, %ymm8, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm2[1,2,2,3,5,6,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0,1],ymm0[2],ymm3[3,4],ymm0[5],ymm3[6,7,8,9],ymm0[10],ymm3[11,12],ymm0[13],ymm3[14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm8, %ymm3
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm2[0,0,2,1,4,4,6,5]
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm2, %ymm14
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1,2],ymm6[3],ymm3[4,5],ymm6[6],ymm3[7,8,9,10],ymm6[11],ymm3[12,13],ymm6[14],ymm3[15]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [2,2,3,3,10,9,11,10]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm2, %zmm3
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm30, %zmm12, %zmm0
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x8 {{.*#+}} zmm2 = [0,13,4,0,0,14,5,0,0,13,4,0,0,14,5,0]
+; AVX512DQ-FAST-NEXT:    # zmm2 = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermd %zmm0, %zmm2, %zmm0
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm25, %zmm0
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm11, %ymm1, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm4[3,3,3,3,7,7,7,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6,7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13,14,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rcx), %xmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdx), %xmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdx), %xmm11
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm11[0],xmm0[0],xmm11[1],xmm0[1],xmm11[2],xmm0[2],xmm11[3],xmm0[3]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,0,1,2,3,6,7,4,5,6,7,4,5]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm3, %xmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,2,2,3,8,9,9,11]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm0, %zmm10
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm5, %ymm1
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm0 = ymm30[3,3,3,3,7,7,7,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm0[0,1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7,8,9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rcx), %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdx), %xmm7
-; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm3 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm1, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm3, %xmm8
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm7[1,1,2,2]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm3[0],xmm0[1],xmm3[2,3],xmm0[4],xmm3[5,6],xmm0[7]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm7[4],xmm1[4],xmm7[5],xmm1[5],xmm7[6],xmm1[6],xmm7[7],xmm1[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,1,1,3,8,8,9,9]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm3, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdi), %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rsi), %xmm3
-; AVX512DQ-FAST-NEXT:    vprold $16, %xmm3, %xmm4
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm6 = xmm0[1,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0,1],xmm4[2],xmm6[3,4],xmm4[5],xmm6[6,7]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm3, %xmm29
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,0,1,2,3,6,7,4,5,6,7,4,5]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm2, %xmm7
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [0,2,2,3,8,9,9,11]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm31, %zmm1
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm13, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm28[3,3,3,3,7,7,7,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3],ymm2[4,5],ymm0[6],ymm2[7,8,9,10],ymm0[11],ymm2[12,13],ymm0[14],ymm2[15]
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rsi), %xmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdi), %xmm3
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm4, %xmm26
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm3, %xmm24
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm14
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm26 = [0,0,1,1,8,8,10,9]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm26, %zmm14
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm4, %zmm14
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm21, %xmm0
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,1,2,3,6,7,4,5,6,7,4,5,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm3, %xmm12
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [0,0,0,1,8,9,9,11]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm21, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rcx), %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %xmm3
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm1, %xmm7
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm8 = xmm3[1,1,2,2]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm8[0],xmm7[1],xmm8[2,3],xmm7[4],xmm8[5,6],xmm7[7]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm8 = xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm2, %xmm24
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm13, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rsi), %xmm8
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vprold $16, %xmm8, %xmm3
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm2[1,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1],xmm3[2],xmm7[3,4],xmm3[5],xmm7[6,7]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3]
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm9, %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm3, %xmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm26, %zmm3
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm4, %zmm3
-; AVX512DQ-FAST-NEXT:    vpbroadcastd 64(%rax), %ymm1
-; AVX512DQ-FAST-NEXT:    vpbroadcastd 68(%rax), %ymm4
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm1, %zmm18
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm1, %zmm18
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rsp), %xmm0 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm0, %xmm4
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm6, %xmm8
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm21, %zmm0
-; AVX512DQ-FAST-NEXT:    vpbroadcastd (%rax), %ymm4
-; AVX512DQ-FAST-NEXT:    vpbroadcastd 4(%rax), %ymm6
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm4, %zmm13
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm1, %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rsi), %xmm9
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdi), %xmm0
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm9[0],xmm0[1],xmm9[1],xmm0[2],xmm9[2],xmm0[3],xmm9[3]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [2,1,3,3,8,8,9,9]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm2, %zmm5
-; AVX512DQ-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm5
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm19[3,3,3,3,7,7,7,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [2,1,3,3,8,8,9,9]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm3, %zmm0
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm16, %zmm0
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm8, %ymm1
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm14[3,3,3,3,7,7,7,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rcx), %xmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdx), %xmm1
+; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm4 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm2, %xmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm4, %xmm8
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm6 = xmm1[1,1,2,2]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0],xmm5[1],xmm6[2,3],xmm5[4],xmm6[5,6],xmm5[7]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm7, %xmm14
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm1, %xmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,1,1,3,8,8,9,9]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm1, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm11
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdi), %xmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm4, (%rsp) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rsi), %xmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vprold $16, %xmm6, %xmm1
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm4[1,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm1[2],xmm5[3,4],xmm1[5],xmm5[6,7]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm9, %xmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm27 = [0,0,1,1,8,8,10,9]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm27, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0]
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm5, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm20, %xmm2
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [0,1,2,3,6,7,4,5,6,7,4,5,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = [0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm2, %xmm6
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm2, %xmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [0,0,0,1,8,9,9,11]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm16, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rcx), %xmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %xmm9
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm4, %xmm6
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm8 = xmm9[1,1,2,2]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm8[0],xmm6[1],xmm8[2,3],xmm6[4],xmm8[5,6],xmm6[7]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm9[0],xmm4[0],xmm9[1],xmm4[1],xmm9[2],xmm4[2],xmm9[3],xmm4[3]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm8, %xmm8
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm14, %xmm21
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm11, %zmm8
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %xmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rsi), %xmm11
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vprold $16, %xmm11, %xmm6
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm4[1,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm9[0,1],xmm6[2],xmm9[3,4],xmm6[5],xmm9[6,7]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm4[0],xmm11[0],xmm4[1],xmm11[1],xmm4[2],xmm11[2],xmm4[3],xmm11[3]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm9, %xmm9
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm3, %xmm28
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm27, %zmm9
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm8, %zmm5, %zmm9
+; AVX512DQ-FAST-NEXT:    vpbroadcastd 64(%rax), %ymm5
+; AVX512DQ-FAST-NEXT:    vpbroadcastd 68(%rax), %ymm6
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm5, %zmm23
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm4, %zmm23
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm14, %zmm2, %zmm18
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm2, %zmm13
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm2, %zmm23
+; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm1, %xmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm10, %xmm22
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm1, %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm7, %xmm3
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm16, %zmm1
+; AVX512DQ-FAST-NEXT:    vpbroadcastd (%rax), %ymm5
+; AVX512DQ-FAST-NEXT:    vpbroadcastd 4(%rax), %ymm6
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm5, %zmm20
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm4, %zmm20
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm9, %zmm2, %zmm20
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm4[10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm20 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm20[2,2,2,2,6,6,6,6]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6,7,8],ymm2[9],ymm3[10,11],ymm2[12],ymm3[13,14,15]
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
-; AVX512DQ-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm4, %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm4, %ymm16
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm20[1,1,1,1,5,5,5,5]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7,8,9],ymm4[10],ymm3[11,12],ymm4[13],ymm3[14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [2,2,2,3,8,10,10,11]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm3, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm31, %ymm2
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm15, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm7[2,2,2,2,6,6,6,6]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7,8,9],ymm3[10],ymm2[11,12],ymm3[13],ymm2[14,15]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm15[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm7[0,1,1,3,4,5,5,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm7, %ymm21
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm14 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7,8,9],ymm3[10],ymm4[11,12],ymm3[13],ymm4[14,15]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm17, %zmm14
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [2,2,2,3,8,8,8,9]
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%r9), %xmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%r8), %xmm2
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm4, %xmm7
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm10, %zmm1
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm6, %zmm23, %zmm14
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vprold $16, %ymm8, %ymm6
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm4[10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm17 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm17[1,2,2,3,5,6,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm7[0,1],ymm6[2],ymm7[3,4],ymm6[5],ymm7[6,7,8,9],ymm6[10],ymm7[11,12],ymm6[13],ymm7[14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm8, %ymm19
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm8 = ymm17[0,0,2,1,4,4,6,5]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm7[0,1,2],ymm8[3],ymm7[4,5],ymm8[6],ymm7[7,8,9,10],ymm8[11],ymm7[12,13],ymm8[14],ymm7[15]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm25, %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rax), %zmm25
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm23, %zmm25, %zmm6
-; AVX512DQ-FAST-NEXT:    vpermd %zmm6, %zmm29, %zmm10
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm7, %zmm28, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm28 = [6,7,3,3,7,7,6,7]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm27, %ymm28, %ymm6
-; AVX512DQ-FAST-NEXT:    vpbroadcastd 96(%rax), %ymm7
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm31, %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm30 = [0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm30, %zmm12
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7]
-; AVX512DQ-FAST-NEXT:    vprold $16, %xmm9, %xmm1
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm0[0,1],xmm1[2],xmm0[3,4],xmm1[5],xmm0[6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm14, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rcx), %xmm14
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm17[2,2,2,2,6,6,6,6]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6,7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm17[1,1,1,1,5,5,5,5]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm5[2],ymm2[3,4],ymm5[5],ymm2[6,7,8,9],ymm5[10],ymm2[11,12],ymm5[13],ymm2[14,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [2,2,2,3,8,10,10,11]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm5, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27]
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm7[2,2,2,2,6,6,6,6]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm5[2],ymm1[3,4],ymm5[5],ymm1[6,7,8,9],ymm5[10],ymm1[11,12],ymm5[13],ymm1[14,15]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm5 = ymm9[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm7[0,1,1,3,4,5,5,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm7, %ymm15
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm6[0,1],ymm5[2],ymm6[3,4],ymm5[5],ymm6[6,7,8,9],ymm5[10],ymm6[11,12],ymm5[13],ymm6[14,15]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm18, %zmm8
+; AVX512DQ-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm8
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vprold $16, %ymm5, %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm16 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm16[1,2,2,3,5,6,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7,8,9],ymm1[10],ymm2[11,12],ymm1[13],ymm2[14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm18
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm16[0,0,2,1,4,4,6,5]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1,2],ymm5[3],ymm2[4,5],ymm5[6],ymm2[7,8,9,10],ymm5[11],ymm2[12,13],ymm5[14],ymm2[15]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [2,2,3,3,10,9,11,10]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm5, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [2,2,2,3,8,8,8,9]
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%r9), %xmm11
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%r8), %xmm7
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm10, %xmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm5, %zmm13
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm19, %zmm1
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x8 {{.*#+}} zmm5 = [0,13,4,0,0,14,5,0,0,13,4,0,0,14,5,0]
+; AVX512DQ-FAST-NEXT:    # zmm5 = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermd %zmm1, %zmm5, %zmm19
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm25, %zmm19
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm19
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rcx), %xmm12
 ; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdx), %xmm8
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm8[0],xmm14[0],xmm8[1],xmm14[1],xmm8[2],xmm14[2],xmm8[3],xmm14[3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm24, %xmm1
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm15[14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm9 = ymm21[3,3,3,3,7,7,7,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm9[0],ymm1[1],ymm9[2,3],ymm1[4],ymm9[5,6,7,8],ymm1[9],ymm9[10,11],ymm1[12],ymm9[13,14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,2,2,3,8,9,9,11]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm6
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm5[4,5,2,3,4,5,6,7,8,9,10,11,10,11,8,9]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [0,0,2,1,8,8,9,11]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm22, %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm11[4],xmm0[4],xmm11[5],xmm0[5],xmm11[6],xmm0[6],xmm11[7],xmm0[7]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm8[0],xmm12[0],xmm8[1],xmm12[1],xmm8[2],xmm12[2],xmm8[3],xmm12[3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm21, %xmm2
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm9[14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm9 = ymm15[3,3,3,3,7,7,7,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm15 = ymm9[0],ymm2[1],ymm9[2,3],ymm2[4],ymm9[5,6,7,8],ymm2[9],ymm9[10,11],ymm2[12],ymm9[13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm31, %zmm15
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm31 = [6,7,3,3,7,7,6,7]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm30, %ymm31, %ymm1
+; AVX512DQ-FAST-NEXT:    vpbroadcastd 96(%rax), %ymm2
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm21
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm30 = [65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm13, %zmm30, %zmm21
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm21
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm24, %xmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm26, %xmm1
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; AVX512DQ-FAST-NEXT:    vprold $16, %xmm26, %xmm1
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm24[1,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm2[0,1],xmm1[2],xmm2[3,4],xmm1[5],xmm2[6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,14,15,12,13,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29]
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm17[3,3,3,3,7,7,7,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5],ymm1[6],ymm2[7,8,9,10],ymm1[11],ymm2[12,13],ymm1[14],ymm2[15]
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %xmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rsi), %xmm4
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm13 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm28, %xmm6
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm13, %xmm13
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [2,1,3,3,8,8,9,9]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm13, %zmm2, %zmm1
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[4,5,2,3,4,5,6,7,8,9,10,11,10,11,8,9]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [0,0,2,1,8,8,9,11]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm17, %zmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm29, %xmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7]
 ; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm24 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm24, %xmm1
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[1,1,2,2]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm11[0],xmm0[1],xmm11[2,3],xmm0[4],xmm11[5,6],xmm0[7]
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,2,3,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm26, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm26 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm7, %zmm26, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25,22,23,22,23,u,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm5[2,2,2,2]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm2, %xmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm29 = [0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm29, %xmm3
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm4, %xmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm27 = [0,1,1,3,8,8,9,9]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm27, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm16, %ymm2
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,14,15,12,13,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29]
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm20[3,3,3,3,7,7,7,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm11 = ymm4[0,1,2],ymm2[3],ymm4[4,5],ymm2[6],ymm4[7,8,9,10],ymm2[11],ymm4[12,13],ymm2[14],ymm4[15]
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %xmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rsi), %xmm5
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm4, %xmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [2,1,3,3,8,8,9,9]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm0, %zmm11
-; AVX512DQ-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm11
-; AVX512DQ-FAST-NEXT:    vpbroadcastd 100(%rax), %ymm0
-; AVX512DQ-FAST-NEXT:    vpbroadcastd 104(%rax), %ymm4
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm16
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm21, %zmm16
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm19, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm17[3,3,3,3,7,7,7,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm3[2],ymm0[3,4],ymm3[5],ymm0[6,7,8,9],ymm3[10],ymm0[11,12],ymm3[13],ymm0[14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %xmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm24, %xmm13
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm0, %xmm0
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[1,1,2,2]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm9[0],xmm0[1],xmm9[2,3],xmm0[4],xmm9[5,6],xmm0[7]
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,2,3,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm27, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm25, %zmm0
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7]
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %ymm3, %ymm30, %ymm7
+; AVX512DQ-FAST-NEXT:    vextracti64x4 $1, %zmm3, %ymm11
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm2, %xmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm22, %xmm13
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm10, %xmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [0,1,1,3,8,8,9,9]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm28, %zmm3
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm6[0,1,2,3,5,5,7,6,8,9,10,11,13,13,15,14]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm26 = ymm2[3,3,3,3]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25,22,23,22,23,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm29 = ymm2[2,2,2,2]
+; AVX512DQ-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm15, %zmm1
+; AVX512DQ-FAST-NEXT:    vpbroadcastd 100(%rax), %ymm2
+; AVX512DQ-FAST-NEXT:    vpbroadcastd 104(%rax), %ymm6
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm2, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm22, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm3
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm16[3,3,3,3,7,7,7,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm3[0,1],ymm6[2],ymm3[3,4],ymm6[5],ymm3[6,7,8,9],ymm6[10],ymm3[11,12],ymm6[13],ymm3[14,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %xmm15
 ; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r8), %xmm3
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm6[0,1,2,3,6,7,4,5,6,7,4,5,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [2,2,2,3,8,8,8,9]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm17, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermd %ymm23, %ymm28, %ymm9
-; AVX512DQ-FAST-NEXT:    vpbroadcastd 32(%rax), %ymm20
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm20, %zmm9, %zmm20
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm31, %zmm20
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm11, %zmm30, %zmm20
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm0, %zmm16
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm5[4],xmm2[4],xmm5[5],xmm2[5],xmm5[6],xmm2[6],xmm5[7],xmm2[7]
-; AVX512DQ-FAST-NEXT:    vprold $16, %xmm5, %xmm5
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2],xmm2[3,4],xmm5[5],xmm2[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = [4,5,2,3,4,5,6,7,8,9,10,11,10,11,8,9]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm22, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm24, %xmm1
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm14, %xmm1
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm8[4],xmm14[4],xmm8[5],xmm14[5],xmm8[6],xmm14[6],xmm8[7],xmm14[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [2,2,2,3,8,8,8,9]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm16, %zmm10
+; AVX512DQ-FAST-NEXT:    vpermd %ymm14, %ymm31, %ymm9
+; AVX512DQ-FAST-NEXT:    vpbroadcastd 32(%rax), %ymm18
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm18, %zmm9, %zmm9
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm10, %zmm30, %zmm9
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm9
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm1, %zmm2
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; AVX512DQ-FAST-NEXT:    vprold $16, %xmm4, %xmm4
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0,1],xmm4[2],xmm5[3,4],xmm4[5],xmm5[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = [4,5,2,3,4,5,6,7,8,9,10,11,10,11,8,9]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm0, %xmm0
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm17, %zmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm24, %xmm0
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm12, %xmm0
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm8[4],xmm12[4],xmm8[5],xmm12[5],xmm8[6],xmm12[6],xmm8[7],xmm12[7]
 ; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[1,1,2,2]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm8[0],xmm1[1],xmm8[2,3],xmm1[4],xmm8[5,6],xmm1[7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm8[0],xmm0[1],xmm8[2,3],xmm0[4],xmm8[5,6],xmm0[7]
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,2,3,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,0,1,1,8,8,10,9]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm8, %zmm1
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm26, %zmm1
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm29, %xmm3
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm6, %xmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm27, %zmm3
-; AVX512DQ-FAST-NEXT:    vpbroadcastd 36(%rax), %ymm2
-; AVX512DQ-FAST-NEXT:    vpbroadcastd 40(%rax), %ymm4
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm2
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm21, %zmm2
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm0, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm11, %ymm0
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm7
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm27, %zmm0
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm25, %zmm0
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm3[4],xmm15[4],xmm3[5],xmm15[5],xmm3[6],xmm15[6],xmm3[7],xmm15[7]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm6, %xmm4
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm28, %zmm4
+; AVX512DQ-FAST-NEXT:    vpbroadcastd 36(%rax), %ymm3
+; AVX512DQ-FAST-NEXT:    vpbroadcastd 40(%rax), %ymm5
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm3, %zmm3
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm22, %zmm3
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm1, %zmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %ymm7, %ymm0, %ymm29
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512DQ-FAST-NEXT:    vpternlogd $226, 124(%r8){1to8}, %ymm0, %ymm11
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %ymm11, %ymm0, %ymm26
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm26, %zmm0, %zmm1
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm29[0,1,2,3],zmm1[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm12
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm3[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512DQ-FAST-NEXT:    vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm4 = mem[0,1,1,3,4,5,5,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7,8,9],ymm3[10],ymm4[11,12],ymm3[13],ymm4[14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
-; AVX512DQ-FAST-NEXT:    vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm5 = mem[1,1,1,1,5,5,5,5]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7,8,9],ymm5[10],ymm4[11,12],ymm5[13],ymm4[14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rax), %ymm5
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x8 {{.*#+}} zmm6 = [14,21,0,0,15,22,0,15,14,21,0,0,15,22,0,15]
-; AVX512DQ-FAST-NEXT:    # zmm6 = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermi2d %zmm5, %zmm25, %zmm6
+; AVX512DQ-FAST-NEXT:    vpunpckhwd (%rsp), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # xmm1 = xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # xmm4 = xmm4[4],mem[4],xmm4[5],mem[5],xmm4[6],mem[6],xmm4[7],mem[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # xmm5 = xmm5[4],mem[4],xmm5[5],mem[5],xmm5[6],mem[6],xmm5[7],mem[7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm1, %xmm1
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm5, %xmm5
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm6 = ymm6[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-FAST-NEXT:    vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm7 = mem[0,1,1,3,4,5,5,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm7[0,1],ymm6[2],ymm7[3,4],ymm6[5],ymm7[6,7,8,9],ymm6[10],ymm7[11,12],ymm6[13],ymm7[14,15]
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
+; AVX512DQ-FAST-NEXT:    vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm8 = mem[1,1,1,1,5,5,5,5]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm7[0,1],ymm8[2],ymm7[3,4],ymm8[5],ymm7[6,7,8,9],ymm8[10],ymm7[11,12],ymm8[13],ymm7[14,15]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [0,1,4,5,4,5,5,7]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm5, %ymm8, %ymm8
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[12,13],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[16,17],zero,zero
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpandn %ymm8, %ymm9, %ymm8
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm5, %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm6 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm6
-; AVX512DQ-FAST-NEXT:    vextracti64x4 $1, %zmm11, %ymm9
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogd $226, 124(%r8){1to8}, %ymm11, %ymm9
-; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} ymm11 = ymm15[0,1,2,3,5,5,7,6,8,9,10,11,13,13,15,14]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[3,3,3,3]
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm11
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm9
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm7[0,1,2,3],zmm9[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpshuflw $248, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # xmm9 = mem[0,2,3,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,0,2,1]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vpermd %ymm10, %ymm8, %ymm8
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[12,13],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[16,17],zero,zero
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535]
+; AVX512DQ-FAST-NEXT:    vpandn %ymm8, %ymm11, %ymm8
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm10, %zmm8
+; AVX512DQ-FAST-NEXT:    vpshuflw $248, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # xmm10 = mem[0,2,3,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,2,1]
 ; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,1,3]
-; AVX512DQ-FAST-NEXT:    vpshuflw $248, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # xmm11 = mem[0,2,3,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,2,1]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,1,3]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,1,3,2]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm7, %zmm8, %zmm14
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm7 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,2,3,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,2,1]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,1,3]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,1,3,2]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm11, %zmm13
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm10 # 32-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm7, %zmm26, %zmm1
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm7 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm7, %zmm26, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm1 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm0 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm7, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm7, %zmm4
-; AVX512DQ-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm7 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm5 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm5
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm10, %zmm25, %zmm1
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm5 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm25, %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535]
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm1 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm5 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm4, %zmm4
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm6
+; AVX512DQ-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm6
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm4 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm8 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm8
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, 320(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, 256(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm20, 192(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, 128(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, (%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm18, 448(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, 704(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, 640(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm2, 576(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, 384(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, 64(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, 320(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, 256(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, 192(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm19, 128(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm20, (%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, 448(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, 704(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm21, 640(%rax)
+; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 576(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, 384(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, 64(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, 512(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm14, 832(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, 768(%rax)
-; AVX512DQ-FAST-NEXT:    addq $1432, %rsp # imm = 0x598
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, 832(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, 768(%rax)
+; AVX512DQ-FAST-NEXT:    addq $1496, %rsp # imm = 0x5D8
 ; AVX512DQ-FAST-NEXT:    vzeroupper
 ; AVX512DQ-FAST-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
index 270bcc23b715820..424365ce60d4a1a 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
@@ -3897,11 +3897,11 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm1
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11>
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm1, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm29
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm30
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm4
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128>
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm4, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm30
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm31
 ; AVX512F-SLOW-NEXT:    vpor %xmm0, %xmm1, %xmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm9
@@ -3910,8 +3910,7 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm11
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u>
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm11, %ymm10
-; AVX512F-SLOW-NEXT:    vpor %ymm4, %ymm10, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vporq %ymm4, %ymm10, %ymm18
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm13
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12>
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm13, %xmm4
@@ -3920,7 +3919,7 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128>
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm10, %xmm12
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm26
-; AVX512F-SLOW-NEXT:    vporq %xmm4, %xmm12, %xmm31
+; AVX512F-SLOW-NEXT:    vporq %xmm4, %xmm12, %xmm19
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm3[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm22
@@ -3942,23 +3941,22 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vporq %ymm2, %ymm3, %ymm21
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm0, %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm17
-; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm8
-; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm8, %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm16
+; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm7
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm7, %xmm3
 ; AVX512F-SLOW-NEXT:    vporq %xmm2, %xmm3, %xmm27
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm1
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm0
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm19
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm17
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm8
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm1
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm2, %xmm15
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm16
-; AVX512F-SLOW-NEXT:    vporq %xmm0, %xmm15, %xmm28
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm8, %xmm15
+; AVX512F-SLOW-NEXT:    vporq %xmm0, %xmm15, %xmm29
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = mem[1,1,2,2]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,1,1]
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm25 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255]
-; AVX512F-SLOW-NEXT:    vpandnq %ymm0, %ymm25, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
+; AVX512F-SLOW-NEXT:    vpandnq %ymm0, %ymm28, %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %ymm15
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm15, %ymm14
@@ -3980,262 +3978,266 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[27],zero,zero,ymm6[26],zero,ymm6[28],zero,ymm6[30],zero,zero,ymm6[29],zero,ymm6[31],zero,zero
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm10 = xmm13[0],xmm10[0],xmm13[1],xmm10[1],xmm13[2],xmm10[2],xmm13[3],xmm10[3],xmm13[4],xmm10[4],xmm13[5],xmm10[5],xmm13[6],xmm10[6],xmm13[7],xmm10[7]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm13 = ymm5[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = ymm5[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero,zero
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm4[26],zero,ymm4[28],zero,zero,ymm4[27],zero,ymm4[29],zero,ymm4[31],zero,zero,ymm4[30],zero
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm30, %xmm3
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 (%r8), %zmm29
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm4[26],zero,ymm4[28],zero,zero,ymm4[27],zero,ymm4[29],zero,ymm4[31],zero,zero,ymm4[30],zero
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm30, %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm31, %xmm4
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <4,u,5,5,5,5,u,6,6,6,6,u,7,7,7,7>
-; AVX512F-SLOW-NEXT:    vpermd %zmm15, %zmm1, %zmm4
+; AVX512F-SLOW-NEXT:    vpermd %zmm15, %zmm1, %zmm30
+; AVX512F-SLOW-NEXT:    vmovdqa64 (%r8), %zmm31
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <6,6,6,u,7,7,7,7,u,16,16,16,16,u,17,17>
-; AVX512F-SLOW-NEXT:    vpermi2d %zmm15, %zmm29, %zmm1
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm30 = ymm9[2,2,3,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm18 = ymm11[2,2,3,3]
+; AVX512F-SLOW-NEXT:    vpermi2d %zmm15, %zmm31, %zmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm15
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm15[0],xmm7[0],xmm15[1],xmm7[1],xmm15[2],xmm7[2],xmm15[3],xmm7[3],xmm15[4],xmm7[4],xmm15[5],xmm7[5],xmm15[6],xmm7[6],xmm15[7],xmm7[7]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = <0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm4, %xmm4
+; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm7, %xmm7
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,3,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,2,3,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,3,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm9
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = <0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm8, %xmm8
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm27, %zmm7, %zmm27
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = <2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8>
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm10, %xmm10
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,1,1]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,1,1]
-; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm27, %zmm8, %zmm8
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm9
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm11
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm9 = xmm9[0],xmm11[0],xmm9[1],xmm11[1],xmm9[2],xmm11[2],xmm9[3],xmm11[3],xmm9[4],xmm11[4],xmm9[5],xmm11[5],xmm9[6],xmm11[6],xmm9[7],xmm11[7]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm28, %zmm9, %zmm9
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm7
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3],xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm8, %xmm8
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm29, %zmm8, %zmm8
 ; AVX512F-SLOW-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # ymm15 = mem[0,0,1,1]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm15, %zmm15 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm31[0,0,1,1]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm16 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm19[0,0,1,1]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm18, %zmm16, %zmm16
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
 ; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm15, %zmm17, %zmm16
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm28, %zmm16, %zmm26
 ; AVX512F-SLOW-NEXT:    vpor %ymm2, %ymm14, %ymm2
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm20, %zmm2
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm14 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400]
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400,72056498804555775,72056498804555775,18374967950370078975,18374967950370078975]
 ; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm14, %ymm13, %ymm0
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm21, %zmm0
 ; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm17, %zmm0
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm2 = zmm22[2,2,3,3,6,6,7,7]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm13 = zmm23[2,2,3,3,6,6,7,7]
-; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm13
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm14, %zmm2, %zmm13
 ; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm14, %ymm12, %ymm6
-; AVX512F-SLOW-NEXT:    vpandq %ymm14, %ymm30, %ymm2
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm18, %zmm2
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm11 = zmm24[2,2,3,3,6,6,7,7]
-; AVX512F-SLOW-NEXT:    vporq %zmm11, %zmm2, %zmm2
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm13, %zmm11, %zmm2
+; AVX512F-SLOW-NEXT:    vpand %ymm14, %ymm9, %ymm2
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm11, %zmm2
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm9 = zmm24[2,2,3,3,6,6,7,7]
+; AVX512F-SLOW-NEXT:    vporq %zmm9, %zmm2, %zmm2
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm13, %zmm9, %zmm2
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm6, %zmm6
-; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm7, %ymm5
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm5, %zmm3
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm6, %zmm11, %zmm3
-; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm16, %zmm26
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm4
+; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm3
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm6, %zmm9, %zmm3
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm30
 ; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm25
 ; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm1
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm0 = zmm8[0,0,1,1,4,4,5,5]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm2 = zmm9[0,0,1,1,4,4,5,5]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm0 = zmm27[0,0,1,1,4,4,5,5]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm2 = zmm8[0,0,1,1,4,4,5,5]
 ; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <u,0,0,0,0,u,1,1,1,1,u,2,2,2,2,u>
-; AVX512F-SLOW-NEXT:    vpermd %zmm29, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vpermd %zmm31, %zmm0, %zmm0
 ; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm25, 64(%r9)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm0, (%r9)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm1, 128(%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm4, 256(%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm30, 256(%r9)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm26, 192(%r9)
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
 ;
 ; AVX512F-FAST-LABEL: store_i8_stride5_vf64:
 ; AVX512F-FAST:       # %bb.0:
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %ymm7
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128,128]
-; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm7, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm3
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <12,13,128,15,12,13,14,128,12,13,14,15,128,u,u,u,16,128,18,19,16,17,128,19,16,17,18,128,16,17,18,19>
-; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm3, %ymm1
-; AVX512F-FAST-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX512F-FAST-NEXT:    pushq %rax
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %ymm3
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128,128]
+; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm3, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = <12,13,128,15,12,13,14,128,12,13,14,15,128,u,u,u,16,128,18,19,16,17,128,19,16,17,18,128,16,17,18,19>
+; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm1, %ymm2
+; AVX512F-FAST-NEXT:    vpor %ymm0, %ymm2, %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = <8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11>
-; AVX512F-FAST-NEXT:    vpshufb %xmm15, %xmm1, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm1, %xmm18
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm2
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128>
-; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm2, %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm4, %xmm25
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm17
-; AVX512F-FAST-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm2
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = <8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11>
+; AVX512F-FAST-NEXT:    vpshufb %xmm6, %xmm2, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm19
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm4
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128>
+; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm4, %xmm2
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm4, %xmm18
+; AVX512F-FAST-NEXT:    vpor %xmm0, %xmm2, %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %ymm9
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %ymm7
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm9, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %ymm8
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm8, %ymm4
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm7, %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %ymm9
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm9, %ymm4
 ; AVX512F-FAST-NEXT:    vpor %ymm2, %ymm4, %ymm2
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %xmm10
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %xmm12
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12>
-; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm10, %xmm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm4, %xmm26
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %xmm11
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128>
-; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm11, %xmm4
-; AVX512F-FAST-NEXT:    vporq %xmm2, %xmm4, %xmm21
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm7[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm7[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm4, %zmm22
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm3[26],zero,ymm3[28],zero,zero,ymm3[27],zero,ymm3[29],zero,ymm3[31],zero,zero,ymm3[30],zero
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[21],zero,zero,ymm3[20],zero,ymm3[22],zero,ymm3[24],zero,zero,ymm3[23],zero,ymm3[25],zero,zero
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm23
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm8[27],zero,zero,ymm8[26],zero,ymm8[28],zero,ymm8[30],zero,zero,ymm8[29],zero,ymm8[31],zero,zero
-; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128]
-; AVX512F-FAST-NEXT:    # ymm4 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm9, %ymm3
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm4, %ymm30
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm24
-; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm7
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm7, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %ymm12
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm12, %ymm1
-; AVX512F-FAST-NEXT:    vporq %ymm0, %ymm1, %ymm19
-; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm5
-; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm5, %ymm0
+; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm12, %xmm2
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm4, %xmm25
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %xmm10
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128>
+; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm10, %xmm4
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm5, %xmm26
+; AVX512F-FAST-NEXT:    vpor %xmm2, %xmm4, %xmm2
+; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm3[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm21
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[26],zero,ymm1[28],zero,zero,ymm1[27],zero,ymm1[29],zero,ymm1[31],zero,zero,ymm1[30],zero
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25],zero,zero
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm22
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm9[27],zero,zero,ymm9[26],zero,ymm9[28],zero,ymm9[30],zero,zero,ymm9[29],zero,ymm9[31],zero,zero
+; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128]
+; AVX512F-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm7, %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm3, %ymm31
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm23
+; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm5
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm5, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %ymm14
+; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm14, %ymm1
+; AVX512F-FAST-NEXT:    vporq %ymm0, %ymm1, %ymm24
+; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm15
+; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm15, %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm4
-; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm4, %ymm1
+; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm4, %ymm1
 ; AVX512F-FAST-NEXT:    vporq %ymm0, %ymm1, %ymm20
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX512F-FAST-NEXT:    vpshufb %xmm15, %xmm1, %xmm0
+; AVX512F-FAST-NEXT:    vpshufb %xmm6, %xmm1, %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqa64 %xmm1, %xmm16
 ; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm3
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm25, %xmm1
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm3, %xmm2
+; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm3, %xmm2
 ; AVX512F-FAST-NEXT:    vporq %xmm0, %xmm2, %xmm28
-; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm6
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm26, %xmm0
-; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm6, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm2
-; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm2, %xmm14
-; AVX512F-FAST-NEXT:    vporq %xmm0, %xmm14, %xmm29
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [1,1,2,2,2,2,2,2]
-; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %ymm0
-; AVX512F-FAST-NEXT:    vpermd %ymm0, %ymm14, %ymm14
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm25 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255]
-; AVX512F-FAST-NEXT:    vpandnq %ymm14, %ymm25, %ymm14
+; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm25, %xmm0
+; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm1, %xmm17
+; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm11
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm26, %xmm1
+; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm11, %xmm8
+; AVX512F-FAST-NEXT:    vporq %xmm0, %xmm8, %xmm29
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [1,1,2,2,2,2,2,2]
+; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %ymm8
+; AVX512F-FAST-NEXT:    vpermd %ymm8, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
+; AVX512F-FAST-NEXT:    vpandnq %ymm0, %ymm25, %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm15
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm14, %zmm27
-; AVX512F-FAST-NEXT:    vmovdqa64 (%r8), %zmm26
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm31 = <4,u,5,5,5,5,u,6,30,30,30,u,31,31,31,31>
-; AVX512F-FAST-NEXT:    vpermi2d %zmm26, %zmm0, %zmm31
-; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm0
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <4,u,5,5,5,5,u,6>
-; AVX512F-FAST-NEXT:    vpermd %ymm0, %ymm15, %ymm15
-; AVX512F-FAST-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm15, %ymm15
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm1, %zmm25
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm8, %ymm13
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm0, %zmm26
+; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm2
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm2, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm30 = <4,u,5,5,5,5,u,6,30,30,30,u,31,31,31,31>
+; AVX512F-FAST-NEXT:    vpermd %ymm2, %ymm30, %ymm27
+; AVX512F-FAST-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm27, %ymm27
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm27, %zmm1, %zmm27
 ; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12]
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm9, %ymm9
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm30, %ymm13
-; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm7, %ymm15
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm7, %ymm1
-; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25]
-; AVX512F-FAST-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm8, %ymm8
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm12, %ymm7
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm12[27],zero,zero,ymm12[26],zero,ymm12[28],zero,ymm12[30],zero,zero,ymm12[29],zero,ymm12[31],zero,zero
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm10 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3],xmm10[4],xmm11[4],xmm10[5],xmm11[5],xmm10[6],xmm11[6],xmm10[7],xmm11[7]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm30 = ymm9[2,2,3,3]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm5[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm9 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero,zero
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm7, %ymm7
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm31, %ymm0
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm5, %ymm13
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm5, %ymm6
+; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25]
+; AVX512F-FAST-NEXT:    # ymm5 = mem[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm9, %ymm9
+; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm14, %ymm5
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm14[27],zero,zero,ymm14[26],zero,ymm14[28],zero,ymm14[30],zero,zero,ymm14[29],zero,ymm14[31],zero,zero
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm10 = xmm12[0],xmm10[0],xmm12[1],xmm10[1],xmm12[2],xmm10[2],xmm12[3],xmm10[3],xmm12[4],xmm10[4],xmm12[5],xmm10[5],xmm12[6],xmm10[6],xmm12[7],xmm10[7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm15[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm15[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm31 = ymm7[2,2,3,3]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero,zero
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm4[26],zero,ymm4[28],zero,zero,ymm4[27],zero,ymm4[29],zero,ymm4[31],zero,zero,ymm4[30],zero
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm18, %xmm14
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm17, %xmm13
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm14 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,3,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm19, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm18, %xmm1
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm16, %xmm1
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13>
+; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
+; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm3, %xmm1
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm9[2,2,3,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm13[2,2,3,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,3,3]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm16, %xmm13
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm13[0],xmm3[0],xmm13[1],xmm3[1],xmm13[2],xmm3[2],xmm13[3],xmm3[3],xmm13[4],xmm3[4],xmm13[5],xmm3[5],xmm13[6],xmm3[6],xmm13[7],xmm3[7]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13>
-; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm14, %xmm14
-; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm3, %xmm3
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8>
-; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm10, %xmm10
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm14[2,2,3,3]
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm28, %zmm1, %zmm18
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8>
+; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm10, %xmm10
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,1,1]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,3,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,0,1,1]
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm28, %zmm3, %zmm3
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm2, %xmm2
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm29, %zmm2, %zmm2
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm26, %zmm0
-; AVX512F-FAST-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm6 = mem[0,0,1,1]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm6 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm21[0,0,1,1]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm13 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,1,1]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm17, %xmm1
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm1[0],xmm11[0],xmm1[1],xmm11[1],xmm1[2],xmm11[2],xmm1[3],xmm11[3],xmm1[4],xmm11[4],xmm1[5],xmm11[5],xmm1[6],xmm11[6],xmm1[7],xmm11[7]
+; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm11, %xmm11
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm29, %zmm11, %zmm11
+; AVX512F-FAST-NEXT:    vmovdqa64 (%r8), %zmm14
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm14, %zmm2
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm21 = zmm21[2,2,3,3,6,6,7,7]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm22 = zmm22[2,2,3,3,6,6,7,7]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400,72056498804555775,72056498804555775,18374967950370078975,18374967950370078975]
+; AVX512F-FAST-NEXT:    vpternlogq $248, %zmm28, %zmm21, %zmm22
+; AVX512F-FAST-NEXT:    vpandq %ymm28, %ymm31, %ymm21
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm21, %zmm3, %zmm3
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm21 = zmm23[2,2,3,3,6,6,7,7]
+; AVX512F-FAST-NEXT:    vporq %zmm21, %zmm3, %zmm3
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm22, %zmm21, %zmm3
+; AVX512F-FAST-NEXT:    vpermt2d %zmm14, %zmm30, %zmm8
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm8
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm8, 256(%r9)
+; AVX512F-FAST-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm3 = mem[0,0,1,1]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm8 = mem[0,0,1,1]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm8 # 32-byte Folded Reload
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm6, %zmm16, %zmm13
-; AVX512F-FAST-NEXT:    vpor %ymm7, %ymm15, %ymm6
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm19, %zmm6
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400]
-; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm7, %ymm11, %ymm9
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm20, %zmm9
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm6, %zmm16, %zmm9
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm6 = zmm22[2,2,3,3,6,6,7,7]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm11 = zmm23[2,2,3,3,6,6,7,7]
-; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm11
-; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm7, %ymm1, %ymm12
-; AVX512F-FAST-NEXT:    vpandq %ymm7, %ymm30, %ymm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm8, %zmm1
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm6 = zmm24[2,2,3,3,6,6,7,7]
-; AVX512F-FAST-NEXT:    vporq %zmm6, %zmm1, %zmm1
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm11, %zmm6, %zmm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm12, %zmm7
-; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm4
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm4, %zmm4
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm7, %zmm6, %zmm4
-; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm13, %zmm27
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm31
-; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm25
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <6,6,6,u,7,7,7,7,u,8,8,8,8,u,9,9>
-; AVX512F-FAST-NEXT:    vpermd %zmm26, %zmm1, %zmm1
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm1
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm3 = zmm3[0,0,1,1,4,4,5,5]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm2 = zmm2[0,0,1,1,4,4,5,5]
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm2
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <u,0,0,0,0,u,1,1,9,9,u,10,10,10,10,u>
-; AVX512F-FAST-NEXT:    vpermd %zmm0, %zmm3, %zmm0
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm25, 64(%r9)
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm16, %zmm8
+; AVX512F-FAST-NEXT:    vpternlogq $248, %zmm25, %zmm8, %zmm26
+; AVX512F-FAST-NEXT:    vpor %ymm5, %ymm9, %ymm3
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm24, %zmm3
+; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm28, %ymm12, %ymm7
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm20, %zmm5
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm16, %zmm5
+; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm28, %ymm6, %ymm13
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm13, %zmm3
+; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm15, %ymm4
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm0
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm21, %zmm0
+; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm27
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <6,6,6,u,7,7,7,7,u,8,8,8,8,u,9,9>
+; AVX512F-FAST-NEXT:    vpermd %zmm14, %zmm3, %zmm3
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm3
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm0 = zmm18[0,0,1,1,4,4,5,5]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm1 = zmm11[0,0,1,1,4,4,5,5]
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <u,0,0,0,0,u,1,1,9,9,u,10,10,10,10,u>
+; AVX512F-FAST-NEXT:    vpermd %zmm2, %zmm0, %zmm0
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm27, 64(%r9)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, (%r9)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, 128(%r9)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm31, 256(%r9)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm27, 192(%r9)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm3, 128(%r9)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm26, 192(%r9)
+; AVX512F-FAST-NEXT:    popq %rax
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
 ;
@@ -4422,116 +4424,120 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-LABEL: store_i8_stride5_vf64:
 ; AVX512BW-FAST:       # %bb.0:
 ; AVX512BW-FAST-NEXT:    vmovdqa64 (%r8), %zmm5
-; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdx), %ymm19
+; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdx), %ymm21
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,128,12,13,128,128,128,128,14,128,128,128,14,15,128,128,128,128,16,128,128,128,16,17,128,128,128,128,18,128,128,128]
-; AVX512BW-FAST-NEXT:    vpshufb %ymm1, %ymm19, %ymm0
+; AVX512BW-FAST-NEXT:    vpshufb %ymm1, %ymm21, %ymm0
 ; AVX512BW-FAST-NEXT:    vmovdqa 32(%rcx), %ymm13
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
 ; AVX512BW-FAST-NEXT:    vpshufb %ymm2, %ymm13, %ymm3
 ; AVX512BW-FAST-NEXT:    vpor %ymm0, %ymm3, %ymm0
 ; AVX512BW-FAST-NEXT:    vmovdqa (%rcx), %xmm6
-; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rcx), %xmm16
+; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rcx), %xmm18
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm8, %xmm16, %xmm3
+; AVX512BW-FAST-NEXT:    vpshufb %xmm8, %xmm18, %xmm3
 ; AVX512BW-FAST-NEXT:    vmovdqa (%rdx), %xmm7
-; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdx), %xmm18
+; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdx), %xmm20
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm10, %xmm18, %xmm4
+; AVX512BW-FAST-NEXT:    vpshufb %xmm10, %xmm20, %xmm4
 ; AVX512BW-FAST-NEXT:    vpor %xmm3, %xmm4, %xmm3
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,1,1]
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %xmm9
-; AVX512BW-FAST-NEXT:    vmovdqa 32(%rdi), %xmm15
+; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdi), %xmm17
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm12, %xmm15, %xmm3
+; AVX512BW-FAST-NEXT:    vpshufb %xmm12, %xmm17, %xmm3
 ; AVX512BW-FAST-NEXT:    vmovdqa (%rsi), %xmm11
-; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rsi), %xmm17
+; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rsi), %xmm19
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm14, %xmm17, %xmm4
+; AVX512BW-FAST-NEXT:    vpshufb %xmm14, %xmm19, %xmm4
 ; AVX512BW-FAST-NEXT:    vpor %xmm3, %xmm4, %xmm3
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm20 = ymm3[0,0,1,1]
-; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdi), %ymm21
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm3[0,0,1,1]
+; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdi), %ymm16
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <3,3,3,u,4,4,4,4>
-; AVX512BW-FAST-NEXT:    vpermd %ymm21, %ymm3, %ymm22
+; AVX512BW-FAST-NEXT:    vpermd %ymm16, %ymm3, %ymm22
 ; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rsi), %ymm23
 ; AVX512BW-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14]
 ; AVX512BW-FAST-NEXT:    movl $138547332, %eax # imm = 0x8421084
 ; AVX512BW-FAST-NEXT:    kmovd %eax, %k1
 ; AVX512BW-FAST-NEXT:    vpshufb %ymm4, %ymm23, %ymm22 {%k1}
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm22, %zmm20, %zmm20
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm22, %zmm15, %zmm15
 ; AVX512BW-FAST-NEXT:    movabsq $-8330787646191410408, %rax # imm = 0x8C6318C6318C6318
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k2
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm20, %zmm0 {%k2}
-; AVX512BW-FAST-NEXT:    vmovdqa64 32(%r8), %ymm20
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm22 = <1,1,2,2,2,2,2,2,27,27,27,27,u,28,28,28>
-; AVX512BW-FAST-NEXT:    vpermi2d %zmm5, %zmm20, %zmm22
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm15, %zmm0 {%k2}
+; AVX512BW-FAST-NEXT:    vmovdqa64 32(%r8), %ymm24
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm15 = <1,1,2,2,2,2,2,2,27,27,27,27,u,28,28,28>
+; AVX512BW-FAST-NEXT:    vpermi2d %zmm5, %zmm24, %zmm15
 ; AVX512BW-FAST-NEXT:    movabsq $4760450083537948804, %rax # imm = 0x4210842108421084
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k3
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm22, %zmm0 {%k3}
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm15, %zmm0 {%k3}
 ; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm22 = zmm23[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm22 = zmm22[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,zmm22[21],zero,zero,zmm22[20],zero,zmm22[22],zero,zmm22[24],zero,zero,zmm22[23],zero,zmm22[25],zero,zmm22[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,59],zero,zero,zmm22[58],zero,zmm22[60],zero,zero,zero,zero,zmm22[61],zero,zmm22[63],zero,zero,zmm22[62]
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm15 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30>
+; AVX512BW-FAST-NEXT:    vpshufb %zmm15, %zmm22, %zmm22
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm22 = zmm22[2,2,3,3,6,6,7,7]
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm21 = zmm21[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm21 = zmm21[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm21[21],zero,zero,zmm21[20],zero,zmm21[22],zero,zmm21[24],zero,zero,zmm21[23],zero,zmm21[25],zero,zero,zmm21[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm21[58],zero,zmm21[60],zero,zero,zmm21[59],zero,zmm21[61],zero,zmm21[63],zero,zero,zmm21[62],zero
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm21 = zmm21[2,2,3,3,6,6,7,7]
-; AVX512BW-FAST-NEXT:    vporq %zmm22, %zmm21, %zmm21
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm19 = zmm19[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm19 = zmm19[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,zmm19[21],zero,zmm19[21,20],zero,zmm19[22],zero,zmm19[24],zero,zmm19[22,23],zero,zmm19[25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,57],zero,zmm19[59],zero,zero,zmm19[58],zero,zmm19[60],zero,zmm19[62],zero,zero,zmm19[61],zero,zmm19[63],zero
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm19 = zmm19[2,2,3,3,6,6,7,7]
+; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm23 = zmm16[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm16 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128>
+; AVX512BW-FAST-NEXT:    vpshufb %zmm16, %zmm23, %zmm23
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm23 = zmm23[2,2,3,3,6,6,7,7]
+; AVX512BW-FAST-NEXT:    vporq %zmm22, %zmm23, %zmm23
+; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm22 = zmm21[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm21 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128>
+; AVX512BW-FAST-NEXT:    vpshufb %zmm21, %zmm22, %zmm22
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm25 = zmm22[2,2,3,3,6,6,7,7]
 ; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm13[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm13 = zmm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm13[19],zero,zmm13[21],zero,zero,zmm13[20],zero,zmm13[22],zero,zmm13[24],zero,zero,zmm13[23],zero,zmm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm13[59],zero,zero,zmm13[58],zero,zmm13[60],zero,zmm13[62],zero,zero,zmm13[61],zero,zmm13[63],zero,zero
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm22 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128>
+; AVX512BW-FAST-NEXT:    vpshufb %zmm22, %zmm13, %zmm13
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm13 = zmm13[2,2,3,3,6,6,7,7]
-; AVX512BW-FAST-NEXT:    vporq %zmm19, %zmm13, %zmm13
+; AVX512BW-FAST-NEXT:    vporq %zmm25, %zmm13, %zmm13
 ; AVX512BW-FAST-NEXT:    movabsq $1785168781326730801, %rax # imm = 0x18C6318C6318C631
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k3
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm21, %zmm13 {%k3}
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm19 = [4,6,5,5,5,5,4,6,30,30,30,30,31,31,31,31]
-; AVX512BW-FAST-NEXT:    vpermi2d %zmm5, %zmm20, %zmm19
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm23, %zmm13 {%k3}
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [4,6,5,5,5,5,4,6,30,30,30,30,31,31,31,31]
+; AVX512BW-FAST-NEXT:    vpermi2d %zmm5, %zmm24, %zmm23
 ; AVX512BW-FAST-NEXT:    movabsq $-8925843906633654008, %rax # imm = 0x8421084210842108
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k4
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm19, %zmm13 {%k4}
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%rcx), %ymm19
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm20 = ymm19[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm19[27],zero,zero,ymm19[26],zero,ymm19[28],zero,ymm19[30],zero,zero,ymm19[29],zero,ymm19[31],zero
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm20 = ymm20[2,2,3,3]
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdx), %ymm21
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm22 = ymm21[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm21[27],zero,zero,ymm21[26],zero,ymm21[28],zero,ymm21[30],zero,zero,ymm21[29],zero,ymm21[31],zero,zero
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm22 = ymm22[2,2,3,3]
-; AVX512BW-FAST-NEXT:    vporq %ymm20, %ymm22, %ymm20
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm16 = xmm16[0],xmm18[0],xmm16[1],xmm18[1],xmm16[2],xmm18[2],xmm16[3],xmm18[3],xmm16[4],xmm18[4],xmm16[5],xmm18[5],xmm16[6],xmm18[6],xmm16[7],xmm18[7]
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm18 = <2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm18, %xmm16, %xmm16
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm16 = ymm16[0,0,1,1]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm20, %zmm20
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%rsi), %ymm16
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm22 = ymm16[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm16[26],zero,ymm16[28],zero,zero,zero,zero,ymm16[29],zero,ymm16[31],zero,zero,ymm16[30]
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm22 = ymm22[2,2,3,3]
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdi), %ymm23
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm24 = ymm23[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm23[26],zero,ymm23[28],zero,zero,ymm23[27],zero,ymm23[29],zero,ymm23[31],zero,zero,ymm23[30],zero
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm24 = ymm24[2,2,3,3]
-; AVX512BW-FAST-NEXT:    vporq %ymm22, %ymm24, %ymm22
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm15 = xmm15[0],xmm17[0],xmm15[1],xmm17[1],xmm15[2],xmm17[2],xmm15[3],xmm17[3],xmm15[4],xmm17[4],xmm15[5],xmm17[5],xmm15[6],xmm17[6],xmm15[7],xmm17[7]
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm17, %xmm15, %xmm15
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[0,0,1,1]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm22, %zmm15
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm20, %zmm15 {%k3}
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [6,6,6,6,7,7,7,7,8,8,8,8,8,8,9,9]
-; AVX512BW-FAST-NEXT:    vpermd %zmm5, %zmm20, %zmm5
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm23, %zmm13 {%k4}
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%rcx), %ymm23
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm24 = ymm23[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm23[27],zero,zero,ymm23[26],zero,ymm23[28],zero,ymm23[30],zero,zero,ymm23[29],zero,ymm23[31],zero
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm25 = ymm24[2,2,3,3]
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdx), %ymm24
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm26 = ymm24[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm24[27],zero,zero,ymm24[26],zero,ymm24[28],zero,ymm24[30],zero,zero,ymm24[29],zero,ymm24[31],zero,zero
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm26 = ymm26[2,2,3,3]
+; AVX512BW-FAST-NEXT:    vporq %ymm25, %ymm26, %ymm25
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm18 = xmm18[0],xmm20[0],xmm18[1],xmm20[1],xmm18[2],xmm20[2],xmm18[3],xmm20[3],xmm18[4],xmm20[4],xmm18[5],xmm20[5],xmm18[6],xmm20[6],xmm18[7],xmm20[7]
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm26 = <2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8>
+; AVX512BW-FAST-NEXT:    vpshufb %xmm26, %xmm18, %xmm18
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm18 = ymm18[0,0,1,1]
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm18, %zmm25, %zmm25
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%rsi), %ymm18
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm20 = ymm18[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm18[26],zero,ymm18[28],zero,zero,zero,zero,ymm18[29],zero,ymm18[31],zero,zero,ymm18[30]
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm27 = ymm20[2,2,3,3]
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdi), %ymm20
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm28 = ymm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm20[26],zero,ymm20[28],zero,zero,ymm20[27],zero,ymm20[29],zero,ymm20[31],zero,zero,ymm20[30],zero
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm28 = ymm28[2,2,3,3]
+; AVX512BW-FAST-NEXT:    vporq %ymm27, %ymm28, %ymm27
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm17 = xmm17[0],xmm19[0],xmm17[1],xmm19[1],xmm17[2],xmm19[2],xmm17[3],xmm19[3],xmm17[4],xmm19[4],xmm17[5],xmm19[5],xmm17[6],xmm19[6],xmm17[7],xmm19[7]
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm19 = <0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13>
+; AVX512BW-FAST-NEXT:    vpshufb %xmm19, %xmm17, %xmm17
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm17 = ymm17[0,0,1,1]
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm17, %zmm27, %zmm17
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm25, %zmm17 {%k3}
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = [6,6,6,6,7,7,7,7,8,8,8,8,8,8,9,9]
+; AVX512BW-FAST-NEXT:    vpermd %zmm5, %zmm25, %zmm5
 ; AVX512BW-FAST-NEXT:    movabsq $2380225041768974402, %rax # imm = 0x2108421084210842
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k3
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm5, %zmm15 {%k3}
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm5, %zmm17 {%k3}
 ; AVX512BW-FAST-NEXT:    vpshufb %xmm8, %xmm6, %xmm5
 ; AVX512BW-FAST-NEXT:    vpshufb %xmm10, %xmm7, %xmm8
 ; AVX512BW-FAST-NEXT:    vpor %xmm5, %xmm8, %xmm5
 ; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
-; AVX512BW-FAST-NEXT:    vpshufb %xmm18, %xmm6, %xmm6
+; AVX512BW-FAST-NEXT:    vpshufb %xmm26, %xmm6, %xmm6
 ; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm6, %zmm5
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm5 = zmm5[0,0,1,1,4,4,5,5]
 ; AVX512BW-FAST-NEXT:    vpshufb %xmm12, %xmm9, %xmm6
 ; AVX512BW-FAST-NEXT:    vpshufb %xmm14, %xmm11, %xmm7
 ; AVX512BW-FAST-NEXT:    vpor %xmm6, %xmm7, %xmm6
 ; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm9[0],xmm11[0],xmm9[1],xmm11[1],xmm9[2],xmm11[2],xmm9[3],xmm11[3],xmm9[4],xmm11[4],xmm9[5],xmm11[5],xmm9[6],xmm11[6],xmm9[7],xmm11[7]
-; AVX512BW-FAST-NEXT:    vpshufb %xmm17, %xmm7, %xmm7
+; AVX512BW-FAST-NEXT:    vpshufb %xmm19, %xmm7, %xmm7
 ; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm6, %zmm7, %zmm6
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm6 = zmm6[0,0,1,1,4,4,5,5]
 ; AVX512BW-FAST-NEXT:    movabsq $-4165393823095705204, %rax # imm = 0xC6318C6318C6318C
@@ -4543,22 +4549,22 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    movabsq $595056260442243600, %rax # imm = 0x842108421084210
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k3
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm7, %zmm6 {%k3}
-; AVX512BW-FAST-NEXT:    vpshufb %ymm1, %ymm21, %ymm1
-; AVX512BW-FAST-NEXT:    vpshufb %ymm2, %ymm19, %ymm2
-; AVX512BW-FAST-NEXT:    vpor %ymm1, %ymm2, %ymm1
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm21[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm21[21],zero,ymm21[21,20],zero,ymm21[22],zero,ymm21[24],zero,ymm21[22,23],zero,ymm21[25]
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm19[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm19[19],zero,ymm19[21],zero,zero,ymm19[20],zero,ymm19[22],zero,ymm19[24],zero,zero,ymm19[23],zero
+; AVX512BW-FAST-NEXT:    vpshufb %ymm21, %ymm24, %ymm7
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512BW-FAST-NEXT:    vpor %ymm2, %ymm7, %ymm2
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm16[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm16[21],zero,zero,ymm16[20],zero,ymm16[22],zero,ymm16[24],zero,zero,ymm16[23],zero,ymm16[25],zero
+; AVX512BW-FAST-NEXT:    vpshufb %ymm22, %ymm23, %ymm8
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
+; AVX512BW-FAST-NEXT:    vpor %ymm7, %ymm8, %ymm7
+; AVX512BW-FAST-NEXT:    vpshufb %ymm1, %ymm24, %ymm1
+; AVX512BW-FAST-NEXT:    vpshufb %ymm2, %ymm23, %ymm2
+; AVX512BW-FAST-NEXT:    vpor %ymm1, %ymm2, %ymm1
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm1, %zmm1
+; AVX512BW-FAST-NEXT:    vpshufb %ymm15, %ymm18, %ymm2
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm23[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm23[21],zero,zero,ymm23[20],zero,ymm23[22],zero,ymm23[24],zero,zero,ymm23[23],zero,ymm23[25],zero,zero
+; AVX512BW-FAST-NEXT:    vpshufb %ymm16, %ymm20, %ymm7
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
 ; AVX512BW-FAST-NEXT:    vpor %ymm2, %ymm7, %ymm2
-; AVX512BW-FAST-NEXT:    vpermd %ymm23, %ymm3, %ymm3
-; AVX512BW-FAST-NEXT:    vpshufb %ymm4, %ymm16, %ymm3 {%k1}
+; AVX512BW-FAST-NEXT:    vpermd %ymm20, %ymm3, %ymm3
+; AVX512BW-FAST-NEXT:    vpshufb %ymm4, %ymm18, %ymm3 {%k1}
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm1, %zmm2 {%k2}
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <3,3,3,3,u,4,4,4,12,14,13,13,13,13,12,14>
@@ -4568,7 +4574,7 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm1, %zmm2 {%k1}
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm2, 64(%r9)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm6, (%r9)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm15, 128(%r9)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm17, 128(%r9)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm13, 256(%r9)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, 192(%r9)
 ; AVX512BW-FAST-NEXT:    vzeroupper
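
Beyond the register renumbering, the intended constant sharing is visible in the
interleaved-store tests above: a shuffle-mask constant that used to be
materialized twice, once as a 16-byte xmm broadcast and once as a 32-byte ymm
broadcast of the same repeated pattern, is now loaded once at ymm width and fed
to both the xmm and ymm vpshufb uses. In IR terms the shape being caught is
roughly the following; this is an illustrative sketch only (@mask and the
function names are stand-ins, not taken from the test files), and the actual
fold runs on the SelectionDAG, where the narrow load's users are redirected to
(roughly) an EXTRACT_SUBVECTOR of the wide load's result:

  @mask = private unnamed_addr constant <8 x i32> <i32 5, i32 8, i32 7, i32 6, i32 9, i32 0, i32 0, i32 10>, align 32

  ; Before: two loads of the same pointer at different widths.
  define void @before(ptr %p0, ptr %p1) {
    %narrow = load <4 x i32>, ptr @mask, align 16
    %wide   = load <8 x i32>, ptr @mask, align 32
    store <4 x i32> %narrow, ptr %p0, align 16
    store <8 x i32> %wide, ptr %p1, align 32
    ret void
  }

  ; After (conceptually): only the wide load survives; the narrow value
  ; becomes the low subvector of the wide load's result.
  define void @after(ptr %p0, ptr %p1) {
    %wide   = load <8 x i32>, ptr @mask, align 32
    %narrow = shufflevector <8 x i32> %wide, <8 x i32> poison,
                            <4 x i32> <i32 0, i32 1, i32 2, i32 3>
    store <4 x i32> %narrow, ptr %p0, align 16
    store <8 x i32> %wide, ptr %p1, align 32
    ret void
  }
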
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
index 9c0dbdf03484b06..c70b741fb29e857 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
@@ -1325,48 +1325,47 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-SLOW-NEXT:    vmovdqa (%rcx), %ymm2
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa (%r8), %ymm4
+; AVX2-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm7 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
 ; AVX2-SLOW-NEXT:    vmovdqa (%rcx), %xmm6
-; AVX2-SLOW-NEXT:    vpbroadcastq {{.*#+}} xmm5 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX2-SLOW-NEXT:    vpshufb %xmm5, %xmm6, %xmm7
+; AVX2-SLOW-NEXT:    vpshufb %xmm7, %xmm6, %xmm5
 ; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %xmm8
-; AVX2-SLOW-NEXT:    vpshufb %xmm5, %xmm8, %xmm5
-; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1],xmm5[2],xmm7[2],xmm5[3],xmm7[3],xmm5[4],xmm7[4],xmm5[5],xmm7[5],xmm5[6],xmm7[6],xmm5[7],xmm7[7]
+; AVX2-SLOW-NEXT:    vpshufb %xmm7, %xmm8, %xmm9
+; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm5 = xmm9[0],xmm5[0],xmm9[1],xmm5[1],xmm9[2],xmm5[2],xmm9[3],xmm5[3],xmm9[4],xmm5[4],xmm9[5],xmm5[5],xmm9[6],xmm5[6],xmm9[7],xmm5[7]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,0,1]
 ; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %xmm11
-; AVX2-SLOW-NEXT:    vpbroadcastq {{.*#+}} xmm7 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX2-SLOW-NEXT:    vpshufb %xmm7, %xmm11, %xmm9
+; AVX2-SLOW-NEXT:    vpbroadcastq {{.*#+}} xmm9 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX2-SLOW-NEXT:    vpshufb %xmm9, %xmm11, %xmm10
 ; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm13
-; AVX2-SLOW-NEXT:    vpshufb %xmm7, %xmm13, %xmm7
-; AVX2-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm7[8],xmm9[8],xmm7[9],xmm9[9],xmm7[10],xmm9[10],xmm7[11],xmm9[11],xmm7[12],xmm9[12],xmm7[13],xmm9[13],xmm7[14],xmm9[14],xmm7[15],xmm9[15]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,0,0,1]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255>
-; AVX2-SLOW-NEXT:    vpblendvb %ymm9, %ymm5, %ymm7, %ymm7
+; AVX2-SLOW-NEXT:    vpshufb %xmm9, %xmm13, %xmm9
+; AVX2-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm9 = xmm9[8],xmm10[8],xmm9[9],xmm10[9],xmm9[10],xmm10[10],xmm9[11],xmm10[11],xmm9[12],xmm10[12],xmm9[13],xmm10[13],xmm9[14],xmm10[14],xmm9[15],xmm10[15]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,0,0,1]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255>
+; AVX2-SLOW-NEXT:    vpblendvb %ymm10, %ymm5, %ymm9, %ymm9
 ; AVX2-SLOW-NEXT:    vmovdqa (%r8), %xmm5
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm5[6,u,5,u,8,u,7,u,9,u,9,u,9,u,9,u]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,0,1]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = [255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm12, %ymm7, %ymm10, %ymm14
-; AVX2-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm7 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX2-SLOW-NEXT:    vpshufb %ymm7, %ymm2, %ymm10
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm5[6,u,5,u,8,u,7,u,9,u,9,u,9,u,9,u]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,0,0,1]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm14 = [255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm14, %ymm9, %ymm12, %ymm9
+; AVX2-SLOW-NEXT:    vpshufb %ymm7, %ymm2, %ymm12
 ; AVX2-SLOW-NEXT:    vpshufb %ymm7, %ymm0, %ymm7
-; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm10 = ymm7[0],ymm10[0],ymm7[1],ymm10[1],ymm7[2],ymm10[2],ymm7[3],ymm10[3],ymm7[4],ymm10[4],ymm7[5],ymm10[5],ymm7[6],ymm10[6],ymm7[7],ymm10[7],ymm7[16],ymm10[16],ymm7[17],ymm10[17],ymm7[18],ymm10[18],ymm7[19],ymm10[19],ymm7[20],ymm10[20],ymm7[21],ymm10[21],ymm7[22],ymm10[22],ymm7[23],ymm10[23]
+; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm12 = ymm7[0],ymm12[0],ymm7[1],ymm12[1],ymm7[2],ymm12[2],ymm7[3],ymm12[3],ymm7[4],ymm12[4],ymm7[5],ymm12[5],ymm7[6],ymm12[6],ymm7[7],ymm12[7],ymm7[16],ymm12[16],ymm7[17],ymm12[17],ymm7[18],ymm12[18],ymm7[19],ymm12[19],ymm7[20],ymm12[20],ymm7[21],ymm12[21],ymm7[22],ymm12[22],ymm7[23],ymm12[23]
 ; AVX2-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm7 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
 ; AVX2-SLOW-NEXT:    vpshufb %ymm7, %ymm3, %ymm15
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm1, %ymm2
 ; AVX2-SLOW-NEXT:    vpshufb %ymm7, %ymm1, %ymm7
 ; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm15 = ymm7[0],ymm15[0],ymm7[1],ymm15[1],ymm7[2],ymm15[2],ymm7[3],ymm15[3],ymm7[4],ymm15[4],ymm7[5],ymm15[5],ymm7[6],ymm15[6],ymm7[7],ymm15[7],ymm7[16],ymm15[16],ymm7[17],ymm15[17],ymm7[18],ymm15[18],ymm7[19],ymm15[19],ymm7[20],ymm15[20],ymm7[21],ymm15[21],ymm7[22],ymm15[22],ymm7[23],ymm15[23]
 ; AVX2-SLOW-NEXT:    vmovdqa (%r9), %ymm7
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,2,2,3]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,2,3]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm9, %ymm10, %ymm15, %ymm9
+; AVX2-SLOW-NEXT:    vpblendvb %ymm10, %ymm12, %ymm15, %ymm12
 ; AVX2-SLOW-NEXT:    vmovdqa (%r9), %xmm10
 ; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm15 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm12, %ymm9, %ymm15, %ymm12
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm10[u,6,u,5,u,8,u,7,u,9,u,9,u,9,u,9]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,0,0,1]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm14, %ymm12, %ymm15, %ymm12
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm10[u,6,u,5,u,8,u,7,u,9,u,9,u,9,u,9]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,0,0,1]
 ; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm15 = [255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm15, %ymm14, %ymm9, %ymm0
+; AVX2-SLOW-NEXT:    vpblendvb %ymm15, %ymm9, %ymm14, %ymm0
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,2,3]
@@ -1459,34 +1458,33 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-FAST-NEXT:    vmovdqa (%rcx), %ymm4
 ; AVX2-FAST-NEXT:    vmovdqa (%r8), %ymm0
 ; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm9 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
 ; AVX2-FAST-NEXT:    vmovdqa (%rcx), %xmm1
 ; AVX2-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm7 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm1, %xmm8
+; AVX2-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm7
 ; AVX2-FAST-NEXT:    vmovdqa (%rdx), %xmm1
 ; AVX2-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm1, %xmm7
-; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3],xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
+; AVX2-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm8
+; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,0,0,1]
 ; AVX2-FAST-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX2-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm9 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX2-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm11
+; AVX2-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm11 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX2-FAST-NEXT:    vpshufb %xmm11, %xmm1, %xmm12
 ; AVX2-FAST-NEXT:    vmovdqa %xmm1, %xmm10
 ; AVX2-FAST-NEXT:    vmovdqa (%rdi), %xmm5
-; AVX2-FAST-NEXT:    vpshufb %xmm9, %xmm5, %xmm9
-; AVX2-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm9 = xmm9[8],xmm11[8],xmm9[9],xmm11[9],xmm9[10],xmm11[10],xmm9[11],xmm11[11],xmm9[12],xmm11[12],xmm9[13],xmm11[13],xmm9[14],xmm11[14],xmm9[15],xmm11[15]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,0,0,1]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255>
-; AVX2-FAST-NEXT:    vpblendvb %ymm11, %ymm7, %ymm9, %ymm9
+; AVX2-FAST-NEXT:    vpshufb %xmm11, %xmm5, %xmm11
+; AVX2-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm11 = xmm11[8],xmm12[8],xmm11[9],xmm12[9],xmm11[10],xmm12[10],xmm11[11],xmm12[11],xmm11[12],xmm12[12],xmm11[13],xmm12[13],xmm11[14],xmm12[14],xmm11[15],xmm12[15]
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,0,1]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255>
+; AVX2-FAST-NEXT:    vpblendvb %ymm12, %ymm7, %ymm11, %ymm11
 ; AVX2-FAST-NEXT:    vmovdqa (%r8), %xmm7
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm7[6,u,5,u,8,u,7,u,9,u,9,u,9,u,9,u]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,0,0,1]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255]
-; AVX2-FAST-NEXT:    vpblendvb %ymm13, %ymm9, %ymm12, %ymm14
-; AVX2-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm9 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX2-FAST-NEXT:    vpshufb %ymm9, %ymm4, %ymm12
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm7[6,u,5,u,8,u,7,u,9,u,9,u,9,u,9,u]
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,0,0,1]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255]
+; AVX2-FAST-NEXT:    vpblendvb %ymm14, %ymm11, %ymm13, %ymm11
+; AVX2-FAST-NEXT:    vpshufb %ymm9, %ymm4, %ymm13
 ; AVX2-FAST-NEXT:    vpshufb %ymm9, %ymm3, %ymm9
-; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm12 = ymm9[0],ymm12[0],ymm9[1],ymm12[1],ymm9[2],ymm12[2],ymm9[3],ymm12[3],ymm9[4],ymm12[4],ymm9[5],ymm12[5],ymm9[6],ymm12[6],ymm9[7],ymm12[7],ymm9[16],ymm12[16],ymm9[17],ymm12[17],ymm9[18],ymm12[18],ymm9[19],ymm12[19],ymm9[20],ymm12[20],ymm9[21],ymm12[21],ymm9[22],ymm12[22],ymm9[23],ymm12[23]
+; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm13 = ymm9[0],ymm13[0],ymm9[1],ymm13[1],ymm9[2],ymm13[2],ymm9[3],ymm13[3],ymm9[4],ymm13[4],ymm9[5],ymm13[5],ymm9[6],ymm13[6],ymm9[7],ymm13[7],ymm9[16],ymm13[16],ymm9[17],ymm13[17],ymm9[18],ymm13[18],ymm9[19],ymm13[19],ymm9[20],ymm13[20],ymm9[21],ymm13[21],ymm9[22],ymm13[22],ymm9[23],ymm13[23]
 ; AVX2-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm9 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
 ; AVX2-FAST-NEXT:    vpshufb %ymm9, %ymm6, %ymm15
 ; AVX2-FAST-NEXT:    vpshufb %ymm9, %ymm2, %ymm9
@@ -1494,17 +1492,17 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm15 = ymm9[0],ymm15[0],ymm9[1],ymm15[1],ymm9[2],ymm15[2],ymm9[3],ymm15[3],ymm9[4],ymm15[4],ymm9[5],ymm15[5],ymm9[6],ymm15[6],ymm9[7],ymm15[7],ymm9[16],ymm15[16],ymm9[17],ymm15[17],ymm9[18],ymm15[18],ymm9[19],ymm15[19],ymm9[20],ymm15[20],ymm9[21],ymm15[21],ymm9[22],ymm15[22],ymm9[23],ymm15[23]
 ; AVX2-FAST-NEXT:    vmovdqa (%r9), %ymm1
 ; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,2,3]
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,2,2,3]
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,2,3]
-; AVX2-FAST-NEXT:    vpblendvb %ymm11, %ymm12, %ymm15, %ymm11
+; AVX2-FAST-NEXT:    vpblendvb %ymm12, %ymm13, %ymm15, %ymm13
 ; AVX2-FAST-NEXT:    vmovdqa (%r9), %xmm12
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u]
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,2,3]
-; AVX2-FAST-NEXT:    vpblendvb %ymm13, %ymm11, %ymm15, %ymm13
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm12[u,6,u,5,u,8,u,7,u,9,u,9,u,9,u,9]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,0,1]
+; AVX2-FAST-NEXT:    vpblendvb %ymm14, %ymm13, %ymm15, %ymm13
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm12[u,6,u,5,u,8,u,7,u,9,u,9,u,9,u,9]
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,0,0,1]
 ; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = [255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255]
-; AVX2-FAST-NEXT:    vpblendvb %ymm15, %ymm14, %ymm11, %ymm0
+; AVX2-FAST-NEXT:    vpblendvb %ymm15, %ymm11, %ymm14, %ymm0
 ; AVX2-FAST-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u]
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,2,3]
@@ -1596,34 +1594,33 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rcx), %ymm4
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r8), %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq {{.*#+}} ymm9 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rcx), %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq {{.*#+}} xmm7 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm1, %xmm8
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm9, %xmm1, %xmm7
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdx), %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm1, %xmm7
-; AVX2-FAST-PERLANE-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3],xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm9, %xmm1, %xmm8
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,0,0,1]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq {{.*#+}} xmm9 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm9, %xmm1, %xmm11
+; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq {{.*#+}} xmm11 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm11, %xmm1, %xmm12
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm1, %xmm10
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdi), %xmm5
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm9, %xmm5, %xmm9
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhbw {{.*#+}} xmm9 = xmm9[8],xmm11[8],xmm9[9],xmm11[9],xmm9[10],xmm11[10],xmm9[11],xmm11[11],xmm9[12],xmm11[12],xmm9[13],xmm11[13],xmm9[14],xmm11[14],xmm9[15],xmm11[15]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,0,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm11 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255>
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm11, %ymm7, %ymm9, %ymm9
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm11, %xmm5, %xmm11
+; AVX2-FAST-PERLANE-NEXT:    vpunpckhbw {{.*#+}} xmm11 = xmm11[8],xmm12[8],xmm11[9],xmm12[9],xmm11[10],xmm12[10],xmm11[11],xmm12[11],xmm11[12],xmm12[12],xmm11[13],xmm12[13],xmm11[14],xmm12[14],xmm11[15],xmm12[15]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm12 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255>
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm12, %ymm7, %ymm11, %ymm11
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r8), %xmm7
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm12 = xmm7[6,u,5,u,8,u,7,u,9,u,9,u,9,u,9,u]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,0,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm13 = [255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm13, %ymm9, %ymm12, %ymm14
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq {{.*#+}} ymm9 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm9, %ymm4, %ymm12
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm13 = xmm7[6,u,5,u,8,u,7,u,9,u,9,u,9,u,9,u]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,0,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm14 = [255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm14, %ymm11, %ymm13, %ymm11
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm9, %ymm4, %ymm13
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm9, %ymm3, %ymm9
-; AVX2-FAST-PERLANE-NEXT:    vpunpcklbw {{.*#+}} ymm12 = ymm9[0],ymm12[0],ymm9[1],ymm12[1],ymm9[2],ymm12[2],ymm9[3],ymm12[3],ymm9[4],ymm12[4],ymm9[5],ymm12[5],ymm9[6],ymm12[6],ymm9[7],ymm12[7],ymm9[16],ymm12[16],ymm9[17],ymm12[17],ymm9[18],ymm12[18],ymm9[19],ymm12[19],ymm9[20],ymm12[20],ymm9[21],ymm12[21],ymm9[22],ymm12[22],ymm9[23],ymm12[23]
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklbw {{.*#+}} ymm13 = ymm9[0],ymm13[0],ymm9[1],ymm13[1],ymm9[2],ymm13[2],ymm9[3],ymm13[3],ymm9[4],ymm13[4],ymm9[5],ymm13[5],ymm9[6],ymm13[6],ymm9[7],ymm13[7],ymm9[16],ymm13[16],ymm9[17],ymm13[17],ymm9[18],ymm13[18],ymm9[19],ymm13[19],ymm9[20],ymm13[20],ymm9[21],ymm13[21],ymm9[22],ymm13[22],ymm9[23],ymm13[23]
 ; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq {{.*#+}} ymm9 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm9, %ymm6, %ymm15
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm9, %ymm2, %ymm9
@@ -1631,17 +1628,17 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-FAST-PERLANE-NEXT:    vpunpcklbw {{.*#+}} ymm15 = ymm9[0],ymm15[0],ymm9[1],ymm15[1],ymm9[2],ymm15[2],ymm9[3],ymm15[3],ymm9[4],ymm15[4],ymm9[5],ymm15[5],ymm9[6],ymm15[6],ymm9[7],ymm15[7],ymm9[16],ymm15[16],ymm9[17],ymm15[17],ymm9[18],ymm15[18],ymm9[19],ymm15[19],ymm9[20],ymm15[20],ymm9[21],ymm15[21],ymm9[22],ymm15[22],ymm9[23],ymm15[23]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r9), %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,2,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm11, %ymm12, %ymm15, %ymm11
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm12, %ymm13, %ymm15, %ymm13
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r9), %xmm12
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm15 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u]
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm13, %ymm11, %ymm15, %ymm13
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm11 = xmm12[u,6,u,5,u,8,u,7,u,9,u,9,u,9,u,9]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm14, %ymm13, %ymm15, %ymm13
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm14 = xmm12[u,6,u,5,u,8,u,7,u,9,u,9,u,9,u,9]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,0,0,1]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm15 = [255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm15, %ymm14, %ymm11, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm15, %ymm11, %ymm14, %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm14 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u]
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,2,3]
@@ -1772,38 +1769,37 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm15 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
 ; AVX512F-SLOW-NEXT:    vprold $16, %xmm15, %xmm15
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm15, %zmm13
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm13 = zmm13[0,0,0,1,4,4,4,5]
-; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} xmm15 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm8, %xmm8
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm15 = zmm13[0,0,0,1,4,4,4,5]
+; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm13 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm7, %xmm7
+; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm8, %xmm8
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm8[0,3,2,1,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,5,6,5]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm8, %zmm7
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm7 = zmm7[0,0,0,1,4,4,4,5]
-; AVX512F-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm13, %zmm7
-; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} xmm8 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm11, %xmm9
-; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm12, %xmm8
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3],xmm8[4],xmm9[4],xmm8[5],xmm9[5],xmm8[6],xmm9[6],xmm8[7],xmm9[7]
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm9 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[2,1,0,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,4,4,4]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm9, %zmm8
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm8 = zmm8[0,0,0,1,4,4,4,5]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm7, %zmm14, %zmm8
-; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm7 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm3, %ymm9
-; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm2, %ymm7
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm7 = ymm7[0],ymm9[0],ymm7[1],ymm9[1],ymm7[2],ymm9[2],ymm7[3],ymm9[3],ymm7[4],ymm9[4],ymm7[5],ymm9[5],ymm7[6],ymm9[6],ymm7[7],ymm9[7],ymm7[16],ymm9[16],ymm7[17],ymm9[17],ymm7[18],ymm9[18],ymm7[19],ymm9[19],ymm7[20],ymm9[20],ymm7[21],ymm9[21],ymm7[22],ymm9[22],ymm7[23],ymm9[23]
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm9 = ymm4[8],ymm5[8],ymm4[9],ymm5[9],ymm4[10],ymm5[10],ymm4[11],ymm5[11],ymm4[12],ymm5[12],ymm4[13],ymm5[13],ymm4[14],ymm5[14],ymm4[15],ymm5[15],ymm4[24],ymm5[24],ymm4[25],ymm5[25],ymm4[26],ymm5[26],ymm4[27],ymm5[27],ymm4[28],ymm5[28],ymm4[29],ymm5[29],ymm4[30],ymm5[30],ymm4[31],ymm5[31]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm9 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm7 = zmm7[2,2,2,3,6,6,6,7]
-; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm9 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm9, %ymm5, %ymm5
-; AVX512F-SLOW-NEXT:    vpshufb %ymm9, %ymm4, %ymm4
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm8 = zmm7[0,0,0,1,4,4,4,5]
+; AVX512F-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm15, %zmm8
+; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm7 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm7, %xmm11, %xmm9
+; AVX512F-SLOW-NEXT:    vpshufb %xmm7, %xmm12, %xmm10
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm9 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm10 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[2,1,0,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,4,4,4]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm10, %zmm9
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm9 = zmm9[0,0,0,1,4,4,4,5]
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm8, %zmm14, %zmm9
+; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm8 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm8, %ymm3, %ymm10
+; AVX512F-SLOW-NEXT:    vpshufb %ymm8, %ymm2, %ymm8
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm8 = ymm8[0],ymm10[0],ymm8[1],ymm10[1],ymm8[2],ymm10[2],ymm8[3],ymm10[3],ymm8[4],ymm10[4],ymm8[5],ymm10[5],ymm8[6],ymm10[6],ymm8[7],ymm10[7],ymm8[16],ymm10[16],ymm8[17],ymm10[17],ymm8[18],ymm10[18],ymm8[19],ymm10[19],ymm8[20],ymm10[20],ymm8[21],ymm10[21],ymm8[22],ymm10[22],ymm8[23],ymm10[23]
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm10 = ymm4[8],ymm5[8],ymm4[9],ymm5[9],ymm4[10],ymm5[10],ymm4[11],ymm5[11],ymm4[12],ymm5[12],ymm4[13],ymm5[13],ymm4[14],ymm5[14],ymm4[15],ymm5[15],ymm4[24],ymm5[24],ymm4[25],ymm5[25],ymm4[26],ymm5[26],ymm4[27],ymm5[27],ymm4[28],ymm5[28],ymm4[29],ymm5[29],ymm4[30],ymm5[30],ymm4[31],ymm5[31]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm8, %zmm8
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm8 = zmm8[2,2,2,3,6,6,6,7]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm13, %ymm5, %ymm5
+; AVX512F-SLOW-NEXT:    vpshufb %ymm13, %ymm4, %ymm4
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[4],ymm5[4],ymm4[5],ymm5[5],ymm4[6],ymm5[6],ymm4[7],ymm5[7],ymm4[16],ymm5[16],ymm4[17],ymm5[17],ymm4[18],ymm5[18],ymm4[19],ymm5[19],ymm4[20],ymm5[20],ymm4[21],ymm5[21],ymm4[22],ymm5[22],ymm4[23],ymm5[23]
 ; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15],ymm2[24],ymm3[24],ymm2[25],ymm3[25],ymm2[26],ymm3[26],ymm2[27],ymm3[27],ymm2[28],ymm3[28],ymm2[29],ymm3[29],ymm2[30],ymm3[30],ymm2[31],ymm3[31]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31]
@@ -1811,18 +1807,17 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm2 = zmm2[2,2,2,3,6,6,6,7]
 ; AVX512F-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
 ; AVX512F-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %zmm7, %zmm2, %zmm3
-; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm1, %ymm4
-; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm0, %ymm2
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
+; AVX512F-SLOW-NEXT:    vpternlogq $202, %zmm8, %zmm2, %zmm3
+; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm1, %ymm2
+; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm0, %ymm4
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm4[0],ymm2[0],ymm4[1],ymm2[1],ymm4[2],ymm2[2],ymm4[3],ymm2[3],ymm4[4],ymm2[4],ymm4[5],ymm2[5],ymm4[6],ymm2[6],ymm4[7],ymm2[7],ymm4[16],ymm2[16],ymm4[17],ymm2[17],ymm4[18],ymm2[18],ymm4[19],ymm2[19],ymm4[20],ymm2[20],ymm4[21],ymm2[21],ymm4[22],ymm2[22],ymm4[23],ymm2[23]
 ; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27,24,25,22,23,28,29,26,27,28,29,30,31]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,2,2,3,6,6,6,7]
 ; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm0, 128(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm8, (%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm9, (%rax)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm6, 64(%rax)
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
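
The same sharing shows up in the AVX512F-SLOW path above: the 16-byte
vpbroadcastq of [5,8,7,6,9,0,0,10] (and likewise [6,5,8,7,0,9,0,0]) is gone,
and a single 32-byte ymm broadcast of the repeated pattern now feeds both the
vpshufb %xmm and vpshufb %ymm uses, saving a reload of the shared
constant-pool value.
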
@@ -1846,60 +1841,58 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm6 = zmm6[2,2,2,3,6,6,6,7]
 ; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm7 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
 ; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm5, %ymm8
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm4, %ymm7
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm7 = ymm7[0],ymm8[0],ymm7[1],ymm8[1],ymm7[2],ymm8[2],ymm7[3],ymm8[3],ymm7[4],ymm8[4],ymm7[5],ymm8[5],ymm7[6],ymm8[6],ymm7[7],ymm8[7],ymm7[16],ymm8[16],ymm7[17],ymm8[17],ymm7[18],ymm8[18],ymm7[19],ymm8[19],ymm7[20],ymm8[20],ymm7[21],ymm8[21],ymm7[22],ymm8[22],ymm7[23],ymm8[23]
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm8 = ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15],ymm2[24],ymm3[24],ymm2[25],ymm3[25],ymm2[26],ymm3[26],ymm2[27],ymm3[27],ymm2[28],ymm3[28],ymm2[29],ymm3[29],ymm2[30],ymm3[30],ymm2[31],ymm3[31]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm7 = zmm7[2,2,2,3,6,6,6,7]
-; AVX512F-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX512F-FAST-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-FAST-NEXT:    vpternlogq $202, %zmm6, %zmm7, %zmm8
-; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm6 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
-; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm1, %ymm7
-; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm0, %ymm6
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm6 = ymm6[0],ymm7[0],ymm6[1],ymm7[1],ymm6[2],ymm7[2],ymm6[3],ymm7[3],ymm6[4],ymm7[4],ymm6[5],ymm7[5],ymm6[6],ymm7[6],ymm6[7],ymm7[7],ymm6[16],ymm7[16],ymm6[17],ymm7[17],ymm6[18],ymm7[18],ymm6[19],ymm7[19],ymm6[20],ymm7[20],ymm6[21],ymm7[21],ymm6[22],ymm7[22],ymm6[23],ymm7[23]
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm7 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27,24,25,22,23,28,29,26,27,28,29,30,31]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm6
+; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm4, %ymm9
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm8 = ymm9[0],ymm8[0],ymm9[1],ymm8[1],ymm9[2],ymm8[2],ymm9[3],ymm8[3],ymm9[4],ymm8[4],ymm9[5],ymm8[5],ymm9[6],ymm8[6],ymm9[7],ymm8[7],ymm9[16],ymm8[16],ymm9[17],ymm8[17],ymm9[18],ymm8[18],ymm9[19],ymm8[19],ymm9[20],ymm8[20],ymm9[21],ymm8[21],ymm9[22],ymm8[22],ymm9[23],ymm8[23]
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm9 = ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15],ymm2[24],ymm3[24],ymm2[25],ymm3[25],ymm2[26],ymm3[26],ymm2[27],ymm3[27],ymm2[28],ymm3[28],ymm2[29],ymm3[29],ymm2[30],ymm3[30],ymm2[31],ymm3[31]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm9 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm8, %zmm8
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm8 = zmm8[2,2,2,3,6,6,6,7]
+; AVX512F-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
+; AVX512F-FAST-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-FAST-NEXT:    vpternlogq $202, %zmm6, %zmm8, %zmm9
+; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm10 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
+; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm1, %ymm6
+; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm0, %ymm8
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm6 = ymm8[0],ymm6[0],ymm8[1],ymm6[1],ymm8[2],ymm6[2],ymm8[3],ymm6[3],ymm8[4],ymm6[4],ymm8[5],ymm6[5],ymm8[6],ymm6[6],ymm8[7],ymm6[7],ymm8[16],ymm6[16],ymm8[17],ymm6[17],ymm8[18],ymm6[18],ymm8[19],ymm6[19],ymm8[20],ymm6[20],ymm8[21],ymm6[21],ymm8[22],ymm6[22],ymm8[23],ymm6[23]
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm8 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27,24,25,22,23,28,29,26,27,28,29,30,31]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm6, %zmm6
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm6 = zmm6[2,2,2,3,6,6,6,7]
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm6
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm6
 ; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm9
-; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm7 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
 ; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm9, %xmm8
-; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm10
-; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm10, %xmm7
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3],xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
+; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm11
+; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm11, %xmm7
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3],xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
 ; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm7
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm8
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[0,1,6,7,4,5,2,3,8,9,10,11,12,13,10,11]
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm11, %zmm12, %zmm11
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm11 = zmm11[0,0,0,1,4,4,4,5]
-; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm12 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm7, %xmm13
-; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm8, %xmm12
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm12 = xmm12[8],xmm13[8],xmm12[9],xmm13[9],xmm12[10],xmm13[10],xmm12[11],xmm13[11],xmm12[12],xmm13[12],xmm12[13],xmm13[13],xmm12[14],xmm13[14],xmm12[15],xmm13[15]
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm13 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
-; AVX512F-FAST-NEXT:    vprold $16, %xmm13, %xmm13
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm13, %zmm12
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm13 = zmm12[0,0,0,1,4,4,4,5]
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm11, %zmm13
-; AVX512F-FAST-NEXT:    vmovdqa (%r9), %xmm11
-; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm14 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
-; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm11, %xmm15
-; AVX512F-FAST-NEXT:    vmovdqa (%r8), %xmm12
-; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm12, %xmm14
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm14 = xmm14[0],xmm15[0],xmm14[1],xmm15[1],xmm14[2],xmm15[2],xmm14[3],xmm15[3],xmm14[4],xmm15[4],xmm14[5],xmm15[5],xmm14[6],xmm15[6],xmm14[7],xmm15[7]
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm15 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm13 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[0,1,6,7,4,5,2,3,8,9,10,11,12,13,10,11]
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm12, %zmm13, %zmm12
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm12 = zmm12[0,0,0,1,4,4,4,5]
+; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm13 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm7, %xmm14
+; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm8, %xmm13
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm13 = xmm13[8],xmm14[8],xmm13[9],xmm14[9],xmm13[10],xmm14[10],xmm13[11],xmm14[11],xmm13[12],xmm14[12],xmm13[13],xmm14[13],xmm13[14],xmm14[14],xmm13[15],xmm14[15]
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm14 = xmm11[0],xmm9[0],xmm11[1],xmm9[1],xmm11[2],xmm9[2],xmm11[3],xmm9[3],xmm11[4],xmm9[4],xmm11[5],xmm9[5],xmm11[6],xmm9[6],xmm11[7],xmm9[7]
+; AVX512F-FAST-NEXT:    vprold $16, %xmm14, %xmm14
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm14, %zmm13
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm14 = zmm13[0,0,0,1,4,4,4,5]
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm12, %zmm14
+; AVX512F-FAST-NEXT:    vmovdqa (%r9), %xmm12
+; AVX512F-FAST-NEXT:    vpshufb %xmm10, %xmm12, %xmm15
+; AVX512F-FAST-NEXT:    vmovdqa (%r8), %xmm13
+; AVX512F-FAST-NEXT:    vpshufb %xmm10, %xmm13, %xmm10
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm10 = xmm10[0],xmm15[0],xmm10[1],xmm15[1],xmm10[2],xmm15[2],xmm10[3],xmm15[3],xmm10[4],xmm15[4],xmm10[5],xmm15[5],xmm10[6],xmm15[6],xmm10[7],xmm15[7]
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm15 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3],xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm15[4,5,2,3,0,1,6,7,8,9,8,9,8,9,8,9]
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm14, %zmm15, %zmm14
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm14 = zmm14[0,0,0,1,4,4,4,5]
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm10, %zmm15, %zmm10
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm10 = zmm10[0,0,0,1,4,4,4,5]
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm15 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm13, %zmm15, %zmm14
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm14, %zmm15, %zmm10
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[4],ymm5[4],ymm4[5],ymm5[5],ymm4[6],ymm5[6],ymm4[7],ymm5[7],ymm4[16],ymm5[16],ymm4[17],ymm5[17],ymm4[18],ymm5[18],ymm4[19],ymm5[19],ymm4[20],ymm5[20],ymm4[21],ymm5[21],ymm4[22],ymm5[22],ymm4[23],ymm5[23]
 ; AVX512F-FAST-NEXT:    vprold $16, %ymm4, %ymm4
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm11[8],xmm9[8],xmm11[9],xmm9[9],xmm11[10],xmm9[10],xmm11[11],xmm9[11],xmm11[12],xmm9[12],xmm11[13],xmm9[13],xmm11[14],xmm9[14],xmm11[15],xmm9[15]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[10,11,8,9,6,7,12,13,14,15,14,15,14,15,14,15]
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [0,0,0,1,10,10,10,11]
 ; AVX512F-FAST-NEXT:    vpermt2q %zmm4, %zmm9, %zmm5
@@ -1911,12 +1904,12 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm5, %zmm15, %zmm3
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,18,19,16,17,22,23,24,25,24,25,24,25,24,25]
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm13[8],xmm12[8],xmm13[9],xmm12[9],xmm13[10],xmm12[10],xmm13[11],xmm12[11],xmm13[12],xmm12[12],xmm13[13],xmm12[13],xmm13[14],xmm12[14],xmm13[15],xmm12[15]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[4,5,10,11,8,9,6,7,12,13,10,11,12,13,14,15]
 ; AVX512F-FAST-NEXT:    vpermt2q %zmm0, %zmm9, %zmm1
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm1
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, 64(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm14, (%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm10, (%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm6, 128(%rax)
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
@@ -1924,99 +1917,97 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-SLOW-LABEL: store_i8_stride6_vf32:
 ; AVX512BW-SLOW:       # %bb.0:
 ; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %ymm4
+; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %ymm3
 ; AVX512BW-SLOW-NEXT:    vmovdqa (%rsi), %ymm5
 ; AVX512BW-SLOW-NEXT:    vmovdqa (%rdx), %ymm2
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rcx), %ymm3
+; AVX512BW-SLOW-NEXT:    vmovdqa (%rcx), %ymm4
 ; AVX512BW-SLOW-NEXT:    vmovdqa (%r8), %ymm0
 ; AVX512BW-SLOW-NEXT:    vmovdqa (%r9), %ymm1
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rsi), %xmm7
+; AVX512BW-SLOW-NEXT:    vmovdqa (%rsi), %xmm9
 ; AVX512BW-SLOW-NEXT:    vpbroadcastq {{.*#+}} xmm6 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm6, %xmm7, %xmm8
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %xmm9
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm6, %xmm9, %xmm6
-; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm6[8],xmm8[8],xmm6[9],xmm8[9],xmm6[10],xmm8[10],xmm6[11],xmm8[11],xmm6[12],xmm8[12],xmm6[13],xmm8[13],xmm6[14],xmm8[14],xmm6[15],xmm8[15]
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,0,0,1]
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rcx), %xmm8
-; AVX512BW-SLOW-NEXT:    vpbroadcastq {{.*#+}} xmm10 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm10, %xmm8, %xmm11
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm6, %xmm9, %xmm7
+; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %xmm10
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm6, %xmm10, %xmm6
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm6[0,0,0,1]
+; AVX512BW-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm6 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
+; AVX512BW-SLOW-NEXT:    vmovdqa (%rcx), %xmm11
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm6, %xmm11, %xmm8
 ; AVX512BW-SLOW-NEXT:    vmovdqa (%rdx), %xmm12
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm10, %xmm12, %xmm10
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm10 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3],xmm10[4],xmm11[4],xmm10[5],xmm11[5],xmm10[6],xmm11[6],xmm10[7],xmm11[7]
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,0,1]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm6, %xmm12, %xmm13
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm13[0],xmm8[0],xmm13[1],xmm8[1],xmm13[2],xmm8[2],xmm13[3],xmm8[3],xmm13[4],xmm8[4],xmm13[5],xmm8[5],xmm13[6],xmm8[6],xmm13[7],xmm8[7]
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,0,1]
 ; AVX512BW-SLOW-NEXT:    movw $18724, %cx # imm = 0x4924
 ; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k1
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm6, %ymm10 {%k1}
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm6
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm10 = xmm9[0],xmm7[0],xmm9[1],xmm7[1],xmm9[2],xmm7[2],xmm9[3],xmm7[3],xmm9[4],xmm7[4],xmm9[5],xmm7[5],xmm9[6],xmm7[6],xmm9[7],xmm7[7]
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
-; AVX512BW-SLOW-NEXT:    vpermw %ymm10, %ymm11, %ymm10
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm12[0],xmm8[0],xmm12[1],xmm8[1],xmm12[2],xmm8[2],xmm12[3],xmm8[3],xmm12[4],xmm8[4],xmm12[5],xmm8[5],xmm12[6],xmm8[6],xmm12[7],xmm8[7]
-; AVX512BW-SLOW-NEXT:    vprold $16, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,0,1]
+; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm7, %ymm8 {%k1}
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm7
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
+; AVX512BW-SLOW-NEXT:    vpermw %ymm8, %ymm13, %ymm8
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm13 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
+; AVX512BW-SLOW-NEXT:    vprold $16, %xmm13, %xmm13
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,0,0,1]
 ; AVX512BW-SLOW-NEXT:    movw $9362, %cx # imm = 0x2492
 ; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k2
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm11, %ymm10 {%k2}
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm10[0,1,2,3],zmm6[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vmovdqa (%r9), %xmm10
-; AVX512BW-SLOW-NEXT:    vpbroadcastq {{.*#+}} xmm11 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm11, %xmm10, %xmm13
-; AVX512BW-SLOW-NEXT:    vmovdqa (%r8), %xmm14
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm11, %xmm14, %xmm11
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm11[0],xmm13[0],xmm11[1],xmm13[1],xmm11[2],xmm13[2],xmm11[3],xmm13[3],xmm11[4],xmm13[4],xmm11[5],xmm13[5],xmm11[6],xmm13[6],xmm11[7],xmm13[7]
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,0,1]
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm13 = xmm14[0],xmm10[0],xmm14[1],xmm10[1],xmm14[2],xmm10[2],xmm14[3],xmm10[3],xmm14[4],xmm10[4],xmm14[5],xmm10[5],xmm14[6],xmm10[6],xmm14[7],xmm10[7]
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm15 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4]
-; AVX512BW-SLOW-NEXT:    vpermw %ymm13, %ymm15, %ymm13
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm13, %zmm11
+; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm13, %ymm8 {%k2}
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm8[0,1,2,3],zmm7[4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm8 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
+; AVX512BW-SLOW-NEXT:    vmovdqa (%r9), %xmm13
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm13, %xmm14
+; AVX512BW-SLOW-NEXT:    vmovdqa (%r8), %xmm15
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm15, %xmm16
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm14 = xmm16[0],xmm14[0],xmm16[1],xmm14[1],xmm16[2],xmm14[2],xmm16[3],xmm14[3],xmm16[4],xmm14[4],xmm16[5],xmm14[5],xmm16[6],xmm14[6],xmm16[7],xmm14[7]
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,0,0,1]
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm16 = xmm15[0],xmm13[0],xmm15[1],xmm13[1],xmm15[2],xmm13[2],xmm15[3],xmm13[3],xmm15[4],xmm13[4],xmm15[5],xmm13[5],xmm15[6],xmm13[6],xmm15[7],xmm13[7]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4]
+; AVX512BW-SLOW-NEXT:    vpermw %ymm16, %ymm17, %ymm16
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm16, %zmm14
 ; AVX512BW-SLOW-NEXT:    movl $613566756, %ecx # imm = 0x24924924
 ; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k3
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %zmm11, %zmm6 {%k3}
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm11 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[4],ymm5[4],ymm4[5],ymm5[5],ymm4[6],ymm5[6],ymm4[7],ymm5[7],ymm4[16],ymm5[16],ymm4[17],ymm5[17],ymm4[18],ymm5[18],ymm4[19],ymm5[19],ymm4[20],ymm5[20],ymm4[21],ymm5[21],ymm4[22],ymm5[22],ymm4[23],ymm5[23]
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = [8,11,10,9,8,11,10,9,8,11,10,9,12,13,14,13]
-; AVX512BW-SLOW-NEXT:    vpermw %ymm11, %ymm13, %ymm11
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm13 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[16],ymm3[16],ymm2[17],ymm3[17],ymm2[18],ymm3[18],ymm2[19],ymm3[19],ymm2[20],ymm3[20],ymm2[21],ymm3[21],ymm2[22],ymm3[22],ymm2[23],ymm3[23]
-; AVX512BW-SLOW-NEXT:    vprold $16, %ymm13, %ymm13
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,2,2,3]
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm13, %ymm11 {%k2}
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm11
-; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm12[8],xmm8[8],xmm12[9],xmm8[9],xmm12[10],xmm8[10],xmm12[11],xmm8[11],xmm12[12],xmm8[12],xmm12[13],xmm8[13],xmm12[14],xmm8[14],xmm12[15],xmm8[15]
-; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm9[8],xmm7[8],xmm9[9],xmm7[9],xmm9[10],xmm7[10],xmm9[11],xmm7[11],xmm9[12],xmm7[12],xmm9[13],xmm7[13],xmm9[14],xmm7[14],xmm9[15],xmm7[15]
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7]
-; AVX512BW-SLOW-NEXT:    vpermw %ymm7, %ymm9, %ymm7
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7]
-; AVX512BW-SLOW-NEXT:    vpermw %ymm8, %ymm9, %ymm7 {%k1}
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm7[0,1,2,3],zmm11[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm8 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm9 = xmm14[8],xmm10[8],xmm14[9],xmm10[9],xmm14[10],xmm10[10],xmm14[11],xmm10[11],xmm14[12],xmm10[12],xmm14[13],xmm10[13],xmm14[14],xmm10[14],xmm14[15],xmm10[15]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm9, %zmm8
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,26,25,24,27,26,25,24,27,26,25,24,27,28,28,28,28]
+; AVX512BW-SLOW-NEXT:    vmovdqu16 %zmm14, %zmm7 {%k3}
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm14 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[16],ymm5[16],ymm3[17],ymm5[17],ymm3[18],ymm5[18],ymm3[19],ymm5[19],ymm3[20],ymm5[20],ymm3[21],ymm5[21],ymm3[22],ymm5[22],ymm3[23],ymm5[23]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [8,11,10,9,8,11,10,9,8,11,10,9,12,13,14,13]
+; AVX512BW-SLOW-NEXT:    vpermw %ymm14, %ymm16, %ymm14
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm16 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
+; AVX512BW-SLOW-NEXT:    vprold $16, %ymm16, %ymm16
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm16[2,2,2,3]
+; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm16, %ymm14 {%k2}
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm0, %zmm14
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm11 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm9 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7]
+; AVX512BW-SLOW-NEXT:    vpermw %ymm9, %ymm10, %ymm9
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7]
+; AVX512BW-SLOW-NEXT:    vpermw %ymm11, %ymm10, %ymm9 {%k1}
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm9[0,1,2,3],zmm14[4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm10 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm11 = xmm15[8],xmm13[8],xmm15[9],xmm13[9],xmm15[10],xmm13[10],xmm15[11],xmm13[11],xmm15[12],xmm13[12],xmm15[13],xmm13[13],xmm15[14],xmm13[14],xmm15[15],xmm13[15]
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm11, %zmm10
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,26,25,24,27,26,25,24,27,26,25,24,27,28,28,28,28]
 ; AVX512BW-SLOW-NEXT:    movl $1227133513, %ecx # imm = 0x49249249
 ; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k2
-; AVX512BW-SLOW-NEXT:    vpermw %zmm8, %zmm9, %zmm7 {%k2}
-; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm8 = ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15],ymm2[24],ymm3[24],ymm2[25],ymm3[25],ymm2[26],ymm3[26],ymm2[27],ymm3[27],ymm2[28],ymm3[28],ymm2[29],ymm3[29],ymm2[30],ymm3[30],ymm2[31],ymm3[31]
-; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm9 = ymm4[8],ymm5[8],ymm4[9],ymm5[9],ymm4[10],ymm5[10],ymm4[11],ymm5[11],ymm4[12],ymm5[12],ymm4[13],ymm5[13],ymm4[14],ymm5[14],ymm4[15],ymm5[15],ymm4[24],ymm5[24],ymm4[25],ymm5[25],ymm4[26],ymm5[26],ymm4[27],ymm5[27],ymm4[28],ymm5[28],ymm4[29],ymm5[29],ymm4[30],ymm5[30],ymm4[31],ymm5[31]
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
-; AVX512BW-SLOW-NEXT:    vpermw %ymm9, %ymm10, %ymm9
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
-; AVX512BW-SLOW-NEXT:    vpermw %ymm8, %ymm10, %ymm9 {%k1}
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm8
-; AVX512BW-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm9 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm9, %ymm5, %ymm5
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm9, %ymm4, %ymm4
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[4],ymm5[4],ymm4[5],ymm5[5],ymm4[6],ymm5[6],ymm4[7],ymm5[7],ymm4[16],ymm5[16],ymm4[17],ymm5[17],ymm4[18],ymm5[18],ymm4[19],ymm5[19],ymm4[20],ymm5[20],ymm4[21],ymm5[21],ymm4[22],ymm5[22],ymm4[23],ymm5[23]
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
-; AVX512BW-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm5 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm5, %ymm3, %ymm3
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm5, %ymm2, %ymm2
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[16],ymm3[16],ymm2[17],ymm3[17],ymm2[18],ymm3[18],ymm2[19],ymm3[19],ymm2[20],ymm3[20],ymm2[21],ymm3[21],ymm2[22],ymm3[22],ymm2[23],ymm3[23]
+; AVX512BW-SLOW-NEXT:    vpermw %zmm10, %zmm11, %zmm9 {%k2}
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm10 = ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15],ymm2[24],ymm4[24],ymm2[25],ymm4[25],ymm2[26],ymm4[26],ymm2[27],ymm4[27],ymm2[28],ymm4[28],ymm2[29],ymm4[29],ymm2[30],ymm4[30],ymm2[31],ymm4[31]
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm11 = ymm3[8],ymm5[8],ymm3[9],ymm5[9],ymm3[10],ymm5[10],ymm3[11],ymm5[11],ymm3[12],ymm5[12],ymm3[13],ymm5[13],ymm3[14],ymm5[14],ymm3[15],ymm5[15],ymm3[24],ymm5[24],ymm3[25],ymm5[25],ymm3[26],ymm5[26],ymm3[27],ymm5[27],ymm3[28],ymm5[28],ymm3[29],ymm5[29],ymm3[30],ymm5[30],ymm3[31],ymm5[31]
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
+; AVX512BW-SLOW-NEXT:    vpermw %ymm11, %ymm12, %ymm11
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
+; AVX512BW-SLOW-NEXT:    vpermw %ymm10, %ymm12, %ymm11 {%k1}
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm10
+; AVX512BW-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm11 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm11, %ymm5, %ymm5
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm11, %ymm3, %ymm3
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[16],ymm5[16],ymm3[17],ymm5[17],ymm3[18],ymm5[18],ymm3[19],ymm5[19],ymm3[20],ymm5[20],ymm3[21],ymm5[21],ymm3[22],ymm5[22],ymm3[23],ymm5[23]
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm6, %ymm4, %ymm4
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm6, %ymm2, %ymm2
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm4, %ymm2 {%k1}
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm8[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm3, %ymm1, %ymm4
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm3, %ymm0, %ymm3
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[16],ymm4[16],ymm3[17],ymm4[17],ymm3[18],ymm4[18],ymm3[19],ymm4[19],ymm3[20],ymm4[20],ymm3[21],ymm4[21],ymm3[22],ymm4[22],ymm3[23],ymm4[23]
+; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm3, %ymm2 {%k1}
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm10[4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm8, %ymm1, %ymm3
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm8, %ymm0, %ymm4
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[16],ymm3[16],ymm4[17],ymm3[17],ymm4[18],ymm3[18],ymm4[19],ymm3[19],ymm4[20],ymm3[20],ymm4[21],ymm3[21],ymm4[22],ymm3[22],ymm4[23],ymm3[23]
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
 ; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = [10,13,12,11,10,13,12,11,10,13,12,11,14,13,14,15]
@@ -2026,105 +2017,103 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k1
 ; AVX512BW-SLOW-NEXT:    vmovdqu16 %zmm0, %zmm2 {%k1}
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm2, 128(%rax)
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm7, 64(%rax)
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm6, (%rax)
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm9, 64(%rax)
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm7, (%rax)
 ; AVX512BW-SLOW-NEXT:    vzeroupper
 ; AVX512BW-SLOW-NEXT:    retq
 ;
 ; AVX512BW-FAST-LABEL: store_i8_stride6_vf32:
 ; AVX512BW-FAST:       # %bb.0:
 ; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %ymm4
+; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %ymm3
 ; AVX512BW-FAST-NEXT:    vmovdqa (%rsi), %ymm5
 ; AVX512BW-FAST-NEXT:    vmovdqa (%rdx), %ymm2
-; AVX512BW-FAST-NEXT:    vmovdqa (%rcx), %ymm3
+; AVX512BW-FAST-NEXT:    vmovdqa (%rcx), %ymm4
 ; AVX512BW-FAST-NEXT:    vmovdqa (%r8), %ymm0
 ; AVX512BW-FAST-NEXT:    vmovdqa (%r9), %ymm1
-; AVX512BW-FAST-NEXT:    vmovdqa (%rcx), %xmm7
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdx), %xmm8
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
-; AVX512BW-FAST-NEXT:    vmovdqa (%rsi), %xmm9
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %xmm10
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
-; AVX512BW-FAST-NEXT:    vpermw %ymm11, %ymm12, %ymm11
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = [1,0,3,2,1,0,3,2,1,0,3,2,5,4,7,6]
+; AVX512BW-FAST-NEXT:    vmovdqa (%rcx), %xmm9
+; AVX512BW-FAST-NEXT:    vmovdqa (%rdx), %xmm10
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
+; AVX512BW-FAST-NEXT:    vmovdqa (%rsi), %xmm11
+; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %xmm12
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
+; AVX512BW-FAST-NEXT:    vpermw %ymm7, %ymm8, %ymm8
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [1,0,3,2,1,0,3,2,1,0,3,2,5,4,7,6]
 ; AVX512BW-FAST-NEXT:    movw $9362, %cx # imm = 0x2492
 ; AVX512BW-FAST-NEXT:    kmovd %ecx, %k2
-; AVX512BW-FAST-NEXT:    vpermw %ymm6, %ymm12, %ymm11 {%k2}
+; AVX512BW-FAST-NEXT:    vpermw %ymm6, %ymm7, %ymm8 {%k2}
 ; AVX512BW-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm6 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512BW-FAST-NEXT:    vpshufb %xmm6, %xmm9, %xmm12
-; AVX512BW-FAST-NEXT:    vpshufb %xmm6, %xmm10, %xmm6
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm6[8],xmm12[8],xmm6[9],xmm12[9],xmm6[10],xmm12[10],xmm6[11],xmm12[11],xmm6[12],xmm12[12],xmm6[13],xmm12[13],xmm6[14],xmm12[14],xmm6[15],xmm12[15]
+; AVX512BW-FAST-NEXT:    vpshufb %xmm6, %xmm11, %xmm7
+; AVX512BW-FAST-NEXT:    vpshufb %xmm6, %xmm12, %xmm6
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,0,0,1]
-; AVX512BW-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm12 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512BW-FAST-NEXT:    vpshufb %xmm12, %xmm7, %xmm13
-; AVX512BW-FAST-NEXT:    vpshufb %xmm12, %xmm8, %xmm12
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm12[0],xmm13[0],xmm12[1],xmm13[1],xmm12[2],xmm13[2],xmm12[3],xmm13[3],xmm12[4],xmm13[4],xmm12[5],xmm13[5],xmm12[6],xmm13[6],xmm12[7],xmm13[7]
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,0,0,1]
+; AVX512BW-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm7 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
+; AVX512BW-FAST-NEXT:    vpshufb %xmm7, %xmm9, %xmm13
+; AVX512BW-FAST-NEXT:    vpshufb %xmm7, %xmm10, %xmm14
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm13 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,0,0,1]
 ; AVX512BW-FAST-NEXT:    movw $18724, %cx # imm = 0x4924
 ; AVX512BW-FAST-NEXT:    kmovd %ecx, %k1
-; AVX512BW-FAST-NEXT:    vmovdqu16 %ymm6, %ymm12 {%k1}
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm0, %zmm6
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm11[0,1,2,3],zmm6[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vmovdqa (%r9), %xmm11
-; AVX512BW-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm12 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
-; AVX512BW-FAST-NEXT:    vpshufb %xmm12, %xmm11, %xmm13
-; AVX512BW-FAST-NEXT:    vmovdqa (%r8), %xmm14
-; AVX512BW-FAST-NEXT:    vpshufb %xmm12, %xmm14, %xmm12
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm12[0],xmm13[0],xmm12[1],xmm13[1],xmm12[2],xmm13[2],xmm12[3],xmm13[3],xmm12[4],xmm13[4],xmm12[5],xmm13[5],xmm12[6],xmm13[6],xmm12[7],xmm13[7]
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,0,0,1]
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm13 = xmm14[0],xmm11[0],xmm14[1],xmm11[1],xmm14[2],xmm11[2],xmm14[3],xmm11[3],xmm14[4],xmm11[4],xmm14[5],xmm11[5],xmm14[6],xmm11[6],xmm14[7],xmm11[7]
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4]
-; AVX512BW-FAST-NEXT:    vpermw %ymm13, %ymm15, %ymm13
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm13, %zmm12
+; AVX512BW-FAST-NEXT:    vmovdqu16 %ymm6, %ymm13 {%k1}
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm0, %zmm6
+; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm8[0,1,2,3],zmm6[4,5,6,7]
+; AVX512BW-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm8 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
+; AVX512BW-FAST-NEXT:    vmovdqa (%r9), %xmm13
+; AVX512BW-FAST-NEXT:    vpshufb %xmm8, %xmm13, %xmm14
+; AVX512BW-FAST-NEXT:    vmovdqa (%r8), %xmm15
+; AVX512BW-FAST-NEXT:    vpshufb %xmm8, %xmm15, %xmm16
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm14 = xmm16[0],xmm14[0],xmm16[1],xmm14[1],xmm16[2],xmm14[2],xmm16[3],xmm14[3],xmm16[4],xmm14[4],xmm16[5],xmm14[5],xmm16[6],xmm14[6],xmm16[7],xmm14[7]
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,0,0,1]
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm16 = xmm15[0],xmm13[0],xmm15[1],xmm13[1],xmm15[2],xmm13[2],xmm15[3],xmm13[3],xmm15[4],xmm13[4],xmm15[5],xmm13[5],xmm15[6],xmm13[6],xmm15[7],xmm13[7]
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4]
+; AVX512BW-FAST-NEXT:    vpermw %ymm16, %ymm17, %ymm16
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm16, %zmm14
 ; AVX512BW-FAST-NEXT:    movl $613566756, %ecx # imm = 0x24924924
 ; AVX512BW-FAST-NEXT:    kmovd %ecx, %k3
-; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm12, %zmm6 {%k3}
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm12 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[16],ymm3[16],ymm2[17],ymm3[17],ymm2[18],ymm3[18],ymm2[19],ymm3[19],ymm2[20],ymm3[20],ymm2[21],ymm3[21],ymm2[22],ymm3[22],ymm2[23],ymm3[23]
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm13 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[4],ymm5[4],ymm4[5],ymm5[5],ymm4[6],ymm5[6],ymm4[7],ymm5[7],ymm4[16],ymm5[16],ymm4[17],ymm5[17],ymm4[18],ymm5[18],ymm4[19],ymm5[19],ymm4[20],ymm5[20],ymm4[21],ymm5[21],ymm4[22],ymm5[22],ymm4[23],ymm5[23]
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = [8,11,10,9,8,11,10,9,8,11,10,9,12,13,14,13]
-; AVX512BW-FAST-NEXT:    vpermw %ymm13, %ymm15, %ymm13
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = [9,8,11,10,9,8,11,10,9,8,11,10,13,12,15,14]
-; AVX512BW-FAST-NEXT:    vpermw %ymm12, %ymm15, %ymm13 {%k2}
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm0, %zmm12
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm8[8],xmm7[8],xmm8[9],xmm7[9],xmm8[10],xmm7[10],xmm8[11],xmm7[11],xmm8[12],xmm7[12],xmm8[13],xmm7[13],xmm8[14],xmm7[14],xmm8[15],xmm7[15]
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7]
-; AVX512BW-FAST-NEXT:    vpermw %ymm8, %ymm9, %ymm8
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7]
-; AVX512BW-FAST-NEXT:    vpermw %ymm7, %ymm9, %ymm8 {%k1}
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm8[0,1,2,3],zmm12[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm8 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm9 = xmm14[8],xmm11[8],xmm14[9],xmm11[9],xmm14[10],xmm11[10],xmm14[11],xmm11[11],xmm14[12],xmm11[12],xmm14[13],xmm11[13],xmm14[14],xmm11[14],xmm14[15],xmm11[15]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm9, %zmm8
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,26,25,24,27,26,25,24,27,26,25,24,27,28,28,28,28]
+; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm14, %zmm6 {%k3}
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm14 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm16 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[16],ymm5[16],ymm3[17],ymm5[17],ymm3[18],ymm5[18],ymm3[19],ymm5[19],ymm3[20],ymm5[20],ymm3[21],ymm5[21],ymm3[22],ymm5[22],ymm3[23],ymm5[23]
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [8,11,10,9,8,11,10,9,8,11,10,9,12,13,14,13]
+; AVX512BW-FAST-NEXT:    vpermw %ymm16, %ymm17, %ymm16
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [9,8,11,10,9,8,11,10,9,8,11,10,13,12,15,14]
+; AVX512BW-FAST-NEXT:    vpermw %ymm14, %ymm17, %ymm16 {%k2}
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm0, %zmm14
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm9 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm10 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7]
+; AVX512BW-FAST-NEXT:    vpermw %ymm10, %ymm11, %ymm10
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7]
+; AVX512BW-FAST-NEXT:    vpermw %ymm9, %ymm11, %ymm10 {%k1}
+; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm10[0,1,2,3],zmm14[4,5,6,7]
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm10 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm11 = xmm15[8],xmm13[8],xmm15[9],xmm13[9],xmm15[10],xmm13[10],xmm15[11],xmm13[11],xmm15[12],xmm13[12],xmm15[13],xmm13[13],xmm15[14],xmm13[14],xmm15[15],xmm13[15]
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm11, %zmm10
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,26,25,24,27,26,25,24,27,26,25,24,27,28,28,28,28]
 ; AVX512BW-FAST-NEXT:    movl $1227133513, %ecx # imm = 0x49249249
 ; AVX512BW-FAST-NEXT:    kmovd %ecx, %k2
-; AVX512BW-FAST-NEXT:    vpermw %zmm8, %zmm9, %zmm7 {%k2}
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm8 = ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15],ymm2[24],ymm3[24],ymm2[25],ymm3[25],ymm2[26],ymm3[26],ymm2[27],ymm3[27],ymm2[28],ymm3[28],ymm2[29],ymm3[29],ymm2[30],ymm3[30],ymm2[31],ymm3[31]
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm9 = ymm4[8],ymm5[8],ymm4[9],ymm5[9],ymm4[10],ymm5[10],ymm4[11],ymm5[11],ymm4[12],ymm5[12],ymm4[13],ymm5[13],ymm4[14],ymm5[14],ymm4[15],ymm5[15],ymm4[24],ymm5[24],ymm4[25],ymm5[25],ymm4[26],ymm5[26],ymm4[27],ymm5[27],ymm4[28],ymm5[28],ymm4[29],ymm5[29],ymm4[30],ymm5[30],ymm4[31],ymm5[31]
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
-; AVX512BW-FAST-NEXT:    vpermw %ymm9, %ymm10, %ymm9
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
-; AVX512BW-FAST-NEXT:    vpermw %ymm8, %ymm10, %ymm9 {%k1}
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm8
-; AVX512BW-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm9 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512BW-FAST-NEXT:    vpshufb %ymm9, %ymm5, %ymm5
-; AVX512BW-FAST-NEXT:    vpshufb %ymm9, %ymm4, %ymm4
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[4],ymm5[4],ymm4[5],ymm5[5],ymm4[6],ymm5[6],ymm4[7],ymm5[7],ymm4[16],ymm5[16],ymm4[17],ymm5[17],ymm4[18],ymm5[18],ymm4[19],ymm5[19],ymm4[20],ymm5[20],ymm4[21],ymm5[21],ymm4[22],ymm5[22],ymm4[23],ymm5[23]
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
-; AVX512BW-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm5 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512BW-FAST-NEXT:    vpshufb %ymm5, %ymm3, %ymm3
-; AVX512BW-FAST-NEXT:    vpshufb %ymm5, %ymm2, %ymm2
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[16],ymm3[16],ymm2[17],ymm3[17],ymm2[18],ymm3[18],ymm2[19],ymm3[19],ymm2[20],ymm3[20],ymm2[21],ymm3[21],ymm2[22],ymm3[22],ymm2[23],ymm3[23]
+; AVX512BW-FAST-NEXT:    vpermw %zmm10, %zmm11, %zmm9 {%k2}
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm10 = ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15],ymm2[24],ymm4[24],ymm2[25],ymm4[25],ymm2[26],ymm4[26],ymm2[27],ymm4[27],ymm2[28],ymm4[28],ymm2[29],ymm4[29],ymm2[30],ymm4[30],ymm2[31],ymm4[31]
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm11 = ymm3[8],ymm5[8],ymm3[9],ymm5[9],ymm3[10],ymm5[10],ymm3[11],ymm5[11],ymm3[12],ymm5[12],ymm3[13],ymm5[13],ymm3[14],ymm5[14],ymm3[15],ymm5[15],ymm3[24],ymm5[24],ymm3[25],ymm5[25],ymm3[26],ymm5[26],ymm3[27],ymm5[27],ymm3[28],ymm5[28],ymm3[29],ymm5[29],ymm3[30],ymm5[30],ymm3[31],ymm5[31]
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
+; AVX512BW-FAST-NEXT:    vpermw %ymm11, %ymm12, %ymm11
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
+; AVX512BW-FAST-NEXT:    vpermw %ymm10, %ymm12, %ymm11 {%k1}
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm10
+; AVX512BW-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm11 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512BW-FAST-NEXT:    vpshufb %ymm11, %ymm5, %ymm5
+; AVX512BW-FAST-NEXT:    vpshufb %ymm11, %ymm3, %ymm3
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[16],ymm5[16],ymm3[17],ymm5[17],ymm3[18],ymm5[18],ymm3[19],ymm5[19],ymm3[20],ymm5[20],ymm3[21],ymm5[21],ymm3[22],ymm5[22],ymm3[23],ymm5[23]
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX512BW-FAST-NEXT:    vpshufb %ymm7, %ymm4, %ymm4
+; AVX512BW-FAST-NEXT:    vpshufb %ymm7, %ymm2, %ymm2
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX512BW-FAST-NEXT:    vmovdqu16 %ymm4, %ymm2 {%k1}
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm8[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
-; AVX512BW-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm4
-; AVX512BW-FAST-NEXT:    vpshufb %ymm3, %ymm0, %ymm3
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[16],ymm4[16],ymm3[17],ymm4[17],ymm3[18],ymm4[18],ymm3[19],ymm4[19],ymm3[20],ymm4[20],ymm3[21],ymm4[21],ymm3[22],ymm4[22],ymm3[23],ymm4[23]
+; AVX512BW-FAST-NEXT:    vmovdqu16 %ymm3, %ymm2 {%k1}
+; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm10[4,5,6,7]
+; AVX512BW-FAST-NEXT:    vpshufb %ymm8, %ymm1, %ymm3
+; AVX512BW-FAST-NEXT:    vpshufb %ymm8, %ymm0, %ymm4
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[16],ymm3[16],ymm4[17],ymm3[17],ymm4[18],ymm3[18],ymm4[19],ymm3[19],ymm4[20],ymm3[20],ymm4[21],ymm3[21],ymm4[22],ymm3[22],ymm4[23],ymm3[23]
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
 ; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [10,13,12,11,10,13,12,11,10,13,12,11,14,13,14,15]
@@ -2134,7 +2123,7 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    kmovd %ecx, %k1
 ; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm0, %zmm2 {%k1}
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm2, 128(%rax)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm7, 64(%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm9, 64(%rax)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm6, (%rax)
 ; AVX512BW-FAST-NEXT:    vzeroupper
 ; AVX512BW-FAST-NEXT:    retq
@@ -3144,53 +3133,52 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-SLOW-NEXT:    subq $664, %rsp # imm = 0x298
 ; AVX2-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm6
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm5
-; AVX2-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa (%rcx), %xmm1
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm7
-; AVX2-SLOW-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX2-SLOW-NEXT:    vpshufb %xmm2, %xmm1, %xmm0
-; AVX2-SLOW-NEXT:    vmovdqa %xmm1, %xmm8
-; AVX2-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm7
+; AVX2-SLOW-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm0 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
+; AVX2-SLOW-NEXT:    vmovdqa (%rcx), %xmm2
+; AVX2-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm5
+; AVX2-SLOW-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-SLOW-NEXT:    vpshufb %xmm0, %xmm2, %xmm1
+; AVX2-SLOW-NEXT:    vmovdqa %xmm2, %xmm8
+; AVX2-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %xmm3
 ; AVX2-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm9
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpshufb %xmm2, %xmm3, %xmm1
+; AVX2-SLOW-NEXT:    vpshufb %xmm0, %xmm3, %xmm2
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm3, %xmm11
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm3, (%rsp) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
+; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1]
 ; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %xmm4
 ; AVX2-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm12
 ; AVX2-SLOW-NEXT:    vpbroadcastq {{.*#+}} xmm3 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX2-SLOW-NEXT:    vpshufb %xmm3, %xmm4, %xmm1
+; AVX2-SLOW-NEXT:    vpshufb %xmm3, %xmm4, %xmm2
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm4, %xmm15
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm10
 ; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm13
 ; AVX2-SLOW-NEXT:    vpshufb %xmm3, %xmm10, %xmm4
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm1[0,0,0,1]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255>
-; AVX2-SLOW-NEXT:    vpblendvb %ymm1, %ymm0, %ymm4, %ymm0
-; AVX2-SLOW-NEXT:    vpshufb %xmm2, %xmm7, %xmm4
-; AVX2-SLOW-NEXT:    vpshufb %xmm2, %xmm9, %xmm2
-; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,0,1]
-; AVX2-SLOW-NEXT:    vpshufb %xmm3, %xmm12, %xmm4
+; AVX2-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[0,0,0,1]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255>
+; AVX2-SLOW-NEXT:    vpblendvb %ymm2, %ymm1, %ymm4, %ymm1
+; AVX2-SLOW-NEXT:    vpshufb %xmm0, %xmm5, %xmm4
+; AVX2-SLOW-NEXT:    vpshufb %xmm0, %xmm9, %xmm5
+; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
+; AVX2-SLOW-NEXT:    vpshufb %xmm3, %xmm12, %xmm5
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm12, %xmm14
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-SLOW-NEXT:    vpshufb %xmm3, %xmm13, %xmm3
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
+; AVX2-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm5[8],xmm3[9],xmm5[9],xmm3[10],xmm5[10],xmm3[11],xmm5[11],xmm3[12],xmm5[12],xmm3[13],xmm5[13],xmm3[14],xmm5[14],xmm3[15],xmm5[15]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm1, %ymm2, %ymm3, %ymm2
-; AVX2-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX2-SLOW-NEXT:    vpshufb %ymm4, %ymm5, %ymm3
-; AVX2-SLOW-NEXT:    vpshufb %ymm4, %ymm6, %ymm5
-; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm5[0],ymm3[0],ymm5[1],ymm3[1],ymm5[2],ymm3[2],ymm5[3],ymm3[3],ymm5[4],ymm3[4],ymm5[5],ymm3[5],ymm5[6],ymm3[6],ymm5[7],ymm3[7],ymm5[16],ymm3[16],ymm5[17],ymm3[17],ymm5[18],ymm3[18],ymm5[19],ymm3[19],ymm5[20],ymm3[20],ymm5[21],ymm3[21],ymm5[22],ymm3[22],ymm5[23],ymm3[23]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX2-SLOW-NEXT:    vpshufb %ymm0, %ymm7, %ymm4
+; AVX2-SLOW-NEXT:    vpshufb %ymm0, %ymm6, %ymm5
+; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm5[0],ymm4[0],ymm5[1],ymm4[1],ymm5[2],ymm4[2],ymm5[3],ymm4[3],ymm5[4],ymm4[4],ymm5[5],ymm4[5],ymm5[6],ymm4[6],ymm5[7],ymm4[7],ymm5[16],ymm4[16],ymm5[17],ymm4[17],ymm5[18],ymm4[18],ymm5[19],ymm4[19],ymm5[20],ymm4[20],ymm5[21],ymm4[21],ymm5[22],ymm4[22],ymm5[23],ymm4[23]
 ; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm7
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm6
@@ -3199,16 +3187,16 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-SLOW-NEXT:    vpshufb %ymm5, %ymm6, %ymm6
 ; AVX2-SLOW-NEXT:    vpshufb %ymm5, %ymm7, %ymm7
 ; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm6 = ymm7[0],ymm6[0],ymm7[1],ymm6[1],ymm7[2],ymm6[2],ymm7[3],ymm6[3],ymm7[4],ymm6[4],ymm7[5],ymm6[5],ymm7[6],ymm6[6],ymm7[7],ymm6[7],ymm7[16],ymm6[16],ymm7[17],ymm6[17],ymm7[18],ymm6[18],ymm7[19],ymm6[19],ymm7[20],ymm6[20],ymm7[21],ymm6[21],ymm7[22],ymm6[22],ymm7[23],ymm6[23]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm1, %ymm3, %ymm6, %ymm3
+; AVX2-SLOW-NEXT:    vpblendvb %ymm2, %ymm4, %ymm6, %ymm4
 ; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %ymm7
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa (%rcx), %ymm6
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshufb %ymm4, %ymm6, %ymm6
-; AVX2-SLOW-NEXT:    vpshufb %ymm4, %ymm7, %ymm4
-; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm6[0],ymm4[1],ymm6[1],ymm4[2],ymm6[2],ymm4[3],ymm6[3],ymm4[4],ymm6[4],ymm4[5],ymm6[5],ymm4[6],ymm6[6],ymm4[7],ymm6[7],ymm4[16],ymm6[16],ymm4[17],ymm6[17],ymm4[18],ymm6[18],ymm4[19],ymm6[19],ymm4[20],ymm6[20],ymm4[21],ymm6[21],ymm4[22],ymm6[22],ymm4[23],ymm6[23]
+; AVX2-SLOW-NEXT:    vpshufb %ymm0, %ymm6, %ymm6
+; AVX2-SLOW-NEXT:    vpshufb %ymm0, %ymm7, %ymm0
+; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm6[0],ymm0[1],ymm6[1],ymm0[2],ymm6[2],ymm0[3],ymm6[3],ymm0[4],ymm6[4],ymm0[5],ymm6[5],ymm0[6],ymm6[6],ymm0[7],ymm6[7],ymm0[16],ymm6[16],ymm0[17],ymm6[17],ymm0[18],ymm6[18],ymm0[19],ymm6[19],ymm0[20],ymm6[20],ymm0[21],ymm6[21],ymm0[22],ymm6[22],ymm0[23],ymm6[23]
 ; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %ymm7
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %ymm9
@@ -3216,61 +3204,61 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm9, %ymm12
 ; AVX2-SLOW-NEXT:    vpshufb %ymm5, %ymm7, %ymm5
 ; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm5[0],ymm6[0],ymm5[1],ymm6[1],ymm5[2],ymm6[2],ymm5[3],ymm6[3],ymm5[4],ymm6[4],ymm5[5],ymm6[5],ymm5[6],ymm6[6],ymm5[7],ymm6[7],ymm5[16],ymm6[16],ymm5[17],ymm6[17],ymm5[18],ymm6[18],ymm5[19],ymm6[19],ymm5[20],ymm6[20],ymm5[21],ymm6[21],ymm5[22],ymm6[22],ymm5[23],ymm6[23]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm1, %ymm4, %ymm5, %ymm1
+; AVX2-SLOW-NEXT:    vpblendvb %ymm2, %ymm0, %ymm5, %ymm0
 ; AVX2-SLOW-NEXT:    vmovdqa (%r8), %xmm6
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <6,u,5,u,8,u,7,u,9,u,9,u,9,u,9,u>
-; AVX2-SLOW-NEXT:    vpshufb %xmm4, %xmm6, %xmm5
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <6,u,5,u,8,u,7,u,9,u,9,u,9,u,9,u>
+; AVX2-SLOW-NEXT:    vpshufb %xmm2, %xmm6, %xmm5
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm6, %xmm7
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,0,1]
 ; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = [255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm6, %ymm0, %ymm5, %ymm0
+; AVX2-SLOW-NEXT:    vpblendvb %ymm6, %ymm1, %ymm5, %ymm1
 ; AVX2-SLOW-NEXT:    vmovdqa 32(%r8), %xmm5
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpshufb %xmm4, %xmm5, %xmm4
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm6, %ymm2, %ymm4, %ymm2
+; AVX2-SLOW-NEXT:    vpshufb %xmm2, %xmm5, %xmm2
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,0,1]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm6, %ymm3, %ymm2, %ymm2
 ; AVX2-SLOW-NEXT:    vmovdqa 32(%r8), %ymm5
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [6,0,5,0,8,0,7,0,0,0,9,0,0,0,0,0,6,0,5,0,8,0,7,0,0,0,9,0,0,0,0,0]
-; AVX2-SLOW-NEXT:    # ymm4 = mem[0,1,0,1]
-; AVX2-SLOW-NEXT:    vpshufb %ymm4, %ymm5, %ymm5
+; AVX2-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [6,0,5,0,8,0,7,0,0,0,9,0,0,0,0,0,6,0,5,0,8,0,7,0,0,0,9,0,0,0,0,0]
+; AVX2-SLOW-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX2-SLOW-NEXT:    vpshufb %ymm3, %ymm5, %ymm5
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm6, %ymm3, %ymm5, %ymm3
+; AVX2-SLOW-NEXT:    vpblendvb %ymm6, %ymm4, %ymm5, %ymm4
 ; AVX2-SLOW-NEXT:    vmovdqa (%r8), %ymm5
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshufb %ymm4, %ymm5, %ymm4
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm6, %ymm1, %ymm4, %ymm1
+; AVX2-SLOW-NEXT:    vpshufb %ymm3, %ymm5, %ymm3
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm6, %ymm0, %ymm3, %ymm0
 ; AVX2-SLOW-NEXT:    vmovdqa (%r9), %xmm5
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,6,u,5,u,8,u,7,u,9,u,9,u,9,u,9>
-; AVX2-SLOW-NEXT:    vpshufb %xmm4, %xmm5, %xmm5
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,6,u,5,u,8,u,7,u,9,u,9,u,9,u,9>
+; AVX2-SLOW-NEXT:    vpshufb %xmm3, %xmm5, %xmm5
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,0,1]
 ; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = [255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm6, %ymm0, %ymm5, %ymm0
-; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpblendvb %ymm6, %ymm1, %ymm5, %ymm1
+; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa 32(%r9), %xmm5
-; AVX2-SLOW-NEXT:    vpshufb %xmm4, %xmm5, %xmm0
+; AVX2-SLOW-NEXT:    vpshufb %xmm3, %xmm5, %xmm1
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm5, %xmm9
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm6, %ymm2, %ymm0, %ymm0
-; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
+; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa 32(%r9), %ymm2
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [0,6,0,5,0,8,0,7,0,0,0,9,0,0,0,0,0,6,0,5,0,8,0,7,0,0,0,9,0,0,0,0]
-; AVX2-SLOW-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX2-SLOW-NEXT:    vpshufb %ymm0, %ymm2, %ymm2
+; AVX2-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,6,0,5,0,8,0,7,0,0,0,9,0,0,0,0,0,6,0,5,0,8,0,7,0,0,0,9,0,0,0,0]
+; AVX2-SLOW-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX2-SLOW-NEXT:    vpshufb %ymm1, %ymm2, %ymm2
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm6, %ymm3, %ymm2, %ymm2
+; AVX2-SLOW-NEXT:    vpblendvb %ymm6, %ymm4, %ymm2, %ymm2
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa (%r9), %ymm2
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshufb %ymm0, %ymm2, %ymm0
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm6, %ymm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT:    vpshufb %ymm1, %ymm2, %ymm1
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm6, %ymm0, %ymm1, %ymm0
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm10[0],xmm15[0],xmm10[1],xmm15[1],xmm10[2],xmm15[2],xmm10[3],xmm15[3],xmm10[4],xmm15[4],xmm10[5],xmm15[5],xmm10[6],xmm15[6],xmm10[7],xmm15[7]
 ; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7]
@@ -3471,54 +3459,53 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ;
 ; AVX2-FAST-LABEL: store_i8_stride6_vf64:
 ; AVX2-FAST:       # %bb.0:
-; AVX2-FAST-NEXT:    subq $680, %rsp # imm = 0x2A8
+; AVX2-FAST-NEXT:    subq $664, %rsp # imm = 0x298
 ; AVX2-FAST-NEXT:    vmovdqa 32(%rdx), %ymm6
 ; AVX2-FAST-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa 32(%rcx), %ymm5
-; AVX2-FAST-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa (%rcx), %xmm0
-; AVX2-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa 32(%rcx), %xmm7
-; AVX2-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX2-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
-; AVX2-FAST-NEXT:    vmovdqa (%rdx), %xmm1
+; AVX2-FAST-NEXT:    vmovdqa 32(%rcx), %ymm7
+; AVX2-FAST-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm0 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
+; AVX2-FAST-NEXT:    vmovdqa (%rcx), %xmm1
 ; AVX2-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa 32(%rcx), %xmm5
+; AVX2-FAST-NEXT:    vpshufb %xmm0, %xmm1, %xmm1
+; AVX2-FAST-NEXT:    vmovdqa (%rdx), %xmm2
+; AVX2-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-NEXT:    vmovdqa 32(%rdx), %xmm8
-; AVX2-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
+; AVX2-FAST-NEXT:    vpshufb %xmm0, %xmm2, %xmm2
+; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1]
 ; AVX2-FAST-NEXT:    vmovdqa (%rsi), %xmm4
 ; AVX2-FAST-NEXT:    vmovdqa 32(%rsi), %xmm9
 ; AVX2-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm3 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX2-FAST-NEXT:    vpshufb %xmm3, %xmm4, %xmm1
+; AVX2-FAST-NEXT:    vpshufb %xmm3, %xmm4, %xmm2
 ; AVX2-FAST-NEXT:    vmovdqa %xmm4, %xmm12
 ; AVX2-FAST-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-NEXT:    vmovdqa (%rdi), %xmm13
 ; AVX2-FAST-NEXT:    vmovdqa 32(%rdi), %xmm11
 ; AVX2-FAST-NEXT:    vpshufb %xmm3, %xmm13, %xmm4
 ; AVX2-FAST-NEXT:    vmovdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm1[0,0,0,1]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255>
-; AVX2-FAST-NEXT:    vpblendvb %ymm1, %ymm0, %ymm4, %ymm0
-; AVX2-FAST-NEXT:    vpshufb %xmm2, %xmm7, %xmm4
-; AVX2-FAST-NEXT:    vmovdqa %xmm7, %xmm14
-; AVX2-FAST-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-NEXT:    vpshufb %xmm2, %xmm8, %xmm2
+; AVX2-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[0,0,0,1]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255>
+; AVX2-FAST-NEXT:    vpblendvb %ymm2, %ymm1, %ymm4, %ymm1
+; AVX2-FAST-NEXT:    vpshufb %xmm0, %xmm5, %xmm4
+; AVX2-FAST-NEXT:    vmovdqa %xmm5, %xmm14
+; AVX2-FAST-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FAST-NEXT:    vpshufb %xmm0, %xmm8, %xmm5
 ; AVX2-FAST-NEXT:    vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,0,1]
-; AVX2-FAST-NEXT:    vpshufb %xmm3, %xmm9, %xmm4
+; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
+; AVX2-FAST-NEXT:    vpshufb %xmm3, %xmm9, %xmm5
 ; AVX2-FAST-NEXT:    vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-NEXT:    vpshufb %xmm3, %xmm11, %xmm3
 ; AVX2-FAST-NEXT:    vmovdqa %xmm11, (%rsp) # 16-byte Spill
-; AVX2-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
+; AVX2-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm5[8],xmm3[9],xmm5[9],xmm3[10],xmm5[10],xmm3[11],xmm5[11],xmm3[12],xmm5[12],xmm3[13],xmm5[13],xmm3[14],xmm5[14],xmm3[15],xmm5[15]
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1]
-; AVX2-FAST-NEXT:    vpblendvb %ymm1, %ymm2, %ymm3, %ymm2
-; AVX2-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX2-FAST-NEXT:    vpshufb %ymm4, %ymm5, %ymm3
-; AVX2-FAST-NEXT:    vpshufb %ymm4, %ymm6, %ymm5
-; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm5[0],ymm3[0],ymm5[1],ymm3[1],ymm5[2],ymm3[2],ymm5[3],ymm3[3],ymm5[4],ymm3[4],ymm5[5],ymm3[5],ymm5[6],ymm3[6],ymm5[7],ymm3[7],ymm5[16],ymm3[16],ymm5[17],ymm3[17],ymm5[18],ymm3[18],ymm5[19],ymm3[19],ymm5[20],ymm3[20],ymm5[21],ymm3[21],ymm5[22],ymm3[22],ymm5[23],ymm3[23]
+; AVX2-FAST-NEXT:    vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX2-FAST-NEXT:    vpshufb %ymm0, %ymm7, %ymm4
+; AVX2-FAST-NEXT:    vpshufb %ymm0, %ymm6, %ymm5
+; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm5[0],ymm4[0],ymm5[1],ymm4[1],ymm5[2],ymm4[2],ymm5[3],ymm4[3],ymm5[4],ymm4[4],ymm5[5],ymm4[5],ymm5[6],ymm4[6],ymm5[7],ymm4[7],ymm5[16],ymm4[16],ymm5[17],ymm4[17],ymm5[18],ymm4[18],ymm5[19],ymm4[19],ymm5[20],ymm4[20],ymm5[21],ymm4[21],ymm5[22],ymm4[22],ymm5[23],ymm4[23]
 ; AVX2-FAST-NEXT:    vmovdqa 32(%rdi), %ymm7
 ; AVX2-FAST-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovdqa 32(%rsi), %ymm6
@@ -3527,16 +3514,16 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-FAST-NEXT:    vpshufb %ymm5, %ymm6, %ymm6
 ; AVX2-FAST-NEXT:    vpshufb %ymm5, %ymm7, %ymm7
 ; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm6 = ymm7[0],ymm6[0],ymm7[1],ymm6[1],ymm7[2],ymm6[2],ymm7[3],ymm6[3],ymm7[4],ymm6[4],ymm7[5],ymm6[5],ymm7[6],ymm6[6],ymm7[7],ymm6[7],ymm7[16],ymm6[16],ymm7[17],ymm6[17],ymm7[18],ymm6[18],ymm7[19],ymm6[19],ymm7[20],ymm6[20],ymm7[21],ymm6[21],ymm7[22],ymm6[22],ymm7[23],ymm6[23]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3]
-; AVX2-FAST-NEXT:    vpblendvb %ymm1, %ymm3, %ymm6, %ymm3
+; AVX2-FAST-NEXT:    vpblendvb %ymm2, %ymm4, %ymm6, %ymm4
 ; AVX2-FAST-NEXT:    vmovdqa (%rdx), %ymm7
 ; AVX2-FAST-NEXT:    vmovdqa (%rcx), %ymm6
 ; AVX2-FAST-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufb %ymm4, %ymm6, %ymm6
-; AVX2-FAST-NEXT:    vpshufb %ymm4, %ymm7, %ymm4
+; AVX2-FAST-NEXT:    vpshufb %ymm0, %ymm6, %ymm6
+; AVX2-FAST-NEXT:    vpshufb %ymm0, %ymm7, %ymm0
 ; AVX2-FAST-NEXT:    vmovdqa %ymm7, %ymm15
-; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm6[0],ymm4[1],ymm6[1],ymm4[2],ymm6[2],ymm4[3],ymm6[3],ymm4[4],ymm6[4],ymm4[5],ymm6[5],ymm4[6],ymm6[6],ymm4[7],ymm6[7],ymm4[16],ymm6[16],ymm4[17],ymm6[17],ymm4[18],ymm6[18],ymm4[19],ymm6[19],ymm4[20],ymm6[20],ymm4[21],ymm6[21],ymm4[22],ymm6[22],ymm4[23],ymm6[23]
+; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm6[0],ymm0[1],ymm6[1],ymm0[2],ymm6[2],ymm0[3],ymm6[3],ymm0[4],ymm6[4],ymm0[5],ymm6[5],ymm0[6],ymm6[6],ymm0[7],ymm6[7],ymm0[16],ymm6[16],ymm0[17],ymm6[17],ymm0[18],ymm6[18],ymm0[19],ymm6[19],ymm0[20],ymm6[20],ymm0[21],ymm6[21],ymm0[22],ymm6[22],ymm0[23],ymm6[23]
 ; AVX2-FAST-NEXT:    vmovdqa (%rdi), %ymm7
 ; AVX2-FAST-NEXT:    vmovdqa (%rsi), %ymm6
 ; AVX2-FAST-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -3545,60 +3532,60 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-FAST-NEXT:    vmovdqa %ymm7, %ymm10
 ; AVX2-FAST-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm5[0],ymm6[0],ymm5[1],ymm6[1],ymm5[2],ymm6[2],ymm5[3],ymm6[3],ymm5[4],ymm6[4],ymm5[5],ymm6[5],ymm5[6],ymm6[6],ymm5[7],ymm6[7],ymm5[16],ymm6[16],ymm5[17],ymm6[17],ymm5[18],ymm6[18],ymm5[19],ymm6[19],ymm5[20],ymm6[20],ymm5[21],ymm6[21],ymm5[22],ymm6[22],ymm5[23],ymm6[23]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
-; AVX2-FAST-NEXT:    vpblendvb %ymm1, %ymm4, %ymm5, %ymm1
+; AVX2-FAST-NEXT:    vpblendvb %ymm2, %ymm0, %ymm5, %ymm0
 ; AVX2-FAST-NEXT:    vmovdqa (%r8), %xmm5
 ; AVX2-FAST-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <6,u,5,u,8,u,7,u,9,u,9,u,9,u,9,u>
-; AVX2-FAST-NEXT:    vpshufb %xmm4, %xmm5, %xmm5
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <6,u,5,u,8,u,7,u,9,u,9,u,9,u,9,u>
+; AVX2-FAST-NEXT:    vpshufb %xmm2, %xmm5, %xmm5
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,0,1]
 ; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255]
-; AVX2-FAST-NEXT:    vpblendvb %ymm6, %ymm0, %ymm5, %ymm7
-; AVX2-FAST-NEXT:    vmovdqa 32(%r8), %xmm0
-; AVX2-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-NEXT:    vpshufb %xmm4, %xmm0, %xmm4
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
-; AVX2-FAST-NEXT:    vpblendvb %ymm6, %ymm2, %ymm4, %ymm2
-; AVX2-FAST-NEXT:    vmovdqa 32(%r8), %ymm0
-; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [6,0,5,0,8,0,7,0,0,0,9,0,0,0,0,0,6,0,5,0,8,0,7,0,0,0,9,0,0,0,0,0]
-; AVX2-FAST-NEXT:    # ymm4 = mem[0,1,0,1]
-; AVX2-FAST-NEXT:    vpshufb %ymm4, %ymm0, %ymm5
+; AVX2-FAST-NEXT:    vpblendvb %ymm6, %ymm1, %ymm5, %ymm1
+; AVX2-FAST-NEXT:    vmovdqa 32(%r8), %xmm5
+; AVX2-FAST-NEXT:    vpshufb %xmm2, %xmm5, %xmm2
+; AVX2-FAST-NEXT:    vmovdqa %xmm5, %xmm7
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,0,1]
+; AVX2-FAST-NEXT:    vpblendvb %ymm6, %ymm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT:    vmovdqa 32(%r8), %ymm5
+; AVX2-FAST-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [6,0,5,0,8,0,7,0,0,0,9,0,0,0,0,0,6,0,5,0,8,0,7,0,0,0,9,0,0,0,0,0]
+; AVX2-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX2-FAST-NEXT:    vpshufb %ymm3, %ymm5, %ymm5
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
-; AVX2-FAST-NEXT:    vpblendvb %ymm6, %ymm3, %ymm5, %ymm3
-; AVX2-FAST-NEXT:    vmovdqa (%r8), %ymm0
-; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufb %ymm4, %ymm0, %ymm4
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
-; AVX2-FAST-NEXT:    vpblendvb %ymm6, %ymm1, %ymm4, %ymm1
+; AVX2-FAST-NEXT:    vpblendvb %ymm6, %ymm4, %ymm5, %ymm4
+; AVX2-FAST-NEXT:    vmovdqa (%r8), %ymm5
+; AVX2-FAST-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpshufb %ymm3, %ymm5, %ymm3
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX2-FAST-NEXT:    vpblendvb %ymm6, %ymm0, %ymm3, %ymm6
 ; AVX2-FAST-NEXT:    vmovdqa (%r9), %xmm0
 ; AVX2-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,6,u,5,u,8,u,7,u,9,u,9,u,9,u,9>
-; AVX2-FAST-NEXT:    vpshufb %xmm4, %xmm0, %xmm5
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,6,u,5,u,8,u,7,u,9,u,9,u,9,u,9>
+; AVX2-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm5
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,0,1]
 ; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255]
-; AVX2-FAST-NEXT:    vpblendvb %ymm0, %ymm7, %ymm5, %ymm5
-; AVX2-FAST-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa 32(%r9), %xmm5
-; AVX2-FAST-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-NEXT:    vpshufb %xmm4, %xmm5, %xmm4
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
-; AVX2-FAST-NEXT:    vpblendvb %ymm0, %ymm2, %ymm4, %ymm2
+; AVX2-FAST-NEXT:    vpblendvb %ymm0, %ymm1, %ymm5, %ymm1
+; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa 32(%r9), %xmm1
+; AVX2-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FAST-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1]
+; AVX2-FAST-NEXT:    vpblendvb %ymm0, %ymm2, %ymm1, %ymm1
+; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa 32(%r9), %ymm2
 ; AVX2-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa 32(%r9), %ymm4
-; AVX2-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [0,6,0,5,0,8,0,7,0,0,0,9,0,0,0,0,0,6,0,5,0,8,0,7,0,0,0,9,0,0,0,0]
-; AVX2-FAST-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX2-FAST-NEXT:    vpshufb %ymm2, %ymm4, %ymm4
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
-; AVX2-FAST-NEXT:    vpblendvb %ymm0, %ymm3, %ymm4, %ymm3
-; AVX2-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa (%r9), %ymm3
-; AVX2-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
+; AVX2-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,6,0,5,0,8,0,7,0,0,0,9,0,0,0,0,0,6,0,5,0,8,0,7,0,0,0,9,0,0,0,0]
+; AVX2-FAST-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX2-FAST-NEXT:    vpshufb %ymm1, %ymm2, %ymm2
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX2-FAST-NEXT:    vpblendvb %ymm0, %ymm1, %ymm2, %ymm0
+; AVX2-FAST-NEXT:    vpblendvb %ymm0, %ymm4, %ymm2, %ymm2
+; AVX2-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa (%r9), %ymm2
+; AVX2-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpshufb %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
+; AVX2-FAST-NEXT:    vpblendvb %ymm0, %ymm6, %ymm1, %ymm0
 ; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm11[8],xmm9[8],xmm11[9],xmm9[9],xmm11[10],xmm9[10],xmm11[11],xmm9[11],xmm11[12],xmm9[12],xmm11[13],xmm9[13],xmm11[14],xmm9[14],xmm11[15],xmm9[15]
 ; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [8,9,6,7,12,13,10,11,14,15,14,15,14,15,14,15]
@@ -3608,8 +3595,8 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [10,11,8,9,6,7,12,13,14,15,14,15,14,15,14,15]
 ; AVX2-FAST-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u>
-; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm0, %ymm1, %ymm6
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u>
+; AVX2-FAST-NEXT:    vpblendvb %ymm4, %ymm0, %ymm1, %ymm5
 ; AVX2-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm13[8],xmm12[8],xmm13[9],xmm12[9],xmm13[10],xmm12[10],xmm13[11],xmm12[11],xmm13[12],xmm12[12],xmm13[13],xmm12[13],xmm13[14],xmm12[14],xmm13[15],xmm12[15]
 ; AVX2-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
 ; AVX2-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
@@ -3618,75 +3605,75 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-FAST-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1]
-; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm0, %ymm1, %ymm4
+; AVX2-FAST-NEXT:    vpblendvb %ymm4, %ymm0, %ymm1, %ymm2
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vpunpckhbw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm0 = ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15],ymm0[24],mem[24],ymm0[25],mem[25],ymm0[26],mem[26],ymm0[27],mem[27],ymm0[28],mem[28],ymm0[29],mem[29],ymm0[30],mem[30],ymm0[31],mem[31]
-; AVX2-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31,24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31]
-; AVX2-FAST-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX2-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm0[2,2,2,3]
-; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpunpckhbw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm0 = ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15],ymm0[24],mem[24],ymm0[25],mem[25],ymm0[26],mem[26],ymm0[27],mem[27],ymm0[28],mem[28],ymm0[29],mem[29],ymm0[30],mem[30],ymm0[31],mem[31]
-; AVX2-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31]
+; AVX2-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31,24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31]
 ; AVX2-FAST-NEXT:    # ymm1 = mem[0,1,0,1]
 ; AVX2-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
-; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm3, %ymm0, %ymm3
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm0[2,2,2,3]
+; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpunpckhbw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm6 = ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15],ymm0[24],mem[24],ymm0[25],mem[25],ymm0[26],mem[26],ymm0[27],mem[27],ymm0[28],mem[28],ymm0[29],mem[29],ymm0[30],mem[30],ymm0[31],mem[31]
+; AVX2-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31]
+; AVX2-FAST-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX2-FAST-NEXT:    vpshufb %ymm0, %ymm6, %ymm6
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3]
+; AVX2-FAST-NEXT:    vpblendvb %ymm4, %ymm3, %ymm6, %ymm3
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm0 = ymm10[8],ymm9[8],ymm10[9],ymm9[9],ymm10[10],ymm9[10],ymm10[11],ymm9[11],ymm10[12],ymm9[12],ymm10[13],ymm9[13],ymm10[14],ymm9[14],ymm10[15],ymm9[15],ymm10[24],ymm9[24],ymm10[25],ymm9[25],ymm10[26],ymm9[26],ymm10[27],ymm9[27],ymm10[28],ymm9[28],ymm10[29],ymm9[29],ymm10[30],ymm9[30],ymm10[31],ymm9[31]
-; AVX2-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
+; AVX2-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm6 = ymm10[8],ymm9[8],ymm10[9],ymm9[9],ymm10[10],ymm9[10],ymm10[11],ymm9[11],ymm10[12],ymm9[12],ymm10[13],ymm9[13],ymm10[14],ymm9[14],ymm10[15],ymm9[15],ymm10[24],ymm9[24],ymm10[25],ymm9[25],ymm10[26],ymm9[26],ymm10[27],ymm9[27],ymm10[28],ymm9[28],ymm10[29],ymm9[29],ymm10[30],ymm9[30],ymm10[31],ymm9[31]
+; AVX2-FAST-NEXT:    vpshufb %ymm1, %ymm6, %ymm1
 ; AVX2-FAST-NEXT:    vmovdqa %ymm15, %ymm12
-; AVX2-FAST-NEXT:    vpunpckhbw {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm2 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm2 = ymm15[8],mem[8],ymm15[9],mem[9],ymm15[10],mem[10],ymm15[11],mem[11],ymm15[12],mem[12],ymm15[13],mem[13],ymm15[14],mem[14],ymm15[15],mem[15],ymm15[24],mem[24],ymm15[25],mem[25],ymm15[26],mem[26],ymm15[27],mem[27],ymm15[28],mem[28],ymm15[29],mem[29],ymm15[30],mem[30],ymm15[31],mem[31]
-; AVX2-FAST-NEXT:    vpshufb %ymm1, %ymm2, %ymm1
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
+; AVX2-FAST-NEXT:    vpunpckhbw {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm6 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm6 = ymm15[8],mem[8],ymm15[9],mem[9],ymm15[10],mem[10],ymm15[11],mem[11],ymm15[12],mem[12],ymm15[13],mem[13],ymm15[14],mem[14],ymm15[15],mem[15],ymm15[24],mem[24],ymm15[25],mem[25],ymm15[26],mem[26],ymm15[27],mem[27],ymm15[28],mem[28],ymm15[29],mem[29],ymm15[30],mem[30],ymm15[31],mem[31]
+; AVX2-FAST-NEXT:    vpshufb %ymm0, %ymm6, %ymm0
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
-; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
+; AVX2-FAST-NEXT:    vpblendvb %ymm4, %ymm1, %ymm0, %ymm0
 ; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <10,u,13,u,12,u,11,u,14,u,13,u,14,u,15,u>
-; AVX2-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %xmm1, %xmm15, %xmm2
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,0,1]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0]
-; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm6, %ymm2, %ymm2
+; AVX2-FAST-NEXT:    vmovdqa %xmm7, %xmm15
+; AVX2-FAST-NEXT:    vpshufb %xmm1, %xmm7, %xmm4
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0]
+; AVX2-FAST-NEXT:    vpblendvb %ymm6, %ymm5, %ymm4, %ymm4
 ; AVX2-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
 ; AVX2-FAST-NEXT:    vpshufb %xmm1, %xmm7, %xmm1
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1]
-; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm4, %ymm1, %ymm1
-; AVX2-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [10,0,13,0,12,0,11,0,14,0,0,0,0,0,15,0,10,0,13,0,12,0,11,0,14,0,0,0,0,0,15,0]
-; AVX2-FAST-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX2-FAST-NEXT:    vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
+; AVX2-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [10,0,13,0,12,0,11,0,14,0,0,0,0,0,15,0,10,0,13,0,12,0,11,0,14,0,0,0,0,0,15,0]
+; AVX2-FAST-NEXT:    # ymm2 = mem[0,1,0,1]
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %ymm4, %ymm10, %ymm6
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3]
-; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm3, %ymm6, %ymm6
+; AVX2-FAST-NEXT:    vpshufb %ymm2, %ymm10, %ymm5
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
+; AVX2-FAST-NEXT:    vpblendvb %ymm6, %ymm3, %ymm5, %ymm5
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %ymm4, %ymm11, %ymm3
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm0, %ymm3, %ymm0
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,10,u,13,u,12,u,11,u,14,u,13,u,14,u,15>
+; AVX2-FAST-NEXT:    vpshufb %ymm2, %ymm11, %ymm2
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
+; AVX2-FAST-NEXT:    vpblendvb %ymm6, %ymm0, %ymm2, %ymm0
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,10,u,13,u,12,u,11,u,14,u,13,u,14,u,15>
 ; AVX2-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %xmm4, %xmm3, %xmm3
+; AVX2-FAST-NEXT:    vpshufb %xmm2, %xmm3, %xmm3
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0]
-; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm2, %ymm3, %ymm2
-; AVX2-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0]
+; AVX2-FAST-NEXT:    vpblendvb %ymm6, %ymm4, %ymm3, %ymm3
+; AVX2-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %xmm4, %xmm8, %xmm2
+; AVX2-FAST-NEXT:    vpshufb %xmm2, %xmm8, %xmm2
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,0,1]
-; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT:    vpblendvb %ymm6, %ymm1, %ymm2, %ymm1
 ; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [0,10,0,13,0,12,0,11,0,14,0,0,0,0,0,15,0,10,0,13,0,12,0,11,0,14,0,0,0,0,0,15]
 ; AVX2-FAST-NEXT:    # ymm4 = mem[0,1,0,1]
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vpshufb %ymm4, %ymm13, %ymm1
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
-; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm6, %ymm1, %ymm1
+; AVX2-FAST-NEXT:    vpblendvb %ymm6, %ymm5, %ymm1, %ymm1
 ; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vpshufb %ymm4, %ymm14, %ymm4
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
-; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm0, %ymm4, %ymm0
+; AVX2-FAST-NEXT:    vpblendvb %ymm6, %ymm0, %ymm4, %ymm0
 ; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX2-FAST-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm4 # 16-byte Folded Reload
@@ -3791,60 +3778,59 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-FAST-NEXT:    vmovdqa %ymm2, (%rax)
 ; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vmovaps %ymm0, 32(%rax)
-; AVX2-FAST-NEXT:    addq $680, %rsp # imm = 0x2A8
+; AVX2-FAST-NEXT:    addq $664, %rsp # imm = 0x298
 ; AVX2-FAST-NEXT:    vzeroupper
 ; AVX2-FAST-NEXT:    retq
 ;
 ; AVX2-FAST-PERLANE-LABEL: store_i8_stride6_vf64:
 ; AVX2-FAST-PERLANE:       # %bb.0:
-; AVX2-FAST-PERLANE-NEXT:    subq $680, %rsp # imm = 0x2A8
+; AVX2-FAST-PERLANE-NEXT:    subq $664, %rsp # imm = 0x298
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rdx), %ymm6
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rcx), %ymm5
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rcx), %xmm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rcx), %xmm7
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdx), %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rcx), %ymm7
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq {{.*#+}} ymm0 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rcx), %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rcx), %xmm5
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm0, %xmm1, %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdx), %xmm2
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rdx), %xmm8
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; AVX2-FAST-PERLANE-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm0, %xmm2, %xmm2
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rsi), %xmm4
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rsi), %xmm9
 ; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq {{.*#+}} xmm3 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm3, %xmm4, %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm3, %xmm4, %xmm2
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm4, %xmm12
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdi), %xmm13
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rdi), %xmm11
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm3, %xmm13, %xmm4
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm4 = ymm1[0,0,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm1 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255>
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm1, %ymm0, %ymm4, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm2, %xmm7, %xmm4
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm7, %xmm14
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm2, %xmm8, %xmm2
+; AVX2-FAST-PERLANE-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[0,0,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm2 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255>
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm2, %ymm1, %ymm4, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm0, %xmm5, %xmm4
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm5, %xmm14
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm0, %xmm8, %xmm5
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm3, %xmm9, %xmm4
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm3, %xmm9, %xmm5
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm3, %xmm11, %xmm3
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm11, (%rsp) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
+; AVX2-FAST-PERLANE-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm5[8],xmm3[9],xmm5[9],xmm3[10],xmm5[10],xmm3[11],xmm5[11],xmm3[12],xmm5[12],xmm3[13],xmm5[13],xmm3[14],xmm5[14],xmm3[15],xmm5[15]
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm1, %ymm2, %ymm3, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm5, %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm6, %ymm5
-; AVX2-FAST-PERLANE-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm5[0],ymm3[0],ymm5[1],ymm3[1],ymm5[2],ymm3[2],ymm5[3],ymm3[3],ymm5[4],ymm3[4],ymm5[5],ymm3[5],ymm5[6],ymm3[6],ymm5[7],ymm3[7],ymm5[16],ymm3[16],ymm5[17],ymm3[17],ymm5[18],ymm3[18],ymm5[19],ymm3[19],ymm5[20],ymm3[20],ymm5[21],ymm3[21],ymm5[22],ymm3[22],ymm5[23],ymm3[23]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm0, %ymm7, %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm0, %ymm6, %ymm5
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm5[0],ymm4[0],ymm5[1],ymm4[1],ymm5[2],ymm4[2],ymm5[3],ymm4[3],ymm5[4],ymm4[4],ymm5[5],ymm4[5],ymm5[6],ymm4[6],ymm5[7],ymm4[7],ymm5[16],ymm4[16],ymm5[17],ymm4[17],ymm5[18],ymm4[18],ymm5[19],ymm4[19],ymm5[20],ymm4[20],ymm5[21],ymm4[21],ymm5[22],ymm4[22],ymm5[23],ymm4[23]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rdi), %ymm7
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rsi), %ymm6
@@ -3853,16 +3839,16 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm5, %ymm6, %ymm6
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm5, %ymm7, %ymm7
 ; AVX2-FAST-PERLANE-NEXT:    vpunpcklbw {{.*#+}} ymm6 = ymm7[0],ymm6[0],ymm7[1],ymm6[1],ymm7[2],ymm6[2],ymm7[3],ymm6[3],ymm7[4],ymm6[4],ymm7[5],ymm6[5],ymm7[6],ymm6[6],ymm7[7],ymm6[7],ymm7[16],ymm6[16],ymm7[17],ymm6[17],ymm7[18],ymm6[18],ymm7[19],ymm6[19],ymm7[20],ymm6[20],ymm7[21],ymm6[21],ymm7[22],ymm6[22],ymm7[23],ymm6[23]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm1, %ymm3, %ymm6, %ymm3
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm2, %ymm4, %ymm6, %ymm4
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdx), %ymm7
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rcx), %ymm6
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm6, %ymm6
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm7, %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm0, %ymm6, %ymm6
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm0, %ymm7, %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm7, %ymm15
-; AVX2-FAST-PERLANE-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm6[0],ymm4[1],ymm6[1],ymm4[2],ymm6[2],ymm4[3],ymm6[3],ymm4[4],ymm6[4],ymm4[5],ymm6[5],ymm4[6],ymm6[6],ymm4[7],ymm6[7],ymm4[16],ymm6[16],ymm4[17],ymm6[17],ymm4[18],ymm6[18],ymm4[19],ymm6[19],ymm4[20],ymm6[20],ymm4[21],ymm6[21],ymm4[22],ymm6[22],ymm4[23],ymm6[23]
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm6[0],ymm0[1],ymm6[1],ymm0[2],ymm6[2],ymm0[3],ymm6[3],ymm0[4],ymm6[4],ymm0[5],ymm6[5],ymm0[6],ymm6[6],ymm0[7],ymm6[7],ymm0[16],ymm6[16],ymm0[17],ymm6[17],ymm0[18],ymm6[18],ymm0[19],ymm6[19],ymm0[20],ymm6[20],ymm0[21],ymm6[21],ymm0[22],ymm6[22],ymm0[23],ymm6[23]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdi), %ymm7
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rsi), %ymm6
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -3871,60 +3857,60 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm7, %ymm10
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm5[0],ymm6[0],ymm5[1],ymm6[1],ymm5[2],ymm6[2],ymm5[3],ymm6[3],ymm5[4],ymm6[4],ymm5[5],ymm6[5],ymm5[6],ymm6[6],ymm5[7],ymm6[7],ymm5[16],ymm6[16],ymm5[17],ymm6[17],ymm5[18],ymm6[18],ymm5[19],ymm6[19],ymm5[20],ymm6[20],ymm5[21],ymm6[21],ymm5[22],ymm6[22],ymm5[23],ymm6[23]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm1, %ymm4, %ymm5, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm2, %ymm0, %ymm5, %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r8), %xmm5
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm4 = <6,u,5,u,8,u,7,u,9,u,9,u,9,u,9,u>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm4, %xmm5, %xmm5
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm2 = <6,u,5,u,8,u,7,u,9,u,9,u,9,u,9,u>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm2, %xmm5, %xmm5
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,0,1]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm6 = [255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm6, %ymm0, %ymm5, %ymm7
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%r8), %xmm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm4, %xmm0, %xmm4
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm6, %ymm2, %ymm4, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%r8), %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [6,0,5,0,8,0,7,0,0,0,9,0,0,0,0,0,6,0,5,0,8,0,7,0,0,0,9,0,0,0,0,0]
-; AVX2-FAST-PERLANE-NEXT:    # ymm4 = mem[0,1,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm0, %ymm5
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm6, %ymm1, %ymm5, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%r8), %xmm5
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm2, %xmm5, %xmm2
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm5, %xmm7
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm6, %ymm3, %ymm2, %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%r8), %ymm5
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [6,0,5,0,8,0,7,0,0,0,9,0,0,0,0,0,6,0,5,0,8,0,7,0,0,0,9,0,0,0,0,0]
+; AVX2-FAST-PERLANE-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm3, %ymm5, %ymm5
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm6, %ymm3, %ymm5, %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r8), %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm0, %ymm4
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm6, %ymm1, %ymm4, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm6, %ymm4, %ymm5, %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r8), %ymm5
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm3, %ymm5, %ymm3
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm6, %ymm0, %ymm3, %ymm6
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r9), %xmm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,6,u,5,u,8,u,7,u,9,u,9,u,9,u,9>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm4, %xmm0, %xmm5
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,6,u,5,u,8,u,7,u,9,u,9,u,9,u,9>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm3, %xmm0, %xmm5
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,0,1]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm0 = [255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm0, %ymm7, %ymm5, %ymm5
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%r9), %xmm5
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm4, %xmm5, %xmm4
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm0, %ymm2, %ymm4, %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm0, %ymm1, %ymm5, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%r9), %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm0, %ymm2, %ymm1, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%r9), %ymm2
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%r9), %ymm4
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [0,6,0,5,0,8,0,7,0,0,0,9,0,0,0,0,0,6,0,5,0,8,0,7,0,0,0,9,0,0,0,0]
-; AVX2-FAST-PERLANE-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm2, %ymm4, %ymm4
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm0, %ymm3, %ymm4, %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r9), %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,6,0,5,0,8,0,7,0,0,0,9,0,0,0,0,0,6,0,5,0,8,0,7,0,0,0,9,0,0,0,0]
+; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm1, %ymm2, %ymm2
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm0, %ymm1, %ymm2, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm0, %ymm4, %ymm2, %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r9), %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm1, %ymm2, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm0, %ymm6, %ymm1, %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm11[8],xmm9[8],xmm11[9],xmm9[9],xmm11[10],xmm9[10],xmm11[11],xmm9[11],xmm11[12],xmm9[12],xmm11[13],xmm9[13],xmm11[14],xmm9[14],xmm11[15],xmm9[15]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm2 = [8,9,6,7,12,13,10,11,14,15,14,15,14,15,14,15]
@@ -3934,8 +3920,8 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm3 = [10,11,8,9,6,7,12,13,14,15,14,15,14,15,14,15]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm5, %ymm0, %ymm1, %ymm6
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm4, %ymm0, %ymm1, %ymm5
 ; AVX2-FAST-PERLANE-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm13[8],xmm12[8],xmm13[9],xmm12[9],xmm13[10],xmm12[10],xmm13[11],xmm12[11],xmm13[12],xmm12[12],xmm13[13],xmm12[13],xmm13[14],xmm12[14],xmm13[15],xmm12[15]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
@@ -3944,75 +3930,75 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm5, %ymm0, %ymm1, %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm4, %ymm0, %ymm1, %ymm2
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vpunpckhbw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm0 = ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15],ymm0[24],mem[24],ymm0[25],mem[25],ymm0[26],mem[26],ymm0[27],mem[27],ymm0[28],mem[28],ymm0[29],mem[29],ymm0[30],mem[30],ymm0[31],mem[31]
-; AVX2-FAST-PERLANE-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31,24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31]
-; AVX2-FAST-PERLANE-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm3 = ymm0[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhbw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm0 = ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15],ymm0[24],mem[24],ymm0[25],mem[25],ymm0[26],mem[26],ymm0[27],mem[27],ymm0[28],mem[28],ymm0[29],mem[29],ymm0[30],mem[30],ymm0[31],mem[31]
-; AVX2-FAST-PERLANE-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31]
+; AVX2-FAST-PERLANE-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31,24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31]
 ; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,1,0,1]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm5, %ymm3, %ymm0, %ymm3
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm3 = ymm0[2,2,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpunpckhbw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm6 = ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15],ymm0[24],mem[24],ymm0[25],mem[25],ymm0[26],mem[26],ymm0[27],mem[27],ymm0[28],mem[28],ymm0[29],mem[29],ymm0[30],mem[30],ymm0[31],mem[31]
+; AVX2-FAST-PERLANE-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31]
+; AVX2-FAST-PERLANE-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm0, %ymm6, %ymm6
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm4, %ymm3, %ymm6, %ymm3
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhbw {{.*#+}} ymm0 = ymm10[8],ymm9[8],ymm10[9],ymm9[9],ymm10[10],ymm9[10],ymm10[11],ymm9[11],ymm10[12],ymm9[12],ymm10[13],ymm9[13],ymm10[14],ymm9[14],ymm10[15],ymm9[15],ymm10[24],ymm9[24],ymm10[25],ymm9[25],ymm10[26],ymm9[26],ymm10[27],ymm9[27],ymm10[28],ymm9[28],ymm10[29],ymm9[29],ymm10[30],ymm9[30],ymm10[31],ymm9[31]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpunpckhbw {{.*#+}} ymm6 = ymm10[8],ymm9[8],ymm10[9],ymm9[9],ymm10[10],ymm9[10],ymm10[11],ymm9[11],ymm10[12],ymm9[12],ymm10[13],ymm9[13],ymm10[14],ymm9[14],ymm10[15],ymm9[15],ymm10[24],ymm9[24],ymm10[25],ymm9[25],ymm10[26],ymm9[26],ymm10[27],ymm9[27],ymm10[28],ymm9[28],ymm10[29],ymm9[29],ymm10[30],ymm9[30],ymm10[31],ymm9[31]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm1, %ymm6, %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm15, %ymm12
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhbw {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm2 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm2 = ymm15[8],mem[8],ymm15[9],mem[9],ymm15[10],mem[10],ymm15[11],mem[11],ymm15[12],mem[12],ymm15[13],mem[13],ymm15[14],mem[14],ymm15[15],mem[15],ymm15[24],mem[24],ymm15[25],mem[25],ymm15[26],mem[26],ymm15[27],mem[27],ymm15[28],mem[28],ymm15[29],mem[29],ymm15[30],mem[30],ymm15[31],mem[31]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm1, %ymm2, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpunpckhbw {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm6 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm6 = ymm15[8],mem[8],ymm15[9],mem[9],ymm15[10],mem[10],ymm15[11],mem[11],ymm15[12],mem[12],ymm15[13],mem[13],ymm15[14],mem[14],ymm15[15],mem[15],ymm15[24],mem[24],ymm15[25],mem[25],ymm15[26],mem[26],ymm15[27],mem[27],ymm15[28],mem[28],ymm15[29],mem[29],ymm15[30],mem[30],ymm15[31],mem[31]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm0, %ymm6, %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm5, %ymm0, %ymm1, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm4, %ymm1, %ymm0, %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm1 = <10,u,13,u,12,u,11,u,14,u,13,u,14,u,15,u>
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm1, %xmm15, %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm5 = [0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm5, %ymm6, %ymm2, %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm7, %xmm15
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm1, %xmm7, %xmm4
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm6 = [0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm6, %ymm5, %ymm4, %ymm4
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm1, %xmm7, %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm5, %ymm4, %ymm1, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [10,0,13,0,12,0,11,0,14,0,0,0,0,0,15,0,10,0,13,0,12,0,11,0,14,0,0,0,0,0,15,0]
-; AVX2-FAST-PERLANE-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [10,0,13,0,12,0,11,0,14,0,0,0,0,0,15,0,10,0,13,0,12,0,11,0,14,0,0,0,0,0,15,0]
+; AVX2-FAST-PERLANE-NEXT:    # ymm2 = mem[0,1,0,1]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm10, %ymm6
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm5, %ymm3, %ymm6, %ymm6
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm2, %ymm10, %ymm5
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm6, %ymm3, %ymm5, %ymm5
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm11, %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm5, %ymm0, %ymm3, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,10,u,13,u,12,u,11,u,14,u,13,u,14,u,15>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm2, %ymm11, %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm6, %ymm0, %ymm2, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,10,u,13,u,12,u,11,u,14,u,13,u,14,u,15>
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm4, %xmm3, %xmm3
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm2, %xmm3, %xmm3
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm5 = [255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm5, %ymm2, %ymm3, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm6 = [255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm6, %ymm4, %ymm3, %ymm3
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm4, %xmm8, %xmm2
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm2, %xmm8, %xmm2
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm5, %ymm1, %ymm2, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm6, %ymm1, %ymm2, %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [0,10,0,13,0,12,0,11,0,14,0,0,0,0,0,15,0,10,0,13,0,12,0,11,0,14,0,0,0,0,0,15]
 ; AVX2-FAST-PERLANE-NEXT:    # ymm4 = mem[0,1,0,1]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm13, %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm5, %ymm6, %ymm1, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm6, %ymm5, %ymm1, %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm14, %ymm4
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm5, %ymm0, %ymm4, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm6, %ymm0, %ymm4, %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm4 # 16-byte Folded Reload
@@ -4117,300 +4103,304 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm2, (%rax)
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm0, 32(%rax)
-; AVX2-FAST-PERLANE-NEXT:    addq $680, %rsp # imm = 0x2A8
+; AVX2-FAST-PERLANE-NEXT:    addq $664, %rsp # imm = 0x298
 ; AVX2-FAST-PERLANE-NEXT:    vzeroupper
 ; AVX2-FAST-PERLANE-NEXT:    retq
 ;
 ; AVX512F-SLOW-LABEL: store_i8_stride6_vf64:
 ; AVX512F-SLOW:       # %bb.0:
-; AVX512F-SLOW-NEXT:    subq $264, %rsp # imm = 0x108
+; AVX512F-SLOW-NEXT:    subq $328, %rsp # imm = 0x148
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm5
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm12
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm10
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa (%r9), %xmm13
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%r9), %xmm6
+; AVX512F-SLOW-NEXT:    vmovdqa (%r9), %xmm11
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%r9), %xmm7
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm8
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm9
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm10
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm11
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm6
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm12
+; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm14 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
 ; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} xmm4 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm2, %xmm0
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm3, %xmm1
 ; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm26
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm16
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm27
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,3,2,1,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6,5]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,6,u,5,u,8,u,7,u,9,u,9,u,9,u,9>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm6, %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm7, %xmm3
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm7, %xmm2
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,2,u,1,u,0,u,3,u,4,u,4,u,4,u,4>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm6, %xmm7
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm28
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm7, %xmm7
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm26
 ; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm7, %zmm2
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm5, %xmm2
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm12, %xmm4
+; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm10, %xmm4
 ; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm12[0],xmm5[0],xmm12[1],xmm5[1],xmm12[2],xmm5[2],xmm12[3],xmm5[3],xmm12[4],xmm5[4],xmm12[5],xmm5[5],xmm12[6],xmm5[6],xmm12[7],xmm5[7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm29
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm12, %xmm5
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm10[0],xmm5[0],xmm10[1],xmm5[1],xmm10[2],xmm5[2],xmm10[3],xmm5[3],xmm10[4],xmm5[4],xmm10[5],xmm5[5],xmm10[6],xmm5[6],xmm10[7],xmm5[7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm10, %xmm29
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm30
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,3,2,1,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,6,5]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm4, %zmm2
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm13, %xmm6
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm13, %xmm0
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm13, %xmm1
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm11, %xmm0
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm11, %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm11, %xmm31
 ; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm7 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm10, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm11, %ymm1
+; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm6, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm12, %ymm1
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm1 = ymm11[8],ymm10[8],ymm11[9],ymm10[9],ymm11[10],ymm10[10],ymm11[11],ymm10[11],ymm11[12],ymm10[12],ymm11[13],ymm10[13],ymm11[14],ymm10[14],ymm11[15],ymm10[15],ymm11[24],ymm10[24],ymm11[25],ymm10[25],ymm11[26],ymm10[26],ymm11[27],ymm10[27],ymm11[28],ymm10[28],ymm11[29],ymm10[29],ymm11[30],ymm10[30],ymm11[31],ymm10[31]
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm1 = ymm12[8],ymm6[8],ymm12[9],ymm6[9],ymm12[10],ymm6[10],ymm12[11],ymm6[11],ymm12[12],ymm6[12],ymm12[13],ymm6[13],ymm12[14],ymm6[14],ymm12[15],ymm6[15],ymm12[24],ymm6[24],ymm12[25],ymm6[25],ymm12[26],ymm6[26],ymm12[27],ymm6[27],ymm12[28],ymm6[28],ymm12[29],ymm6[29],ymm12[30],ymm6[30],ymm12[31],ymm6[31]
 ; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31,24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31]
 ; AVX512F-SLOW-NEXT:    # ymm13 = mem[0,1,0,1]
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm13, %ymm1, %ymm1
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm12 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm12, %ymm8, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufb %ymm12, %ymm9, %ymm1
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm1 = ymm9[8],ymm8[8],ymm9[9],ymm8[9],ymm9[10],ymm8[10],ymm9[11],ymm8[11],ymm9[12],ymm8[12],ymm9[13],ymm8[13],ymm9[14],ymm8[14],ymm9[15],ymm8[15],ymm9[24],ymm8[24],ymm9[25],ymm8[25],ymm9[26],ymm8[26],ymm9[27],ymm8[27],ymm9[28],ymm8[28],ymm9[29],ymm8[29],ymm9[30],ymm8[30],ymm9[31],ymm8[31]
-; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31]
-; AVX512F-SLOW-NEXT:    # ymm14 = mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm14, %ymm1, %ymm1
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb %ymm14, %ymm8, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufb %ymm14, %ymm9, %ymm1
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm9[8],ymm8[8],ymm9[9],ymm8[9],ymm9[10],ymm8[10],ymm9[11],ymm8[11],ymm9[12],ymm8[12],ymm9[13],ymm8[13],ymm9[14],ymm8[14],ymm9[15],ymm8[15],ymm9[24],ymm8[24],ymm9[25],ymm8[25],ymm9[26],ymm8[26],ymm9[27],ymm8[27],ymm9[28],ymm8[28],ymm9[29],ymm8[29],ymm9[30],ymm8[30],ymm9[31],ymm8[31]
+; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31]
+; AVX512F-SLOW-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm2, %ymm2
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%r9), %ymm4
-; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [0,10,0,13,0,12,0,11,0,14,0,0,0,0,0,15,0,10,0,13,0,12,0,11,0,14,0,0,0,0,0,15]
-; AVX512F-SLOW-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm4, %ymm0
+; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [0,10,0,13,0,12,0,11,0,14,0,0,0,0,0,15,0,10,0,13,0,12,0,11,0,14,0,0,0,0,0,15]
+; AVX512F-SLOW-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm4, %ymm10
 ; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,6,0,5,0,8,0,7,0,0,0,9,0,0,0,0,0,6,0,5,0,8,0,7,0,0,0,9,0,0,0,0]
 ; AVX512F-SLOW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm4, %ymm15
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm15, %zmm25
+; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm4, %ymm11
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm11, %zmm3
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %ymm15
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm15, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm2, %ymm7
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm7[0],ymm0[0],ymm7[1],ymm0[1],ymm7[2],ymm0[2],ymm7[3],ymm0[3],ymm7[4],ymm0[4],ymm7[5],ymm0[5],ymm7[6],ymm0[6],ymm7[7],ymm0[7],ymm7[16],ymm0[16],ymm7[17],ymm0[17],ymm7[18],ymm0[18],ymm7[19],ymm0[19],ymm7[20],ymm0[20],ymm7[21],ymm0[21],ymm7[22],ymm0[22],ymm7[23],ymm0[23]
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm7 = ymm2[8],ymm15[8],ymm2[9],ymm15[9],ymm2[10],ymm15[10],ymm2[11],ymm15[11],ymm2[12],ymm15[12],ymm2[13],ymm15[13],ymm2[14],ymm15[14],ymm2[15],ymm15[15],ymm2[24],ymm15[24],ymm2[25],ymm15[25],ymm2[26],ymm15[26],ymm2[27],ymm15[27],ymm2[28],ymm15[28],ymm2[29],ymm15[29],ymm2[30],ymm15[30],ymm2[31],ymm15[31]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm13, %ymm7, %ymm7
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm24
-; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %ymm13
-; AVX512F-SLOW-NEXT:    vpshufb %ymm12, %ymm0, %ymm7
-; AVX512F-SLOW-NEXT:    vpshufb %ymm12, %ymm13, %ymm12
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm7 = ymm12[0],ymm7[0],ymm12[1],ymm7[1],ymm12[2],ymm7[2],ymm12[3],ymm7[3],ymm12[4],ymm7[4],ymm12[5],ymm7[5],ymm12[6],ymm7[6],ymm12[7],ymm7[7],ymm12[16],ymm7[16],ymm12[17],ymm7[17],ymm12[18],ymm7[18],ymm12[19],ymm7[19],ymm12[20],ymm7[20],ymm12[21],ymm7[21],ymm12[22],ymm7[22],ymm12[23],ymm7[23]
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm12 = ymm13[8],ymm0[8],ymm13[9],ymm0[9],ymm13[10],ymm0[10],ymm13[11],ymm0[11],ymm13[12],ymm0[12],ymm13[13],ymm0[13],ymm13[14],ymm0[14],ymm13[15],ymm0[15],ymm13[24],ymm0[24],ymm13[25],ymm0[25],ymm13[26],ymm0[26],ymm13[27],ymm0[27],ymm13[28],ymm0[28],ymm13[29],ymm0[29],ymm13[30],ymm0[30],ymm13[31],ymm0[31]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm14, %ymm12, %ymm12
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm7, %zmm23
-; AVX512F-SLOW-NEXT:    vmovdqa (%r9), %ymm7
-; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm7, %ymm3
-; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm7, %ymm1
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm22
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm13[0],ymm0[0],ymm13[1],ymm0[1],ymm13[2],ymm0[2],ymm13[3],ymm0[3],ymm13[4],ymm0[4],ymm13[5],ymm0[5],ymm13[6],ymm0[6],ymm13[7],ymm0[7],ymm13[16],ymm0[16],ymm13[17],ymm0[17],ymm13[18],ymm0[18],ymm13[19],ymm0[19],ymm13[20],ymm0[20],ymm13[21],ymm0[21],ymm13[22],ymm0[22],ymm13[23],ymm0[23]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm19
-; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm10
+; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm15, %ymm11
+; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm10, %ymm7
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm7 = ymm7[0],ymm11[0],ymm7[1],ymm11[1],ymm7[2],ymm11[2],ymm7[3],ymm11[3],ymm7[4],ymm11[4],ymm7[5],ymm11[5],ymm7[6],ymm11[6],ymm7[7],ymm11[7],ymm7[16],ymm11[16],ymm7[17],ymm11[17],ymm7[18],ymm11[18],ymm7[19],ymm11[19],ymm7[20],ymm11[20],ymm7[21],ymm11[21],ymm7[22],ymm11[22],ymm7[23],ymm11[23]
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm11 = ymm10[8],ymm15[8],ymm10[9],ymm15[9],ymm10[10],ymm15[10],ymm10[11],ymm15[11],ymm10[12],ymm15[12],ymm10[13],ymm15[13],ymm10[14],ymm15[14],ymm10[15],ymm15[15],ymm10[24],ymm15[24],ymm10[25],ymm15[25],ymm10[26],ymm15[26],ymm10[27],ymm15[27],ymm10[28],ymm15[28],ymm10[29],ymm15[29],ymm10[30],ymm15[30],ymm10[31],ymm15[31]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm13, %ymm11, %ymm11
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm7, %zmm25
+; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %ymm7
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %ymm11
+; AVX512F-SLOW-NEXT:    vpshufb %ymm14, %ymm7, %ymm13
+; AVX512F-SLOW-NEXT:    vpshufb %ymm14, %ymm11, %ymm5
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm5[0],ymm13[0],ymm5[1],ymm13[1],ymm5[2],ymm13[2],ymm5[3],ymm13[3],ymm5[4],ymm13[4],ymm5[5],ymm13[5],ymm5[6],ymm13[6],ymm5[7],ymm13[7],ymm5[16],ymm13[16],ymm5[17],ymm13[17],ymm5[18],ymm13[18],ymm5[19],ymm13[19],ymm5[20],ymm13[20],ymm5[21],ymm13[21],ymm5[22],ymm13[22],ymm5[23],ymm13[23]
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm13 = ymm11[8],ymm7[8],ymm11[9],ymm7[9],ymm11[10],ymm7[10],ymm11[11],ymm7[11],ymm11[12],ymm7[12],ymm11[13],ymm7[13],ymm11[14],ymm7[14],ymm11[15],ymm7[15],ymm11[24],ymm7[24],ymm11[25],ymm7[25],ymm11[26],ymm7[26],ymm11[27],ymm7[27],ymm11[28],ymm7[28],ymm11[29],ymm7[29],ymm11[30],ymm7[30],ymm11[31],ymm7[31]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm13, %ymm0
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm5, %zmm24
+; AVX512F-SLOW-NEXT:    vmovdqa (%r9), %ymm3
+; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
+; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm23
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm11[0],ymm7[0],ymm11[1],ymm7[1],ymm11[2],ymm7[2],ymm11[3],ymm7[3],ymm11[4],ymm7[4],ymm11[5],ymm7[5],ymm11[6],ymm7[6],ymm11[7],ymm7[7],ymm11[16],ymm7[16],ymm11[17],ymm7[17],ymm11[18],ymm7[18],ymm11[19],ymm7[19],ymm11[20],ymm7[20],ymm11[21],ymm7[21],ymm11[22],ymm7[22],ymm11[23],ymm7[23]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm28
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm0
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm13 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm3
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm12 = ymm2[0],ymm15[0],ymm2[1],ymm15[1],ymm2[2],ymm15[2],ymm2[3],ymm15[3],ymm2[4],ymm15[4],ymm2[5],ymm15[5],ymm2[6],ymm15[6],ymm2[7],ymm15[7],ymm2[16],ymm15[16],ymm2[17],ymm15[17],ymm2[18],ymm15[18],ymm2[19],ymm15[19],ymm2[20],ymm15[20],ymm2[21],ymm15[21],ymm2[22],ymm15[22],ymm2[23],ymm15[23]
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm30, %xmm1
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm2
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm10[0],ymm15[0],ymm10[1],ymm15[1],ymm10[2],ymm15[2],ymm10[3],ymm15[3],ymm10[4],ymm15[4],ymm10[5],ymm15[5],ymm10[6],ymm15[6],ymm10[7],ymm15[7],ymm10[16],ymm15[16],ymm10[17],ymm15[17],ymm10[18],ymm15[18],ymm10[19],ymm15[19],ymm10[20],ymm15[20],ymm10[21],ymm15[21],ymm10[22],ymm15[22],ymm10[23],ymm15[23]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm19
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm10
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm10[8],xmm2[8],xmm10[9],xmm2[9],xmm10[10],xmm2[10],xmm10[11],xmm2[11],xmm10[12],xmm2[12],xmm10[13],xmm2[13],xmm10[14],xmm2[14],xmm10[15],xmm2[15]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = [10,11,8,9,6,7,12,13,14,15,14,15,14,15,14,15]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm2, %xmm14
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm2
+; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm5, %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm18
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm1
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm0
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm5, %xmm5
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm29
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm9[0],ymm8[0],ymm9[1],ymm8[1],ymm9[2],ymm8[2],ymm9[3],ymm8[3],ymm9[4],ymm8[4],ymm9[5],ymm8[5],ymm9[6],ymm8[6],ymm9[7],ymm8[7],ymm9[16],ymm8[16],ymm9[17],ymm8[17],ymm9[18],ymm8[18],ymm9[19],ymm8[19],ymm9[20],ymm8[20],ymm9[21],ymm8[21],ymm9[22],ymm8[22],ymm9[23],ymm8[23]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm30
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm5
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm5
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm8
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm5[8],xmm8[8],xmm5[9],xmm8[9],xmm5[10],xmm8[10],xmm5[11],xmm8[11],xmm5[12],xmm8[12],xmm5[13],xmm8[13],xmm5[14],xmm8[14],xmm5[15],xmm8[15]
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm5[8],xmm8[8],xmm5[9],xmm8[9],xmm5[10],xmm8[10],xmm5[11],xmm8[11],xmm5[12],xmm8[12],xmm5[13],xmm8[13],xmm5[14],xmm8[14],xmm5[15],xmm8[15]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = [8,9,6,7,12,13,10,11,14,15,14,15,14,15,14,15]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm9, %xmm13, %xmm8
-; AVX512F-SLOW-NEXT:    vpshufb %xmm9, %xmm5, %xmm5
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm31
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm11[0],ymm10[0],ymm11[1],ymm10[1],ymm11[2],ymm10[2],ymm11[3],ymm10[3],ymm11[4],ymm10[4],ymm11[5],ymm10[5],ymm11[6],ymm10[6],ymm11[7],ymm10[7],ymm11[16],ymm10[16],ymm11[17],ymm10[17],ymm11[18],ymm10[18],ymm11[19],ymm10[19],ymm11[20],ymm10[20],ymm11[21],ymm10[21],ymm11[22],ymm10[22],ymm11[23],ymm10[23]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm9, %xmm7, %xmm11
+; AVX512F-SLOW-NEXT:    vpshufb %xmm9, %xmm8, %xmm5
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm21
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm12[0],ymm6[0],ymm12[1],ymm6[1],ymm12[2],ymm6[2],ymm12[3],ymm6[3],ymm12[4],ymm6[4],ymm12[5],ymm6[5],ymm12[6],ymm6[6],ymm12[7],ymm6[7],ymm12[16],ymm6[16],ymm12[17],ymm6[17],ymm12[18],ymm6[18],ymm12[19],ymm6[19],ymm12[20],ymm6[20],ymm12[21],ymm6[21],ymm12[22],ymm6[22],ymm12[23],ymm6[23]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm20
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,10,u,13,u,12,u,11,u,14,u,13,u,14,u,15>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm6, %xmm11
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm6
-; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm6, %xmm5
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm28
-; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0]
-; AVX512F-SLOW-NEXT:    # ymm5 = mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm5, %ymm7, %ymm9
-; AVX512F-SLOW-NEXT:    vpshufb %ymm5, %ymm4, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm21
-; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} xmm4 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm2, %xmm5
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm0, %xmm7
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm5 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3],xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,10,u,13,u,12,u,11,u,14,u,13,u,14,u,15>
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm31, %xmm5
+; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm5, %xmm13
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm5
+; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm5, %xmm5
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm31
+; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm8 = [0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0]
+; AVX512F-SLOW-NEXT:    # ymm8 = mem[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm8, %ymm3, %ymm9
+; AVX512F-SLOW-NEXT:    vpshufb %ymm8, %ymm4, %ymm3
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm22
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm1, %xmm3
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm0, %xmm4
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; AVX512F-SLOW-NEXT:    vprold $16, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm26
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm1, %xmm0
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm3, %xmm2
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %xmm2
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm27
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm2, %xmm0
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm10, %xmm1
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %xmm1
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm10[0],xmm2[0],xmm10[1],xmm2[1],xmm10[2],xmm2[2],xmm10[3],xmm2[3],xmm10[4],xmm2[4],xmm10[5],xmm2[5],xmm10[6],xmm2[6],xmm10[7],xmm2[7]
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %xmm3
-; AVX512F-SLOW-NEXT:    vprold $16, %xmm1, %xmm1
+; AVX512F-SLOW-NEXT:    vprold $16, %xmm2, %xmm2
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <10,u,13,u,12,u,11,u,14,u,13,u,14,u,15,u>
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm27
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm2, %xmm5
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm26
+; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm1, %xmm6
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm3, %xmm15
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <6,u,5,u,8,u,7,u,9,u,9,u,9,u,9,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm3, %xmm13
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm13, %ymm18
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <6,u,5,u,8,u,7,u,9,u,9,u,9,u,9,u>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm3, %xmm10
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm10, %ymm17
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <2,u,1,u,0,u,3,u,4,u,4,u,4,u,4,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm3, %xmm13
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm2, %xmm0
+; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm3, %xmm10
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm1, %xmm14
+; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm1, %xmm0
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm16
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm2, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm17
-; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %ymm3
-; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0]
-; AVX512F-SLOW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm3, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %ymm2
-; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm2, %ymm6
-; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [10,0,13,0,12,0,11,0,14,0,0,0,0,0,15,0,10,0,13,0,12,0,11,0,14,0,0,0,0,0,15,0]
-; AVX512F-SLOW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm2, %ymm10
+; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %ymm1
+; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0]
+; AVX512F-SLOW-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm1, %ymm5
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %ymm3
+; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
+; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [10,0,13,0,12,0,11,0,14,0,0,0,0,0,15,0,10,0,13,0,12,0,11,0,14,0,0,0,0,0,15,0]
+; AVX512F-SLOW-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm3, %ymm7
 ; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [6,0,5,0,8,0,7,0,0,0,9,0,0,0,0,0,6,0,5,0,8,0,7,0,0,0,9,0,0,0,0,0]
 ; AVX512F-SLOW-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm2, %ymm7
-; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm3, %ymm2
-; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm3, %ymm3
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm14[0,0,0,1]
-; AVX512F-SLOW-NEXT:    vprold $16, %ymm19, %ymm1
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,0,1]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm12 = ymm12[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5,6,5,8,9,10,11,12,13,14,13]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm4[2,2,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,0,1]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm3, %ymm12
+; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm1, %ymm3
+; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm1, %ymm4
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm18[0,0,0,1]
+; AVX512F-SLOW-NEXT:    vprold $16, %ymm28, %ymm1
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,0,1]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm19, %ymm8
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm8[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5,6,5,8,9,10,11,12,13,14,13]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm28 = ymm6[0,0,0,1]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,0,0,1]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,2,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm8, %zmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm12, %zmm1
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm11, %zmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm6, %zmm1
 ; AVX512F-SLOW-NEXT:    vextracti64x4 $1, %zmm1, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm8 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm0, %ymm8, %ymm14
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm1, %ymm4, %ymm5
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm0, %zmm0
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm5[0,1,2,3],zmm0[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm11, %zmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm5, %zmm0
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm29[0,0,0,1]
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm0, %ymm6, %ymm5
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = [0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0]
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm1, %ymm0, %ymm28
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm1
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm28[0,1,2,3],zmm1[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm13, %zmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255]
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm5, %zmm8, %zmm1
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm29[0,0,0,1]
 ; AVX512F-SLOW-NEXT:    vprold $16, %ymm30, %ymm9
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,2,3]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm1, %zmm1
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm31[0,0,0,1]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm11
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm11 = ymm11[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5,6,5,8,9,10,11,12,13,14,13]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,2,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm15[0,0,0,1]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm28[0,0,0,1]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm28 = ymm21[2,2,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm29 = ymm18[0,0,0,1]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm30 = ymm13[0,0,0,1]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm16[0,0,0,1]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm31 = ymm17[0,0,0,1]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,2,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm21[0,0,0,1]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm13
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm13 = ymm13[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5,6,5,8,9,10,11,12,13,14,13]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,2,2,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[0,0,0,1]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm28 = ymm31[0,0,0,1]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm29 = ymm22[2,2,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm30 = ymm17[0,0,0,1]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,0,1]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,0,0,1]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm31 = ymm16[0,0,0,1]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,2,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm9, %zmm9
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm12, %zmm9
-; AVX512F-SLOW-NEXT:    vextracti64x4 $1, %zmm9, %ymm1
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm1, %ymm8, %ymm6
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm9, %ymm4, %ymm14
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm1
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm14[0,1,2,3],zmm1[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm28, %zmm15, %zmm6
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm5, %zmm6
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm1 = zmm26[0,0,0,1,4,4,4,5]
-; AVX512F-SLOW-NEXT:    vpermq $64, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # zmm5 = mem[0,0,0,1,4,4,4,5]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,2,3]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm5, %zmm5
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm11, %zmm9
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm5, %zmm6, %zmm9
+; AVX512F-SLOW-NEXT:    vextracti64x4 $1, %zmm9, %ymm5
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm5, %ymm6, %ymm2
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm9, %ymm0, %ymm15
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm2
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm15[0,1,2,3],zmm2[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm29, %zmm28, %zmm5
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm8, %zmm5
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm2 = zmm27[0,0,0,1,4,4,4,5]
+; AVX512F-SLOW-NEXT:    vpermq $64, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # zmm8 = mem[0,0,0,1,4,4,4,5]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm9, %zmm5
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm1 = zmm27[0,0,0,1,4,4,4,5]
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm9, %zmm8
+; AVX512F-SLOW-NEXT:    vextracti64x4 $1, %zmm8, %ymm2
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm2, %ymm9, %ymm30
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm8, %ymm6, %ymm10
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm30, %zmm0, %zmm2
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm10[0,1,2,3],zmm2[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpermq $64, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # zmm8 = mem[0,0,0,1,4,4,4,5]
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255]
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm10, %zmm8
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm2 = zmm26[0,0,0,1,4,4,4,5]
 ; AVX512F-SLOW-NEXT:    vpermq $64, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # zmm11 = mem[0,0,0,1,4,4,4,5]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm9, %zmm11
-; AVX512F-SLOW-NEXT:    vextracti64x4 $1, %zmm5, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm1, %ymm9, %ymm29
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm5, %ymm8, %ymm30
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm29, %zmm0, %zmm1
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm30[0,1,2,3],zmm1[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpermq $64, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # zmm5 = mem[0,0,0,1,4,4,4,5]
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm14, %zmm5
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm11, %ymm8, %ymm31
-; AVX512F-SLOW-NEXT:    vextracti64x4 $1, %zmm11, %ymm1
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm1, %ymm9, %ymm13
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm0, %zmm1
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm31[0,1,2,3],zmm1[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpermq $64, (%rsp), %zmm8 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # zmm8 = mem[0,0,0,1,4,4,4,5]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm14, %zmm8
-; AVX512F-SLOW-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # zmm1 = mem[2,2,2,3,6,6,6,7]
-; AVX512F-SLOW-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # zmm11 = mem[2,2,2,3,6,6,6,7]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm12, %zmm11
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm1 = zmm24[2,2,2,3,6,6,6,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm13 = zmm23[2,2,2,3,6,6,6,7]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm12, %zmm13
-; AVX512F-SLOW-NEXT:    vextracti64x4 $1, %zmm11, %ymm1
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm1, %ymm4, %ymm10
-; AVX512F-SLOW-NEXT:    vextracti64x4 $1, %zmm13, %ymm1
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm1, %ymm4, %ymm2
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm11, %ymm9, %ymm7
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm13, %ymm9, %ymm3
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm1
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm7[0,1,2,3],zmm1[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm4 = zmm25[2,2,2,3,6,6,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm7, %zmm4
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm1
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm3[0,1,2,3],zmm1[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm2 = zmm22[2,2,2,3,6,6,6,7]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm7, %zmm2
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm9, %zmm11
+; AVX512F-SLOW-NEXT:    vextracti64x4 $1, %zmm11, %ymm2
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm2, %ymm9, %ymm14
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm11, %ymm6, %ymm31
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm0, %zmm2
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm31[0,1,2,3],zmm2[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpermq $64, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # zmm11 = mem[0,0,0,1,4,4,4,5]
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm10, %zmm11
+; AVX512F-SLOW-NEXT:    vpermq $234, (%rsp), %zmm2 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # zmm2 = mem[2,2,2,3,6,6,6,7]
+; AVX512F-SLOW-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # zmm10 = mem[2,2,2,3,6,6,6,7]
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm6, %zmm10
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm2 = zmm25[2,2,2,3,6,6,6,7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm13 = zmm24[2,2,2,3,6,6,6,7]
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm6, %zmm13
+; AVX512F-SLOW-NEXT:    vextracti64x4 $1, %zmm10, %ymm2
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm2, %ymm0, %ymm7
+; AVX512F-SLOW-NEXT:    vextracti64x4 $1, %zmm13, %ymm2
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm2, %ymm0, %ymm3
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm10, %ymm9, %ymm12
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm13, %ymm9, %ymm4
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm12[0,1,2,3],zmm0[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # zmm2 = mem[2,2,2,3,6,6,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0]
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm6, %zmm2
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm4[0,1,2,3],zmm0[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm3 = zmm23[2,2,2,3,6,6,6,7]
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm6, %zmm3
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm2, 128(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm4, 320(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm8, (%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm5, 192(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm6, 256(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm0, 64(%rax)
-; AVX512F-SLOW-NEXT:    addq $264, %rsp # imm = 0x108
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm3, 128(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm2, 320(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm11, (%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm8, 192(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm5, 256(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm1, 64(%rax)
+; AVX512F-SLOW-NEXT:    addq $328, %rsp # imm = 0x148
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
 ;
 ; AVX512F-FAST-LABEL: store_i8_stride6_vf64:
 ; AVX512F-FAST:       # %bb.0:
-; AVX512F-FAST-NEXT:    subq $360, %rsp # imm = 0x168
+; AVX512F-FAST-NEXT:    subq $392, %rsp # imm = 0x188
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %ymm4
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm0 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
@@ -4418,267 +4408,266 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm3
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm3, %ymm2
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[16],ymm1[16],ymm2[17],ymm1[17],ymm2[18],ymm1[18],ymm2[19],ymm1[19],ymm2[20],ymm1[20],ymm2[21],ymm1[21],ymm2[22],ymm1[22],ymm2[23],ymm1[23]
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[16],ymm1[16],ymm2[17],ymm1[17],ymm2[18],ymm1[18],ymm2[19],ymm1[19],ymm2[20],ymm1[20],ymm2[21],ymm1[21],ymm2[22],ymm1[22],ymm2[23],ymm1[23]
 ; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm3[8],ymm4[8],ymm3[9],ymm4[9],ymm3[10],ymm4[10],ymm3[11],ymm4[11],ymm3[12],ymm4[12],ymm3[13],ymm4[13],ymm3[14],ymm4[14],ymm3[15],ymm4[15],ymm3[24],ymm4[24],ymm3[25],ymm4[25],ymm3[26],ymm4[26],ymm3[27],ymm4[27],ymm3[28],ymm4[28],ymm3[29],ymm4[29],ymm3[30],ymm4[30],ymm3[31],ymm4[31]
-; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31,24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31]
-; AVX512F-FAST-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm3, %ymm3
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %ymm6
-; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm6, %ymm3
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %ymm7
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm7, %ymm4
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[16],ymm3[16],ymm4[17],ymm3[17],ymm4[18],ymm3[18],ymm4[19],ymm3[19],ymm4[20],ymm3[20],ymm4[21],ymm3[21],ymm4[22],ymm3[22],ymm4[23],ymm3[23]
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm5 = ymm7[8],ymm6[8],ymm7[9],ymm6[9],ymm7[10],ymm6[10],ymm7[11],ymm6[11],ymm7[12],ymm6[12],ymm7[13],ymm6[13],ymm7[14],ymm6[14],ymm7[15],ymm6[15],ymm7[24],ymm6[24],ymm7[25],ymm6[25],ymm7[26],ymm6[26],ymm7[27],ymm6[27],ymm7[28],ymm6[28],ymm7[29],ymm6[29],ymm7[30],ymm6[30],ymm7[31],ymm6[31]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm7, %ymm20
+; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31,24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31]
+; AVX512F-FAST-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm3, %ymm3
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %ymm5
+; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm14 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
+; AVX512F-FAST-NEXT:    vpshufb %ymm14, %ymm5, %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %ymm6
+; AVX512F-FAST-NEXT:    vpshufb %ymm14, %ymm6, %ymm3
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[16],ymm2[16],ymm3[17],ymm2[17],ymm3[18],ymm2[18],ymm3[19],ymm2[19],ymm3[20],ymm2[20],ymm3[21],ymm2[21],ymm3[22],ymm2[22],ymm3[23],ymm2[23]
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm6[8],ymm5[8],ymm6[9],ymm5[9],ymm6[10],ymm5[10],ymm6[11],ymm5[11],ymm6[12],ymm5[12],ymm6[13],ymm5[13],ymm6[14],ymm5[14],ymm6[15],ymm5[15],ymm6[24],ymm5[24],ymm6[25],ymm5[25],ymm6[26],ymm5[26],ymm6[27],ymm5[27],ymm6[28],ymm5[28],ymm6[29],ymm5[29],ymm6[30],ymm5[30],ymm6[31],ymm5[31]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm6, %ymm25
-; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm5, %ymm30
+; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31]
+; AVX512F-FAST-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm4, %ymm4
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa 32(%r9), %ymm7
+; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [0,10,0,13,0,12,0,11,0,14,0,0,0,0,0,15,0,10,0,13,0,12,0,11,0,14,0,0,0,0,0,15]
 ; AVX512F-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm5, %ymm5
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 32(%r9), %ymm5
-; AVX512F-FAST-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [0,10,0,13,0,12,0,11,0,14,0,0,0,0,0,15,0,10,0,13,0,12,0,11,0,14,0,0,0,0,0,15]
-; AVX512F-FAST-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm5, %ymm4
-; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [0,6,0,5,0,8,0,7,0,0,0,9,0,0,0,0,0,6,0,5,0,8,0,7,0,0,0,9,0,0,0,0]
-; AVX512F-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm5, %ymm5
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm14
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm14, %ymm6
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm8
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm8, %ymm0
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm6[0],ymm0[1],ymm6[1],ymm0[2],ymm6[2],ymm0[3],ymm6[3],ymm0[4],ymm6[4],ymm0[5],ymm6[5],ymm0[6],ymm6[6],ymm0[7],ymm6[7],ymm0[16],ymm6[16],ymm0[17],ymm6[17],ymm0[18],ymm6[18],ymm0[19],ymm6[19],ymm0[20],ymm6[20],ymm0[21],ymm6[21],ymm0[22],ymm6[22],ymm0[23],ymm6[23]
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm6 = ymm8[8],ymm14[8],ymm8[9],ymm14[9],ymm8[10],ymm14[10],ymm8[11],ymm14[11],ymm8[12],ymm14[12],ymm8[13],ymm14[13],ymm8[14],ymm14[14],ymm8[15],ymm14[15],ymm8[24],ymm14[24],ymm8[25],ymm14[25],ymm8[26],ymm14[26],ymm8[27],ymm14[27],ymm8[28],ymm14[28],ymm8[29],ymm14[29],ymm8[30],ymm14[30],ymm8[31],ymm14[31]
-; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm6, %ymm2
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm7, %ymm5
+; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [0,6,0,5,0,8,0,7,0,0,0,9,0,0,0,0,0,6,0,5,0,8,0,7,0,0,0,9,0,0,0,0]
+; AVX512F-FAST-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm7, %ymm6
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm7, %ymm31
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm6, %zmm5
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm8
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm8, %ymm5
+; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm7
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm7, %ymm0
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm5[0],ymm0[1],ymm5[1],ymm0[2],ymm5[2],ymm0[3],ymm5[3],ymm0[4],ymm5[4],ymm0[5],ymm5[5],ymm0[6],ymm5[6],ymm0[7],ymm5[7],ymm0[16],ymm5[16],ymm0[17],ymm5[17],ymm0[18],ymm5[18],ymm0[19],ymm5[19],ymm0[20],ymm5[20],ymm0[21],ymm5[21],ymm0[22],ymm5[22],ymm0[23],ymm5[23]
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm5 = ymm7[8],ymm8[8],ymm7[9],ymm8[9],ymm7[10],ymm8[10],ymm7[11],ymm8[11],ymm7[12],ymm8[12],ymm7[13],ymm8[13],ymm7[14],ymm8[14],ymm7[15],ymm8[15],ymm7[24],ymm8[24],ymm7[25],ymm8[25],ymm7[26],ymm8[26],ymm7[27],ymm8[27],ymm7[28],ymm8[28],ymm7[29],ymm8[29],ymm7[30],ymm8[30],ymm7[31],ymm8[31]
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm5, %ymm1
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm6
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm6, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %ymm11
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm11, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm11
+; AVX512F-FAST-NEXT:    vpshufb %ymm14, %ymm11, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %ymm12
+; AVX512F-FAST-NEXT:    vpshufb %ymm14, %ymm12, %ymm1
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm1 = ymm11[8],ymm6[8],ymm11[9],ymm6[9],ymm11[10],ymm6[10],ymm11[11],ymm6[11],ymm11[12],ymm6[12],ymm11[13],ymm6[13],ymm11[14],ymm6[14],ymm11[15],ymm6[15],ymm11[24],ymm6[24],ymm11[25],ymm6[25],ymm11[26],ymm6[26],ymm11[27],ymm6[27],ymm11[28],ymm6[28],ymm11[29],ymm6[29],ymm11[30],ymm6[30],ymm11[31],ymm6[31]
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm1
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm1 = ymm12[8],ymm11[8],ymm12[9],ymm11[9],ymm12[10],ymm11[10],ymm12[11],ymm11[11],ymm12[12],ymm11[12],ymm12[13],ymm11[13],ymm12[14],ymm11[14],ymm12[15],ymm11[15],ymm12[24],ymm11[24],ymm12[25],ymm11[25],ymm12[26],ymm11[26],ymm12[27],ymm11[27],ymm12[28],ymm11[28],ymm12[29],ymm11[29],ymm12[30],ymm11[30],ymm12[31],ymm11[31]
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa (%r9), %ymm2
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm2, %ymm0
-; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm2, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm16
+; AVX512F-FAST-NEXT:    vmovdqa (%r9), %ymm9
+; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm9, %ymm0
+; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm9, %ymm1
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm9
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm3
 ; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm0 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm9, %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm7
-; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm7, %xmm2
+; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm6
+; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm6, %xmm2
 ; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3],xmm7[4],xmm9[4],xmm7[5],xmm9[5],xmm7[6],xmm9[6],xmm7[7],xmm9[7]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = [0,1,6,7,4,5,2,3,8,9,10,11,12,13,10,11]
-; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm2, %xmm2
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm26
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3],xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm3, %xmm18
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = [0,1,6,7,4,5,2,3,8,9,10,11,12,13,10,11]
+; AVX512F-FAST-NEXT:    vpshufb %xmm15, %xmm2, %xmm2
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm1
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%r9), %xmm4
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,6,u,5,u,8,u,7,u,9,u,9,u,9,u,9>
-; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm4, %xmm2
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <u,6,u,5,u,8,u,7,u,9,u,9,u,9,u,9>
+; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm4, %xmm2
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,2,u,1,u,0,u,3,u,4,u,4,u,4,u,4>
 ; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm4, %xmm3
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm4, %xmm17
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm3, %zmm27
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm4, %xmm19
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm3, %zmm26
 ; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm5
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm5, %xmm10
-; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm2, %xmm0
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm10[8],xmm0[9],xmm10[9],xmm0[10],xmm10[10],xmm0[11],xmm10[11],xmm0[12],xmm10[12],xmm0[13],xmm10[13],xmm0[14],xmm10[14],xmm0[15],xmm10[15]
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm10 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm10, %zmm28
-; AVX512F-FAST-NEXT:    vmovdqa (%r9), %xmm13
-; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm13, %xmm0
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm13, %xmm1
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm29
-; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %ymm12
+; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm3
+; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm5, %xmm4
+; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15]
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; AVX512F-FAST-NEXT:    vpshufb %xmm15, %xmm4, %xmm4
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm4, %zmm27
+; AVX512F-FAST-NEXT:    vmovdqa (%r9), %xmm15
+; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm15, %xmm0
+; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm15, %xmm1
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm28
+; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %ymm13
 ; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [10,0,13,0,12,0,11,0,14,0,0,0,0,0,15,0,10,0,13,0,12,0,11,0,14,0,0,0,0,0,15,0]
 ; AVX512F-FAST-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm12, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm19
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm21 = ymm0[2,2,2,3]
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm13, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm21
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm22 = ymm0[2,2,2,3]
 ; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [6,0,5,0,8,0,7,0,0,0,9,0,0,0,0,0,6,0,5,0,8,0,7,0,0,0,9,0,0,0,0,0]
 ; AVX512F-FAST-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm12, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm18
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm22 = ymm0[2,2,2,3]
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %xmm4
-; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm10 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512F-FAST-NEXT:    vpshufb %xmm10, %xmm4, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %xmm3
-; AVX512F-FAST-NEXT:    vpshufb %xmm10, %xmm3, %xmm15
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3],xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm15 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; AVX512F-FAST-NEXT:    vprold $16, %xmm15, %xmm15
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm15, %zmm23
-; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm15
-; AVX512F-FAST-NEXT:    vpshufb %xmm10, %xmm0, %xmm1
-; AVX512F-FAST-NEXT:    vpshufb %xmm10, %xmm15, %xmm10
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm10[0],xmm1[0],xmm10[1],xmm1[1],xmm10[2],xmm1[2],xmm10[3],xmm1[3],xmm10[4],xmm1[4],xmm10[5],xmm1[5],xmm10[6],xmm1[6],xmm10[7],xmm1[7]
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm10 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3],xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm13, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm20
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm24 = ymm0[2,2,2,3]
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %xmm2
+; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm2, %xmm4
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %xmm1
+; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm1, %xmm10
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm10[0],xmm4[0],xmm10[1],xmm4[1],xmm10[2],xmm4[2],xmm10[3],xmm4[3],xmm10[4],xmm4[4],xmm10[5],xmm4[5],xmm10[6],xmm4[6],xmm10[7],xmm4[7]
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm10 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
 ; AVX512F-FAST-NEXT:    vprold $16, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm10, %zmm24
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm11[0],ymm6[0],ymm11[1],ymm6[1],ymm11[2],ymm6[2],ymm11[3],ymm6[3],ymm11[4],ymm6[4],ymm11[5],ymm6[5],ymm11[6],ymm6[6],ymm11[7],ymm6[7],ymm11[16],ymm6[16],ymm11[17],ymm6[17],ymm11[18],ymm6[18],ymm11[19],ymm6[19],ymm11[20],ymm6[20],ymm11[21],ymm6[21],ymm11[22],ymm6[22],ymm11[23],ymm6[23]
-; AVX512F-FAST-NEXT:    vprold $16, %ymm1, %ymm1
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm10 = xmm15[8],xmm0[8],xmm15[9],xmm0[9],xmm15[10],xmm0[10],xmm15[11],xmm0[11],xmm15[12],xmm0[12],xmm15[13],xmm0[13],xmm15[14],xmm0[14],xmm15[15],xmm0[15]
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm8[0],ymm14[0],ymm8[1],ymm14[1],ymm8[2],ymm14[2],ymm8[3],ymm14[3],ymm8[4],ymm14[4],ymm8[5],ymm14[5],ymm8[6],ymm14[6],ymm8[7],ymm14[7],ymm8[16],ymm14[16],ymm8[17],ymm14[17],ymm8[18],ymm14[18],ymm8[19],ymm14[19],ymm8[20],ymm14[20],ymm8[21],ymm14[21],ymm8[22],ymm14[22],ymm8[23],ymm14[23]
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm5[8],xmm2[9],xmm5[9],xmm2[10],xmm5[10],xmm2[11],xmm5[11],xmm2[12],xmm5[12],xmm2[13],xmm5[13],xmm2[14],xmm5[14],xmm2[15],xmm5[15]
-; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [16,17,22,23,20,21,18,19,24,25,26,27,28,29,26,27,16,17,22,23,20,21,18,19,24,25,26,27,28,29,26,27]
-; AVX512F-FAST-NEXT:    # ymm5 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm0, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = [8,9,6,7,12,13,10,11,14,15,14,15,14,15,14,15]
-; AVX512F-FAST-NEXT:    vpshufb %xmm11, %xmm2, %xmm6
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [0,0,0,1,10,10,10,11]
-; AVX512F-FAST-NEXT:    vpermt2q %zmm0, %zmm31, %zmm6
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = [10,11,8,9,6,7,12,13,14,15,14,15,14,15,14,15]
-; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm10, %xmm2
-; AVX512F-FAST-NEXT:    vpermt2q %zmm1, %zmm31, %zmm2
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm30 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm30, %zmm6
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm10, %zmm23
+; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm4
+; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm10
+; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm4, %xmm0
+; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm10, %xmm14
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm14[0],xmm0[0],xmm14[1],xmm0[1],xmm14[2],xmm0[2],xmm14[3],xmm0[3],xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7]
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm14 = xmm10[0],xmm4[0],xmm10[1],xmm4[1],xmm10[2],xmm4[2],xmm10[3],xmm4[3],xmm10[4],xmm4[4],xmm10[5],xmm4[5],xmm10[6],xmm4[6],xmm10[7],xmm4[7]
+; AVX512F-FAST-NEXT:    vprold $16, %xmm14, %xmm14
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm14, %zmm29
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm12[0],ymm11[0],ymm12[1],ymm11[1],ymm12[2],ymm11[2],ymm12[3],ymm11[3],ymm12[4],ymm11[4],ymm12[5],ymm11[5],ymm12[6],ymm11[6],ymm12[7],ymm11[7],ymm12[16],ymm11[16],ymm12[17],ymm11[17],ymm12[18],ymm11[18],ymm12[19],ymm11[19],ymm12[20],ymm11[20],ymm12[21],ymm11[21],ymm12[22],ymm11[22],ymm12[23],ymm11[23]
+; AVX512F-FAST-NEXT:    vprold $16, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm10 = xmm10[8],xmm4[8],xmm10[9],xmm4[9],xmm10[10],xmm4[10],xmm10[11],xmm4[11],xmm10[12],xmm4[12],xmm10[13],xmm4[13],xmm10[14],xmm4[14],xmm10[15],xmm4[15]
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm7[0],ymm8[0],ymm7[1],ymm8[1],ymm7[2],ymm8[2],ymm7[3],ymm8[3],ymm7[4],ymm8[4],ymm7[5],ymm8[5],ymm7[6],ymm8[6],ymm7[7],ymm8[7],ymm7[16],ymm8[16],ymm7[17],ymm8[17],ymm7[18],ymm8[18],ymm7[19],ymm8[19],ymm7[20],ymm8[20],ymm7[21],ymm8[21],ymm7[22],ymm8[22],ymm7[23],ymm8[23]
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm5[8],xmm3[9],xmm5[9],xmm3[10],xmm5[10],xmm3[11],xmm5[11],xmm3[12],xmm5[12],xmm3[13],xmm5[13],xmm3[14],xmm5[14],xmm3[15],xmm5[15]
+; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [16,17,22,23,20,21,18,19,24,25,26,27,28,29,26,27,16,17,22,23,20,21,18,19,24,25,26,27,28,29,26,27]
+; AVX512F-FAST-NEXT:    # ymm14 = mem[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpshufb %ymm14, %ymm4, %ymm4
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = [8,9,6,7,12,13,10,11,14,15,14,15,14,15,14,15]
+; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm3, %xmm11
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [0,0,0,1,10,10,10,11]
+; AVX512F-FAST-NEXT:    vpermt2q %zmm4, %zmm7, %zmm11
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = [10,11,8,9,6,7,12,13,14,15,14,15,14,15,14,15]
+; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm10, %xmm3
+; AVX512F-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm3
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm16, %zmm11
 ; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0]
 ; AVX512F-FAST-NEXT:    # ymm10 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm16, %ymm1
-; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm1, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,10,u,13,u,12,u,11,u,14,u,13,u,14,u,15>
-; AVX512F-FAST-NEXT:    vpshufb %xmm8, %xmm13, %xmm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm8, %xmm16
-; AVX512F-FAST-NEXT:    vpermt2q %zmm1, %zmm31, %zmm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm20, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm25, %ymm8
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm8[0],ymm1[1],ymm8[1],ymm1[2],ymm8[2],ymm1[3],ymm8[3],ymm1[4],ymm8[4],ymm1[5],ymm8[5],ymm1[6],ymm8[6],ymm1[7],ymm8[7],ymm1[16],ymm8[16],ymm1[17],ymm8[17],ymm1[18],ymm8[18],ymm1[19],ymm8[19],ymm1[20],ymm8[20],ymm1[21],ymm8[21],ymm1[22],ymm8[22],ymm1[23],ymm8[23]
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
-; AVX512F-FAST-NEXT:    vprold $16, %ymm1, %ymm1
-; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm3
-; AVX512F-FAST-NEXT:    vpermt2q %zmm1, %zmm31, %zmm3
+; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm9, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,10,u,13,u,12,u,11,u,14,u,13,u,14,u,15>
+; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm15, %xmm3
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm5, %xmm17
+; AVX512F-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm3
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm25, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm30, %ymm5
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm5[0],ymm0[1],ymm5[1],ymm0[2],ymm5[2],ymm0[3],ymm5[3],ymm0[4],ymm5[4],ymm0[5],ymm5[5],ymm0[6],ymm5[6],ymm0[7],ymm5[7],ymm0[16],ymm5[16],ymm0[17],ymm5[17],ymm0[18],ymm5[18],ymm0[19],ymm5[19],ymm0[20],ymm5[20],ymm0[21],ymm5[21],ymm0[22],ymm5[22],ymm0[23],ymm5[23]
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; AVX512F-FAST-NEXT:    vprold $16, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm1, %xmm2
+; AVX512F-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm2
 ; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
 ; AVX512F-FAST-NEXT:    # ymm4 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[16],mem[16],ymm0[17],mem[17],ymm0[18],mem[18],ymm0[19],mem[19],ymm0[20],mem[20],ymm0[21],mem[21],ymm0[22],mem[22],ymm0[23],mem[23]
-; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm13
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm19, %ymm0
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm13, %ymm8
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm18, %ymm0
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm13, %ymm1
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm4, %ymm4
-; AVX512F-FAST-NEXT:    vmovdqa (%r8), %xmm15
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm7[8],xmm9[8],xmm7[9],xmm9[9],xmm7[10],xmm9[10],xmm7[11],xmm9[11],xmm7[12],xmm9[12],xmm7[13],xmm9[13],xmm7[14],xmm9[14],xmm7[15],xmm9[15]
-; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %xmm7
-; AVX512F-FAST-NEXT:    vpshufb %xmm11, %xmm5, %xmm5
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <6,u,5,u,8,u,7,u,9,u,9,u,9,u,9,u>
-; AVX512F-FAST-NEXT:    vpermt2q %zmm4, %zmm31, %zmm5
-; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm7, %xmm4
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm30, %zmm5
-; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0]
-; AVX512F-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm13, %ymm11
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm12, %ymm9
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <2,u,1,u,0,u,3,u,4,u,4,u,4,u,4,u>
-; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm15, %xmm12
-; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm7, %xmm13
-; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm15, %xmm14
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <10,u,13,u,12,u,11,u,14,u,13,u,14,u,15,u>
-; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm15, %xmm15
-; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm7, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm3, %ymm10
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm4[0,0,0,1]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm13[0,0,0,1]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm12[0,0,0,1]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm14[0,0,0,1]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm15[0,0,0,1]
+; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm5
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm21, %ymm0
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm5, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm20, %ymm1
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm5, %ymm1
+; AVX512F-FAST-NEXT:    vpshufb %ymm14, %ymm4, %ymm4
+; AVX512F-FAST-NEXT:    vmovdqa (%r8), %xmm14
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm18, %xmm8
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm6[8],xmm8[8],xmm6[9],xmm8[9],xmm6[10],xmm8[10],xmm6[11],xmm8[11],xmm6[12],xmm8[12],xmm6[13],xmm8[13],xmm6[14],xmm8[14],xmm6[15],xmm8[15]
+; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %xmm15
+; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm6, %xmm8
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = <6,u,5,u,8,u,7,u,9,u,9,u,9,u,9,u>
+; AVX512F-FAST-NEXT:    vpermt2q %zmm4, %zmm7, %zmm8
+; AVX512F-FAST-NEXT:    vpshufb %xmm6, %xmm15, %xmm4
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm16, %zmm8
+; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0]
+; AVX512F-FAST-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm5, %ymm12
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm13, %ymm9
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <2,u,1,u,0,u,3,u,4,u,4,u,4,u,4,u>
+; AVX512F-FAST-NEXT:    vpshufb %xmm6, %xmm14, %xmm6
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm15, %xmm5
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm14, %xmm2
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <10,u,13,u,12,u,11,u,14,u,13,u,14,u,15,u>
+; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm14, %xmm14
+; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm15, %xmm13
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm31, %ymm15
+; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm15, %ymm15
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm30 = ymm0[2,2,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm31 = ymm1[2,2,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,0,1]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm6[0,0,0,1]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm2[0,0,0,1]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm14[0,0,0,1]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm19, %xmm6
 ; AVX512F-FAST-NEXT:    vmovdqa64 %xmm17, %xmm14
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm16, %xmm15
-; AVX512F-FAST-NEXT:    vpshufb %xmm15, %xmm14, %xmm14
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = [0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm6, %ymm15, %ymm13
-; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm6, %ymm6
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,2,2,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm5, %ymm15, %ymm0
-; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm5, %ymm5
+; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm6, %xmm6
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0]
+; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm11, %ymm14, %ymm2
+; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm11, %ymm11
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,0,0,1]
+; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm8, %ymm14, %ymm13
+; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm8, %ymm8
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,2,3]
-; AVX512F-FAST-NEXT:    vpermt2q %zmm10, %zmm31, %zmm14
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm5, %ymm10, %ymm9
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm5
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm5[4,5,6,7]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm5, %zmm14
+; AVX512F-FAST-NEXT:    vpermt2q %zmm15, %zmm7, %zmm6
+; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm8, %ymm16, %ymm9
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm7
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm13[0,1,2,3],zmm7[4,5,6,7]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255]
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm7, %zmm8, %zmm6
 ; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm14, 256(%rax)
-; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm6, %ymm10, %ymm11
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm0
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm13[0,1,2,3],zmm0[4,5,6,7]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm5, %zmm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm2, 64(%rax)
-; AVX512F-FAST-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    # zmm0 = mem[2,2,2,3,6,6,6,7]
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm6, 256(%rax)
+; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm11, %ymm16, %ymm12
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm0, %zmm6
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm6[4,5,6,7]
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm8, %zmm3
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm3, 64(%rax)
 ; AVX512F-FAST-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
 ; AVX512F-FAST-NEXT:    # zmm2 = mem[2,2,2,3,6,6,6,7]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm30, %zmm2
-; AVX512F-FAST-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    # zmm0 = mem[2,2,2,3,6,6,6,7]
-; AVX512F-FAST-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    # zmm5 = mem[2,2,2,3,6,6,6,7]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm30, %zmm5
-; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm2, %ymm0
-; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm0, %ymm15, %ymm21
-; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm5, %ymm0
-; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm0, %ymm15, %ymm8
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm2, %ymm0, %ymm22
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm21, %zmm0, %zmm2
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm22[0,1,2,3],zmm2[4,5,6,7]
-; AVX512F-FAST-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    # zmm6 = mem[2,2,2,3,6,6,6,7]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm9, %zmm6
-; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm5, %ymm0, %ymm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm2
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm2[4,5,6,7]
+; AVX512F-FAST-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    # zmm3 = mem[2,2,2,3,6,6,6,7]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm16, %zmm3
+; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm3, %ymm2
+; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm2, %ymm14, %ymm22
 ; AVX512F-FAST-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
 ; AVX512F-FAST-NEXT:    # zmm2 = mem[2,2,2,3,6,6,6,7]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm9, %zmm2
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm1 = zmm23[0,0,0,1,4,4,4,5]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm5 = zmm26[0,0,0,1,4,4,4,5]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm8, %zmm5
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm1 = zmm24[0,0,0,1,4,4,4,5]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm9 = zmm28[0,0,0,1,4,4,4,5]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm8, %zmm9
-; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm5, %ymm1
-; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm1, %ymm0, %ymm3
-; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm9, %ymm1
-; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm1, %ymm0, %ymm4
-; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm5, %ymm10, %ymm7
-; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm9, %ymm10, %ymm12
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm7[0,1,2,3],zmm0[4,5,6,7]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm1 = zmm27[0,0,0,1,4,4,4,5]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm3, %zmm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm12[0,1,2,3],zmm0[4,5,6,7]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm4 = zmm29[0,0,0,1,4,4,4,5]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm3, %zmm4
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm4, (%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, 192(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm2, 128(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm6, 320(%rax)
-; AVX512F-FAST-NEXT:    addq $360, %rsp # imm = 0x168
+; AVX512F-FAST-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    # zmm6 = mem[2,2,2,3,6,6,6,7]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm16, %zmm6
+; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm6, %ymm2
+; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm2, %ymm14, %ymm30
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0]
+; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm3, %ymm2, %ymm24
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm22, %zmm0, %zmm3
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm24[0,1,2,3],zmm3[4,5,6,7]
+; AVX512F-FAST-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    # zmm7 = mem[2,2,2,3,6,6,6,7]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0]
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm8, %zmm7
+; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm6, %ymm2, %ymm31
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm30, %zmm0, %zmm3
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm31[0,1,2,3],zmm3[4,5,6,7]
+; AVX512F-FAST-NEXT:    vpermq $234, (%rsp), %zmm6 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    # zmm6 = mem[2,2,2,3,6,6,6,7]
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm8, %zmm6
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm3 = zmm23[0,0,0,1,4,4,4,5]
+; AVX512F-FAST-NEXT:    vpermq $64, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    # zmm8 = mem[0,0,0,1,4,4,4,5]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm2, %zmm8
+; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm8, %ymm3
+; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm3, %ymm2, %ymm4
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm3 = zmm29[0,0,0,1,4,4,4,5]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm9 = zmm27[0,0,0,1,4,4,4,5]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm2, %zmm9
+; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm9, %ymm3
+; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm3, %ymm2, %ymm0
+; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm8, %ymm16, %ymm5
+; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm9, %ymm16, %ymm1
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm2
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm5[0,1,2,3],zmm2[4,5,6,7]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm3 = zmm26[0,0,0,1,4,4,4,5]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255]
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm4, %zmm3
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm0[4,5,6,7]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm1 = zmm28[0,0,0,1,4,4,4,5]
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm4, %zmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, (%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm3, 192(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm6, 128(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm7, 320(%rax)
+; AVX512F-FAST-NEXT:    addq $392, %rsp # imm = 0x188
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
 ;
@@ -4765,54 +4754,54 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %zmm20, %zmm7 {%k2}
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm20 = xmm15[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermw %ymm20, %ymm23, %ymm20
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 32(%r9), %ymm23
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm26, %ymm23, %ymm24
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm24 = ymm24[2,2,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm24, %zmm20, %zmm20
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 32(%r9), %ymm24
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm26, %ymm24, %ymm23
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm23 = ymm23[2,2,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm23, %zmm20, %zmm20
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm20, %zmm7 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm24 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm24, %ymm25, %ymm20
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm24, %ymm27, %ymm26
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm20 = ymm26[0],ymm20[0],ymm26[1],ymm20[1],ymm26[2],ymm20[2],ymm26[3],ymm20[3],ymm26[4],ymm20[4],ymm26[5],ymm20[5],ymm26[6],ymm20[6],ymm26[7],ymm20[7],ymm26[16],ymm20[16],ymm26[17],ymm20[17],ymm26[18],ymm20[18],ymm26[19],ymm20[19],ymm26[20],ymm20[20],ymm26[21],ymm20[21],ymm26[22],ymm20[22],ymm26[23],ymm20[23]
+; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm26 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm26, %ymm25, %ymm20
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm26, %ymm27, %ymm23
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm20 = ymm23[0],ymm20[0],ymm23[1],ymm20[1],ymm23[2],ymm20[2],ymm23[3],ymm20[3],ymm23[4],ymm20[4],ymm23[5],ymm20[5],ymm23[6],ymm20[6],ymm23[7],ymm20[7],ymm23[16],ymm20[16],ymm23[17],ymm20[17],ymm23[18],ymm20[18],ymm23[19],ymm20[19],ymm23[20],ymm20[20],ymm23[21],ymm20[21],ymm23[22],ymm20[22],ymm23[23],ymm20[23]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm20 = ymm20[2,2,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm25 = ymm27[8],ymm25[8],ymm27[9],ymm25[9],ymm27[10],ymm25[10],ymm27[11],ymm25[11],ymm27[12],ymm25[12],ymm27[13],ymm25[13],ymm27[14],ymm25[14],ymm27[15],ymm25[15],ymm27[24],ymm25[24],ymm27[25],ymm25[25],ymm27[26],ymm25[26],ymm27[27],ymm25[27],ymm27[28],ymm25[28],ymm27[29],ymm25[29],ymm27[30],ymm25[30],ymm27[31],ymm25[31]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm26 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermw %ymm25, %ymm26, %ymm25
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm25, %zmm20, %zmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm27 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm27, %ymm28, %ymm20
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm27, %ymm29, %ymm31
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm23 = ymm27[8],ymm25[8],ymm27[9],ymm25[9],ymm27[10],ymm25[10],ymm27[11],ymm25[11],ymm27[12],ymm25[12],ymm27[13],ymm25[13],ymm27[14],ymm25[14],ymm27[15],ymm25[15],ymm27[24],ymm25[24],ymm27[25],ymm25[25],ymm27[26],ymm25[26],ymm27[27],ymm25[27],ymm27[28],ymm25[28],ymm27[29],ymm25[29],ymm27[30],ymm25[30],ymm27[31],ymm25[31]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm25 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermw %ymm23, %ymm25, %ymm23
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm23, %zmm20, %zmm27
+; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm23 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm23, %ymm28, %ymm20
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm23, %ymm29, %ymm31
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm20 = ymm31[0],ymm20[0],ymm31[1],ymm20[1],ymm31[2],ymm20[2],ymm31[3],ymm20[3],ymm31[4],ymm20[4],ymm31[5],ymm20[5],ymm31[6],ymm20[6],ymm31[7],ymm20[7],ymm31[16],ymm20[16],ymm31[17],ymm20[17],ymm31[18],ymm20[18],ymm31[19],ymm20[19],ymm31[20],ymm20[20],ymm31[21],ymm20[21],ymm31[22],ymm20[22],ymm31[23],ymm20[23]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm20 = ymm20[2,2,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm28 = ymm29[8],ymm28[8],ymm29[9],ymm28[9],ymm29[10],ymm28[10],ymm29[11],ymm28[11],ymm29[12],ymm28[12],ymm29[13],ymm28[13],ymm29[14],ymm28[14],ymm29[15],ymm28[15],ymm29[24],ymm28[24],ymm29[25],ymm28[25],ymm29[26],ymm28[26],ymm29[27],ymm28[27],ymm29[28],ymm28[28],ymm29[29],ymm28[29],ymm29[30],ymm28[30],ymm29[31],ymm28[31]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm29 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermw %ymm28, %ymm29, %ymm28
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm28, %zmm20, %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %zmm25, %zmm20 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm25 = zmm30[0,1,2,3],zmm14[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %zmm27, %zmm20 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm27 = zmm30[0,1,2,3],zmm14[4,5,6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm28 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,10,u,13,u,12,u,11,u,14,u,u,u,u,u,15,u>
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %zmm28, %zmm25, %zmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} zmm25 = zmm25[2,2,2,3,6,6,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %zmm28, %zmm27, %zmm27
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} zmm27 = zmm27[2,2,2,3,6,6,6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    movl $-1840700270, %ecx # imm = 0x92492492
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %ecx, %k2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %zmm25, %zmm20 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm23 = zmm23[0,1,2,3],zmm12[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm25 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,10,u,13,u,12,u,11,u,14,u,u,u,u,u,15>
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %zmm25, %zmm23, %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} zmm23 = zmm23[2,2,2,3,6,6,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %zmm27, %zmm20 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm24 = zmm24[0,1,2,3],zmm12[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm27 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,10,u,13,u,12,u,11,u,14,u,u,u,u,u,15>
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %zmm27, %zmm24, %zmm24
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} zmm24 = zmm24[2,2,2,3,6,6,6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    movabsq $-9076969306111049208, %rcx # imm = 0x8208208208208208
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovq %rcx, %k3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm23, %zmm20 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm24, %ymm16, %ymm23
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm24, %ymm17, %ymm24
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm23 = ymm24[0],ymm23[0],ymm24[1],ymm23[1],ymm24[2],ymm23[2],ymm24[3],ymm23[3],ymm24[4],ymm23[4],ymm24[5],ymm23[5],ymm24[6],ymm23[6],ymm24[7],ymm23[7],ymm24[16],ymm23[16],ymm24[17],ymm23[17],ymm24[18],ymm23[18],ymm24[19],ymm23[19],ymm24[20],ymm23[20],ymm24[21],ymm23[21],ymm24[22],ymm23[22],ymm24[23],ymm23[23]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm23 = ymm23[2,2,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm24, %zmm20 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm26, %ymm16, %ymm24
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm26, %ymm17, %ymm26
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm24 = ymm26[0],ymm24[0],ymm26[1],ymm24[1],ymm26[2],ymm24[2],ymm26[3],ymm24[3],ymm26[4],ymm24[4],ymm26[5],ymm24[5],ymm26[6],ymm24[6],ymm26[7],ymm24[7],ymm26[16],ymm24[16],ymm26[17],ymm24[17],ymm26[18],ymm24[18],ymm26[19],ymm24[19],ymm26[20],ymm24[20],ymm26[21],ymm24[21],ymm26[22],ymm24[22],ymm26[23],ymm24[23]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm24 = ymm24[2,2,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm16 = ymm17[8],ymm16[8],ymm17[9],ymm16[9],ymm17[10],ymm16[10],ymm17[11],ymm16[11],ymm17[12],ymm16[12],ymm17[13],ymm16[13],ymm17[14],ymm16[14],ymm17[15],ymm16[15],ymm17[24],ymm16[24],ymm17[25],ymm16[25],ymm17[26],ymm16[26],ymm17[27],ymm16[27],ymm17[28],ymm16[28],ymm17[29],ymm16[29],ymm17[30],ymm16[30],ymm17[31],ymm16[31]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermw %ymm16, %ymm26, %ymm16
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm23, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm27, %ymm18, %ymm16
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm27, %ymm19, %ymm23
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm16 = ymm23[0],ymm16[0],ymm23[1],ymm16[1],ymm23[2],ymm16[2],ymm23[3],ymm16[3],ymm23[4],ymm16[4],ymm23[5],ymm16[5],ymm23[6],ymm16[6],ymm23[7],ymm16[7],ymm23[16],ymm16[16],ymm23[17],ymm16[17],ymm23[18],ymm16[18],ymm23[19],ymm16[19],ymm23[20],ymm16[20],ymm23[21],ymm16[21],ymm23[22],ymm16[22],ymm23[23],ymm16[23]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermw %ymm16, %ymm25, %ymm16
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm24, %zmm17
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm23, %ymm18, %ymm16
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm23, %ymm19, %ymm24
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm16 = ymm24[0],ymm16[0],ymm24[1],ymm16[1],ymm24[2],ymm16[2],ymm24[3],ymm16[3],ymm24[4],ymm16[4],ymm24[5],ymm16[5],ymm24[6],ymm16[6],ymm24[7],ymm16[7],ymm24[16],ymm16[16],ymm24[17],ymm16[17],ymm24[18],ymm16[18],ymm24[19],ymm16[19],ymm24[20],ymm16[20],ymm24[21],ymm16[21],ymm24[22],ymm16[22],ymm24[23],ymm16[23]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm16[2,2,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm18 = ymm19[8],ymm18[8],ymm19[9],ymm18[9],ymm19[10],ymm18[10],ymm19[11],ymm18[11],ymm19[12],ymm18[12],ymm19[13],ymm18[13],ymm19[14],ymm18[14],ymm19[15],ymm18[15],ymm19[24],ymm18[24],ymm19[25],ymm18[25],ymm19[26],ymm18[26],ymm19[27],ymm18[27],ymm19[28],ymm18[28],ymm19[29],ymm18[29],ymm19[30],ymm18[30],ymm19[31],ymm18[31]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermw %ymm18, %ymm29, %ymm18
@@ -4823,7 +4812,7 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} zmm14 = zmm14[2,2,2,3,6,6,6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %zmm14, %zmm16 {%k2}
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm22, %zmm12, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %zmm25, %zmm12, %zmm12
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %zmm27, %zmm12, %zmm12
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} zmm12 = zmm12[2,2,2,3,6,6,6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm12, %zmm16 {%k3}
 ; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq {{.*#+}} xmm12 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
@@ -4835,13 +4824,12 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermw %ymm9, %ymm11, %ymm9
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm9, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq {{.*#+}} xmm14 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm14, %xmm8, %xmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm14, %xmm10, %xmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm17 = xmm18[0],xmm17[0],xmm18[1],xmm17[1],xmm18[2],xmm17[2],xmm18[3],xmm17[3],xmm18[4],xmm17[4],xmm18[5],xmm17[5],xmm18[6],xmm17[6],xmm18[7],xmm17[7]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm23, %xmm8, %xmm14
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm23, %xmm10, %xmm17
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm14 = xmm17[0],xmm14[0],xmm17[1],xmm14[1],xmm17[2],xmm14[2],xmm17[3],xmm14[3],xmm17[4],xmm14[4],xmm17[5],xmm14[5],xmm17[6],xmm14[6],xmm17[7],xmm14[7]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm10[0],xmm8[0],xmm10[1],xmm8[1],xmm10[2],xmm8[2],xmm10[3],xmm8[3],xmm10[4],xmm8[4],xmm10[5],xmm8[5],xmm10[6],xmm8[6],xmm10[7],xmm8[7]
 ; AVX512BW-ONLY-SLOW-NEXT:    vprold $16, %xmm8, %xmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm17, %zmm8, %zmm8
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm8, %zmm8
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} zmm8 = zmm8[0,0,0,1,4,4,4,5]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %zmm8, %zmm9 {%k2}
 ; AVX512BW-ONLY-SLOW-NEXT:    vpmovzxbw {{.*#+}} xmm8 = xmm13[0],zero,xmm13[1],zero,xmm13[2],zero,xmm13[3],zero,xmm13[4],zero,xmm13[5],zero,xmm13[6],zero,xmm13[7],zero
@@ -4865,8 +4853,8 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermw %ymm1, %ymm11, %ymm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm1, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm14, %xmm2, %xmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm14, %xmm4, %xmm8
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm23, %xmm2, %xmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm23, %xmm4, %xmm8
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm8[0],xmm3[0],xmm8[1],xmm3[1],xmm8[2],xmm3[2],xmm8[3],xmm3[3],xmm8[4],xmm3[4],xmm8[5],xmm3[5],xmm8[6],xmm3[6],xmm8[7],xmm3[7]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
 ; AVX512BW-ONLY-SLOW-NEXT:    vprold $16, %xmm2, %xmm2
@@ -4909,93 +4897,92 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    vpermw %ymm1, %ymm9, %ymm1
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
 ; AVX512BW-FAST-NEXT:    vmovdqa 32(%rcx), %ymm5
-; AVX512BW-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm12 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512BW-FAST-NEXT:    vpshufb %ymm12, %ymm5, %ymm0
+; AVX512BW-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm17 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
+; AVX512BW-FAST-NEXT:    vpshufb %ymm17, %ymm5, %ymm0
 ; AVX512BW-FAST-NEXT:    vmovdqa 32(%rdx), %ymm7
-; AVX512BW-FAST-NEXT:    vpshufb %ymm12, %ymm7, %ymm2
+; AVX512BW-FAST-NEXT:    vpshufb %ymm17, %ymm7, %ymm2
 ; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23]
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
 ; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm7[8],ymm5[8],ymm7[9],ymm5[9],ymm7[10],ymm5[10],ymm7[11],ymm5[11],ymm7[12],ymm5[12],ymm7[13],ymm5[13],ymm7[14],ymm5[14],ymm7[15],ymm5[15],ymm7[24],ymm5[24],ymm7[25],ymm5[25],ymm7[26],ymm5[26],ymm7[27],ymm5[27],ymm7[28],ymm5[28],ymm7[29],ymm5[29],ymm7[30],ymm5[30],ymm7[31],ymm5[31]
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
-; AVX512BW-FAST-NEXT:    vpermw %ymm2, %ymm13, %ymm2
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
+; AVX512BW-FAST-NEXT:    vpermw %ymm2, %ymm12, %ymm2
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    movl $613566756, %eax # imm = 0x24924924
 ; AVX512BW-FAST-NEXT:    kmovd %eax, %k1
 ; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm1, %zmm0 {%k1}
 ; AVX512BW-FAST-NEXT:    vmovdqa 32(%r8), %ymm2
 ; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm2[0,1,2,3],zmm8[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm15 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,10,u,13,u,12,u,11,u,14,u,u,u,u,u,15,u>
-; AVX512BW-FAST-NEXT:    vpshufb %zmm15, %zmm1, %zmm1
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm13 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,10,u,13,u,12,u,11,u,14,u,u,u,u,u,15,u>
+; AVX512BW-FAST-NEXT:    vpshufb %zmm13, %zmm1, %zmm1
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[2,2,2,3,6,6,6,7]
 ; AVX512BW-FAST-NEXT:    movl $-1840700270, %eax # imm = 0x92492492
 ; AVX512BW-FAST-NEXT:    kmovd %eax, %k2
 ; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm1, %zmm0 {%k2}
 ; AVX512BW-FAST-NEXT:    vmovdqa 32(%r9), %ymm1
 ; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm11 = zmm1[0,1,2,3],zmm10[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm16 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,10,u,13,u,12,u,11,u,14,u,u,u,u,u,15>
-; AVX512BW-FAST-NEXT:    vpshufb %zmm16, %zmm11, %zmm11
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm15 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,10,u,13,u,12,u,11,u,14,u,u,u,u,u,15>
+; AVX512BW-FAST-NEXT:    vpshufb %zmm15, %zmm11, %zmm11
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm11 = zmm11[2,2,2,3,6,6,6,7]
 ; AVX512BW-FAST-NEXT:    movabsq $-9076969306111049208, %rax # imm = 0x8208208208208208
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k3
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm11, %zmm0 {%k3}
 ; AVX512BW-FAST-NEXT:    vmovdqa (%rsi), %ymm11
-; AVX512BW-FAST-NEXT:    vpshufb %ymm6, %ymm11, %ymm17
+; AVX512BW-FAST-NEXT:    vpshufb %ymm6, %ymm11, %ymm16
 ; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %ymm14
 ; AVX512BW-FAST-NEXT:    vpshufb %ymm6, %ymm14, %ymm6
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm6 = ymm6[0],ymm17[0],ymm6[1],ymm17[1],ymm6[2],ymm17[2],ymm6[3],ymm17[3],ymm6[4],ymm17[4],ymm6[5],ymm17[5],ymm6[6],ymm17[6],ymm6[7],ymm17[7],ymm6[16],ymm17[16],ymm6[17],ymm17[17],ymm6[18],ymm17[18],ymm6[19],ymm17[19],ymm6[20],ymm17[20],ymm6[21],ymm17[21],ymm6[22],ymm17[22],ymm6[23],ymm17[23]
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm6 = ymm6[0],ymm16[0],ymm6[1],ymm16[1],ymm6[2],ymm16[2],ymm6[3],ymm16[3],ymm6[4],ymm16[4],ymm6[5],ymm16[5],ymm6[6],ymm16[6],ymm6[7],ymm16[7],ymm6[16],ymm16[16],ymm6[17],ymm16[17],ymm6[18],ymm16[18],ymm6[19],ymm16[19],ymm6[20],ymm16[20],ymm6[21],ymm16[21],ymm6[22],ymm16[22],ymm6[23],ymm16[23]
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3]
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm17 = ymm14[8],ymm11[8],ymm14[9],ymm11[9],ymm14[10],ymm11[10],ymm14[11],ymm11[11],ymm14[12],ymm11[12],ymm14[13],ymm11[13],ymm14[14],ymm11[14],ymm14[15],ymm11[15],ymm14[24],ymm11[24],ymm14[25],ymm11[25],ymm14[26],ymm11[26],ymm14[27],ymm11[27],ymm14[28],ymm11[28],ymm14[29],ymm11[29],ymm14[30],ymm11[30],ymm14[31],ymm11[31]
-; AVX512BW-FAST-NEXT:    vpermw %ymm17, %ymm9, %ymm9
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm16 = ymm14[8],ymm11[8],ymm14[9],ymm11[9],ymm14[10],ymm11[10],ymm14[11],ymm11[11],ymm14[12],ymm11[12],ymm14[13],ymm11[13],ymm14[14],ymm11[14],ymm14[15],ymm11[15],ymm14[24],ymm11[24],ymm14[25],ymm11[25],ymm14[26],ymm11[26],ymm14[27],ymm11[27],ymm14[28],ymm11[28],ymm14[29],ymm11[29],ymm14[30],ymm11[30],ymm14[31],ymm11[31]
+; AVX512BW-FAST-NEXT:    vpermw %ymm16, %ymm9, %ymm9
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm6, %zmm9
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%rcx), %ymm18
-; AVX512BW-FAST-NEXT:    vpshufb %ymm12, %ymm18, %ymm6
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdx), %ymm19
-; AVX512BW-FAST-NEXT:    vpshufb %ymm12, %ymm19, %ymm12
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm6 = ymm12[0],ymm6[0],ymm12[1],ymm6[1],ymm12[2],ymm6[2],ymm12[3],ymm6[3],ymm12[4],ymm6[4],ymm12[5],ymm6[5],ymm12[6],ymm6[6],ymm12[7],ymm6[7],ymm12[16],ymm6[16],ymm12[17],ymm6[17],ymm12[18],ymm6[18],ymm12[19],ymm6[19],ymm12[20],ymm6[20],ymm12[21],ymm6[21],ymm12[22],ymm6[22],ymm12[23],ymm6[23]
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%rcx), %ymm19
+; AVX512BW-FAST-NEXT:    vpshufb %ymm17, %ymm19, %ymm6
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdx), %ymm20
+; AVX512BW-FAST-NEXT:    vpshufb %ymm17, %ymm20, %ymm16
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm6 = ymm16[0],ymm6[0],ymm16[1],ymm6[1],ymm16[2],ymm6[2],ymm16[3],ymm6[3],ymm16[4],ymm6[4],ymm16[5],ymm6[5],ymm16[6],ymm6[6],ymm16[7],ymm6[7],ymm16[16],ymm6[16],ymm16[17],ymm6[17],ymm16[18],ymm6[18],ymm16[19],ymm6[19],ymm16[20],ymm6[20],ymm16[21],ymm6[21],ymm16[22],ymm6[22],ymm16[23],ymm6[23]
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3]
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm12 = ymm19[8],ymm18[8],ymm19[9],ymm18[9],ymm19[10],ymm18[10],ymm19[11],ymm18[11],ymm19[12],ymm18[12],ymm19[13],ymm18[13],ymm19[14],ymm18[14],ymm19[15],ymm18[15],ymm19[24],ymm18[24],ymm19[25],ymm18[25],ymm19[26],ymm18[26],ymm19[27],ymm18[27],ymm19[28],ymm18[28],ymm19[29],ymm18[29],ymm19[30],ymm18[30],ymm19[31],ymm18[31]
-; AVX512BW-FAST-NEXT:    vpermw %ymm12, %ymm13, %ymm12
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm16 = ymm20[8],ymm19[8],ymm20[9],ymm19[9],ymm20[10],ymm19[10],ymm20[11],ymm19[11],ymm20[12],ymm19[12],ymm20[13],ymm19[13],ymm20[14],ymm19[14],ymm20[15],ymm19[15],ymm20[24],ymm19[24],ymm20[25],ymm19[25],ymm20[26],ymm19[26],ymm20[27],ymm19[27],ymm20[28],ymm19[28],ymm20[29],ymm19[29],ymm20[30],ymm19[30],ymm20[31],ymm19[31]
+; AVX512BW-FAST-NEXT:    vpermw %ymm16, %ymm12, %ymm12
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm6, %zmm6
 ; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm9, %zmm6 {%k1}
 ; AVX512BW-FAST-NEXT:    vmovdqa (%r8), %ymm9
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm8, %zmm8
-; AVX512BW-FAST-NEXT:    vpshufb %zmm15, %zmm8, %zmm8
+; AVX512BW-FAST-NEXT:    vpshufb %zmm13, %zmm8, %zmm8
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm8 = zmm8[2,2,2,3,6,6,6,7]
 ; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm8, %zmm6 {%k2}
 ; AVX512BW-FAST-NEXT:    vmovdqa (%r9), %ymm8
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm10, %zmm10
-; AVX512BW-FAST-NEXT:    vpshufb %zmm16, %zmm10, %zmm10
+; AVX512BW-FAST-NEXT:    vpshufb %zmm15, %zmm10, %zmm10
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm10 = zmm10[2,2,2,3,6,6,6,7]
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm10, %zmm6 {%k3}
 ; AVX512BW-FAST-NEXT:    vmovdqa64 (%rcx), %xmm22
-; AVX512BW-FAST-NEXT:    vmovdqa 32(%rcx), %xmm12
-; AVX512BW-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm21 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512BW-FAST-NEXT:    vpshufb %xmm21, %xmm12, %xmm10
+; AVX512BW-FAST-NEXT:    vmovdqa 32(%rcx), %xmm13
+; AVX512BW-FAST-NEXT:    vpshufb %xmm17, %xmm13, %xmm10
 ; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdx), %xmm23
 ; AVX512BW-FAST-NEXT:    vmovdqa 32(%rdx), %xmm15
-; AVX512BW-FAST-NEXT:    vpshufb %xmm21, %xmm15, %xmm13
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm10 = xmm13[0],xmm10[0],xmm13[1],xmm10[1],xmm13[2],xmm10[2],xmm13[3],xmm10[3],xmm13[4],xmm10[4],xmm13[5],xmm10[5],xmm13[6],xmm10[6],xmm13[7],xmm10[7]
+; AVX512BW-FAST-NEXT:    vpshufb %xmm17, %xmm15, %xmm12
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm10 = xmm12[0],xmm10[0],xmm12[1],xmm10[1],xmm12[2],xmm10[2],xmm12[3],xmm10[3],xmm12[4],xmm10[4],xmm12[5],xmm10[5],xmm12[6],xmm10[6],xmm12[7],xmm10[7]
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,0,1]
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm13 = xmm15[0],xmm12[0],xmm15[1],xmm12[1],xmm15[2],xmm12[2],xmm15[3],xmm12[3],xmm15[4],xmm12[4],xmm15[5],xmm12[5],xmm15[6],xmm12[6],xmm15[7],xmm12[7]
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm15[0],xmm13[0],xmm15[1],xmm13[1],xmm15[2],xmm13[2],xmm15[3],xmm13[3],xmm15[4],xmm13[4],xmm15[5],xmm13[5],xmm15[6],xmm13[6],xmm15[7],xmm13[7]
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm24 = [1,0,3,2,1,0,3,2,1,0,3,2,5,4,7,6]
-; AVX512BW-FAST-NEXT:    vpermw %ymm13, %ymm24, %ymm13
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm13, %zmm13
-; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rsi), %xmm17
+; AVX512BW-FAST-NEXT:    vpermw %ymm12, %ymm24, %ymm12
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm12, %zmm12
+; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rsi), %xmm18
 ; AVX512BW-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm25 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512BW-FAST-NEXT:    vpshufb %xmm25, %xmm17, %xmm10
-; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdi), %xmm20
-; AVX512BW-FAST-NEXT:    vpshufb %xmm25, %xmm20, %xmm16
+; AVX512BW-FAST-NEXT:    vpshufb %xmm25, %xmm18, %xmm10
+; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdi), %xmm21
+; AVX512BW-FAST-NEXT:    vpshufb %xmm25, %xmm21, %xmm16
 ; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm10 = xmm16[8],xmm10[8],xmm16[9],xmm10[9],xmm16[10],xmm10[10],xmm16[11],xmm10[11],xmm16[12],xmm10[12],xmm16[13],xmm10[13],xmm16[14],xmm10[14],xmm16[15],xmm10[15]
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,0,1]
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm16 = xmm20[0],xmm17[0],xmm20[1],xmm17[1],xmm20[2],xmm17[2],xmm20[3],xmm17[3],xmm20[4],xmm17[4],xmm20[5],xmm17[5],xmm20[6],xmm17[6],xmm20[7],xmm17[7]
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm16 = xmm21[0],xmm18[0],xmm21[1],xmm18[1],xmm21[2],xmm18[2],xmm21[3],xmm18[3],xmm21[4],xmm18[4],xmm21[5],xmm18[5],xmm21[6],xmm18[6],xmm21[7],xmm18[7]
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm26 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
 ; AVX512BW-FAST-NEXT:    vpermw %ymm16, %ymm26, %ymm16
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm16, %zmm10
-; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm13, %zmm10 {%k2}
-; AVX512BW-FAST-NEXT:    vmovdqa 32(%r8), %xmm13
+; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm12, %zmm10 {%k2}
+; AVX512BW-FAST-NEXT:    vmovdqa 32(%r8), %xmm12
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm27 = <8,u,9,u,u,u,u,u,u,u,5,u,6,u,7,u>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm27, %xmm13, %xmm16
-; AVX512BW-FAST-NEXT:    vpmovzxbw {{.*#+}} xmm28 = xmm13[0],zero,xmm13[1],zero,xmm13[2],zero,xmm13[3],zero,xmm13[4],zero,xmm13[5],zero,xmm13[6],zero,xmm13[7],zero
+; AVX512BW-FAST-NEXT:    vpshufb %xmm27, %xmm12, %xmm16
+; AVX512BW-FAST-NEXT:    vpmovzxbw {{.*#+}} xmm28 = xmm12[0],zero,xmm12[1],zero,xmm12[2],zero,xmm12[3],zero,xmm12[4],zero,xmm12[5],zero,xmm12[6],zero,xmm12[7],zero
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm28, %zmm16
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4,22,21,16,23,22,21,16,23,22,21,16,23,17,17,17,17]
 ; AVX512BW-FAST-NEXT:    vpermw %zmm16, %zmm28, %zmm10 {%k1}
@@ -5009,52 +4996,52 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    movabsq $585610922974906400, %rax # imm = 0x820820820820820
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k3
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm30, %zmm10 {%k3}
-; AVX512BW-FAST-NEXT:    vpshufb %xmm21, %xmm22, %xmm30
-; AVX512BW-FAST-NEXT:    vpshufb %xmm21, %xmm23, %xmm21
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm21 = xmm21[0],xmm30[0],xmm21[1],xmm30[1],xmm21[2],xmm30[2],xmm21[3],xmm30[3],xmm21[4],xmm30[4],xmm21[5],xmm30[5],xmm21[6],xmm30[6],xmm21[7],xmm30[7]
+; AVX512BW-FAST-NEXT:    vpshufb %xmm17, %xmm22, %xmm30
+; AVX512BW-FAST-NEXT:    vpshufb %xmm17, %xmm23, %xmm17
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm17 = xmm17[0],xmm30[0],xmm17[1],xmm30[1],xmm17[2],xmm30[2],xmm17[3],xmm30[3],xmm17[4],xmm30[4],xmm17[5],xmm30[5],xmm17[6],xmm30[6],xmm17[7],xmm30[7]
 ; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm30 = xmm23[0],xmm22[0],xmm23[1],xmm22[1],xmm23[2],xmm22[2],xmm23[3],xmm22[3],xmm23[4],xmm22[4],xmm23[5],xmm22[5],xmm23[6],xmm22[6],xmm23[7],xmm22[7]
 ; AVX512BW-FAST-NEXT:    vpermw %ymm30, %ymm24, %ymm24
 ; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdi), %xmm30
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm21 = ymm21[0,0,0,1]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm21, %zmm24, %zmm24
-; AVX512BW-FAST-NEXT:    vpshufb %xmm25, %xmm31, %xmm21
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm17 = ymm17[0,0,0,1]
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm17, %zmm24, %zmm24
+; AVX512BW-FAST-NEXT:    vpshufb %xmm25, %xmm31, %xmm17
 ; AVX512BW-FAST-NEXT:    vpshufb %xmm25, %xmm30, %xmm25
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm21 = xmm25[8],xmm21[8],xmm25[9],xmm21[9],xmm25[10],xmm21[10],xmm25[11],xmm21[11],xmm25[12],xmm21[12],xmm25[13],xmm21[13],xmm25[14],xmm21[14],xmm25[15],xmm21[15]
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm17 = xmm25[8],xmm17[8],xmm25[9],xmm17[9],xmm25[10],xmm17[10],xmm25[11],xmm17[11],xmm25[12],xmm17[12],xmm25[13],xmm17[13],xmm25[14],xmm17[14],xmm25[15],xmm17[15]
 ; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm25 = xmm30[0],xmm31[0],xmm30[1],xmm31[1],xmm30[2],xmm31[2],xmm30[3],xmm31[3],xmm30[4],xmm31[4],xmm30[5],xmm31[5],xmm30[6],xmm31[6],xmm30[7],xmm31[7]
 ; AVX512BW-FAST-NEXT:    vpermw %ymm25, %ymm26, %ymm25
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm21 = ymm21[0,0,0,1]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm21, %zmm25, %zmm21
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm17 = ymm17[0,0,0,1]
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm17, %zmm25, %zmm17
 ; AVX512BW-FAST-NEXT:    vmovdqa64 (%r8), %xmm25
-; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm24, %zmm21 {%k2}
+; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm24, %zmm17 {%k2}
 ; AVX512BW-FAST-NEXT:    vpshufb %xmm27, %xmm25, %xmm24
 ; AVX512BW-FAST-NEXT:    vpmovzxbw {{.*#+}} xmm26 = xmm25[0],zero,xmm25[1],zero,xmm25[2],zero,xmm25[3],zero,xmm25[4],zero,xmm25[5],zero,xmm25[6],zero,xmm25[7],zero
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm24, %zmm26, %zmm26
 ; AVX512BW-FAST-NEXT:    vmovdqa64 (%r9), %xmm24
-; AVX512BW-FAST-NEXT:    vpermw %zmm26, %zmm28, %zmm21 {%k1}
+; AVX512BW-FAST-NEXT:    vpermw %zmm26, %zmm28, %zmm17 {%k1}
 ; AVX512BW-FAST-NEXT:    vpshufb %xmm29, %xmm24, %xmm26
 ; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm27 = xmm24[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm26, %zmm27, %zmm26
 ; AVX512BW-FAST-NEXT:    vpermw %zmm26, %zmm28, %zmm26
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm26, %zmm21 {%k3}
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm18 = ymm19[0],ymm18[0],ymm19[1],ymm18[1],ymm19[2],ymm18[2],ymm19[3],ymm18[3],ymm19[4],ymm18[4],ymm19[5],ymm18[5],ymm19[6],ymm18[6],ymm19[7],ymm18[7],ymm19[16],ymm18[16],ymm19[17],ymm18[17],ymm19[18],ymm18[18],ymm19[19],ymm18[19],ymm19[20],ymm18[20],ymm19[21],ymm18[21],ymm19[22],ymm18[22],ymm19[23],ymm18[23]
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm19 = xmm23[8],xmm22[8],xmm23[9],xmm22[9],xmm23[10],xmm22[10],xmm23[11],xmm22[11],xmm23[12],xmm22[12],xmm23[13],xmm22[13],xmm23[14],xmm22[14],xmm23[15],xmm22[15]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm18, %zmm19, %zmm18
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm26, %zmm17 {%k3}
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm19 = ymm20[0],ymm19[0],ymm20[1],ymm19[1],ymm20[2],ymm19[2],ymm20[3],ymm19[3],ymm20[4],ymm19[4],ymm20[5],ymm19[5],ymm20[6],ymm19[6],ymm20[7],ymm19[7],ymm20[16],ymm19[16],ymm20[17],ymm19[17],ymm20[18],ymm19[18],ymm20[19],ymm19[19],ymm20[20],ymm19[20],ymm20[21],ymm19[21],ymm20[22],ymm19[22],ymm20[23],ymm19[23]
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm20 = xmm23[8],xmm22[8],xmm23[9],xmm22[9],xmm23[10],xmm22[10],xmm23[11],xmm22[11],xmm23[12],xmm22[12],xmm23[13],xmm22[13],xmm23[14],xmm22[14],xmm23[15],xmm22[15]
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm19, %zmm20, %zmm19
 ; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm11 = ymm14[0],ymm11[0],ymm14[1],ymm11[1],ymm14[2],ymm11[2],ymm14[3],ymm11[3],ymm14[4],ymm11[4],ymm14[5],ymm11[5],ymm14[6],ymm11[6],ymm14[7],ymm11[7],ymm14[16],ymm11[16],ymm14[17],ymm11[17],ymm14[18],ymm11[18],ymm14[19],ymm11[19],ymm14[20],ymm11[20],ymm14[21],ymm11[21],ymm14[22],ymm11[22],ymm14[23],ymm11[23]
 ; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm14 = xmm30[8],xmm31[8],xmm30[9],xmm31[9],xmm30[10],xmm31[10],xmm30[11],xmm31[11],xmm30[12],xmm31[12],xmm30[13],xmm31[13],xmm30[14],xmm31[14],xmm30[15],xmm31[15]
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm14, %zmm11
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7,24,27,26,25,24,27,26,25,24,27,26,25,28,29,30,29]
 ; AVX512BW-FAST-NEXT:    vpermw %zmm11, %zmm14, %zmm11
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm19 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7,25,24,27,26,25,24,27,26,25,24,27,26,29,28,31,30]
-; AVX512BW-FAST-NEXT:    vpermw %zmm18, %zmm19, %zmm11 {%k1}
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7,25,24,27,26,25,24,27,26,25,24,27,26,29,28,31,30]
+; AVX512BW-FAST-NEXT:    vpermw %zmm19, %zmm20, %zmm11 {%k1}
 ; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm7[0],ymm5[0],ymm7[1],ymm5[1],ymm7[2],ymm5[2],ymm7[3],ymm5[3],ymm7[4],ymm5[4],ymm7[5],ymm5[5],ymm7[6],ymm5[6],ymm7[7],ymm5[7],ymm7[16],ymm5[16],ymm7[17],ymm5[17],ymm7[18],ymm5[18],ymm7[19],ymm5[19],ymm7[20],ymm5[20],ymm7[21],ymm5[21],ymm7[22],ymm5[22],ymm7[23],ymm5[23]
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm15[8],xmm12[8],xmm15[9],xmm12[9],xmm15[10],xmm12[10],xmm15[11],xmm12[11],xmm15[12],xmm12[12],xmm15[13],xmm12[13],xmm15[14],xmm12[14],xmm15[15],xmm12[15]
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm15[8],xmm13[8],xmm15[9],xmm13[9],xmm15[10],xmm13[10],xmm15[11],xmm13[11],xmm15[12],xmm13[12],xmm15[13],xmm13[13],xmm15[14],xmm13[14],xmm15[15],xmm13[15]
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm7, %zmm5
 ; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[16],ymm3[16],ymm4[17],ymm3[17],ymm4[18],ymm3[18],ymm4[19],ymm3[19],ymm4[20],ymm3[20],ymm4[21],ymm3[21],ymm4[22],ymm3[22],ymm4[23],ymm3[23]
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm20[8],xmm17[8],xmm20[9],xmm17[9],xmm20[10],xmm17[10],xmm20[11],xmm17[11],xmm20[12],xmm17[12],xmm20[13],xmm17[13],xmm20[14],xmm17[14],xmm20[15],xmm17[15]
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm21[8],xmm18[8],xmm21[9],xmm18[9],xmm21[10],xmm18[10],xmm21[11],xmm18[11],xmm21[12],xmm18[12],xmm21[13],xmm18[13],xmm21[14],xmm18[14],xmm21[15],xmm18[15]
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm3
 ; AVX512BW-FAST-NEXT:    vpermw %zmm3, %zmm14, %zmm3
 ; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm25[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512BW-FAST-NEXT:    vpermw %zmm5, %zmm19, %zmm3 {%k1}
+; AVX512BW-FAST-NEXT:    vpermw %zmm5, %zmm20, %zmm3 {%k1}
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7]
 ; AVX512BW-FAST-NEXT:    vpermw %ymm4, %ymm5, %ymm4
 ; AVX512BW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0]
@@ -5076,7 +5063,7 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k2
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm4, %zmm11 {%k2}
 ; AVX512BW-FAST-NEXT:    vpshufb %ymm7, %ymm2, %ymm2
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm13[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm12[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; AVX512BW-FAST-NEXT:    vpermw %ymm4, %ymm5, %ymm4
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm4, %zmm2
@@ -5090,7 +5077,7 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm3, 256(%rax)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm11, 64(%rax)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm21, (%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm17, (%rax)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm10, 192(%rax)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm6, 128(%rax)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, 320(%rax)
@@ -5180,54 +5167,54 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %zmm20, %zmm7 {%k2}
 ; AVX512DQBW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm20 = xmm15[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; AVX512DQBW-SLOW-NEXT:    vpermw %ymm20, %ymm23, %ymm20
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 32(%r9), %ymm23
-; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm26, %ymm23, %ymm24
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm24 = ymm24[2,2,2,3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm24, %zmm20, %zmm20
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 32(%r9), %ymm24
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm26, %ymm24, %ymm23
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm23 = ymm23[2,2,2,3]
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm23, %zmm20, %zmm20
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm20, %zmm7 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm24 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm24, %ymm25, %ymm20
-; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm24, %ymm27, %ymm26
-; AVX512DQBW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm20 = ymm26[0],ymm20[0],ymm26[1],ymm20[1],ymm26[2],ymm20[2],ymm26[3],ymm20[3],ymm26[4],ymm20[4],ymm26[5],ymm20[5],ymm26[6],ymm20[6],ymm26[7],ymm20[7],ymm26[16],ymm20[16],ymm26[17],ymm20[17],ymm26[18],ymm20[18],ymm26[19],ymm20[19],ymm26[20],ymm20[20],ymm26[21],ymm20[21],ymm26[22],ymm20[22],ymm26[23],ymm20[23]
+; AVX512DQBW-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm26 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm26, %ymm25, %ymm20
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm26, %ymm27, %ymm23
+; AVX512DQBW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm20 = ymm23[0],ymm20[0],ymm23[1],ymm20[1],ymm23[2],ymm20[2],ymm23[3],ymm20[3],ymm23[4],ymm20[4],ymm23[5],ymm20[5],ymm23[6],ymm20[6],ymm23[7],ymm20[7],ymm23[16],ymm20[16],ymm23[17],ymm20[17],ymm23[18],ymm20[18],ymm23[19],ymm20[19],ymm23[20],ymm20[20],ymm23[21],ymm20[21],ymm23[22],ymm20[22],ymm23[23],ymm20[23]
 ; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm20 = ymm20[2,2,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm25 = ymm27[8],ymm25[8],ymm27[9],ymm25[9],ymm27[10],ymm25[10],ymm27[11],ymm25[11],ymm27[12],ymm25[12],ymm27[13],ymm25[13],ymm27[14],ymm25[14],ymm27[15],ymm25[15],ymm27[24],ymm25[24],ymm27[25],ymm25[25],ymm27[26],ymm25[26],ymm27[27],ymm25[27],ymm27[28],ymm25[28],ymm27[29],ymm25[29],ymm27[30],ymm25[30],ymm27[31],ymm25[31]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm26 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
-; AVX512DQBW-SLOW-NEXT:    vpermw %ymm25, %ymm26, %ymm25
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm25, %zmm20, %zmm25
-; AVX512DQBW-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm27 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm27, %ymm28, %ymm20
-; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm27, %ymm29, %ymm31
+; AVX512DQBW-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm23 = ymm27[8],ymm25[8],ymm27[9],ymm25[9],ymm27[10],ymm25[10],ymm27[11],ymm25[11],ymm27[12],ymm25[12],ymm27[13],ymm25[13],ymm27[14],ymm25[14],ymm27[15],ymm25[15],ymm27[24],ymm25[24],ymm27[25],ymm25[25],ymm27[26],ymm25[26],ymm27[27],ymm25[27],ymm27[28],ymm25[28],ymm27[29],ymm25[29],ymm27[30],ymm25[30],ymm27[31],ymm25[31]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm25 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
+; AVX512DQBW-SLOW-NEXT:    vpermw %ymm23, %ymm25, %ymm23
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm23, %zmm20, %zmm27
+; AVX512DQBW-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm23 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm23, %ymm28, %ymm20
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm23, %ymm29, %ymm31
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm20 = ymm31[0],ymm20[0],ymm31[1],ymm20[1],ymm31[2],ymm20[2],ymm31[3],ymm20[3],ymm31[4],ymm20[4],ymm31[5],ymm20[5],ymm31[6],ymm20[6],ymm31[7],ymm20[7],ymm31[16],ymm20[16],ymm31[17],ymm20[17],ymm31[18],ymm20[18],ymm31[19],ymm20[19],ymm31[20],ymm20[20],ymm31[21],ymm20[21],ymm31[22],ymm20[22],ymm31[23],ymm20[23]
 ; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm20 = ymm20[2,2,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm28 = ymm29[8],ymm28[8],ymm29[9],ymm28[9],ymm29[10],ymm28[10],ymm29[11],ymm28[11],ymm29[12],ymm28[12],ymm29[13],ymm28[13],ymm29[14],ymm28[14],ymm29[15],ymm28[15],ymm29[24],ymm28[24],ymm29[25],ymm28[25],ymm29[26],ymm28[26],ymm29[27],ymm28[27],ymm29[28],ymm28[28],ymm29[29],ymm28[29],ymm29[30],ymm28[30],ymm29[31],ymm28[31]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm29 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
 ; AVX512DQBW-SLOW-NEXT:    vpermw %ymm28, %ymm29, %ymm28
 ; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm28, %zmm20, %zmm20
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %zmm25, %zmm20 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm25 = zmm30[0,1,2,3],zmm14[4,5,6,7]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %zmm27, %zmm20 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm27 = zmm30[0,1,2,3],zmm14[4,5,6,7]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm28 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,10,u,13,u,12,u,11,u,14,u,u,u,u,u,15,u>
-; AVX512DQBW-SLOW-NEXT:    vpshufb %zmm28, %zmm25, %zmm25
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} zmm25 = zmm25[2,2,2,3,6,6,6,7]
+; AVX512DQBW-SLOW-NEXT:    vpshufb %zmm28, %zmm27, %zmm27
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} zmm27 = zmm27[2,2,2,3,6,6,6,7]
 ; AVX512DQBW-SLOW-NEXT:    movl $-1840700270, %ecx # imm = 0x92492492
 ; AVX512DQBW-SLOW-NEXT:    kmovd %ecx, %k2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %zmm25, %zmm20 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm23 = zmm23[0,1,2,3],zmm12[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm25 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,10,u,13,u,12,u,11,u,14,u,u,u,u,u,15>
-; AVX512DQBW-SLOW-NEXT:    vpshufb %zmm25, %zmm23, %zmm23
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} zmm23 = zmm23[2,2,2,3,6,6,6,7]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %zmm27, %zmm20 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm24 = zmm24[0,1,2,3],zmm12[4,5,6,7]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm27 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,10,u,13,u,12,u,11,u,14,u,u,u,u,u,15>
+; AVX512DQBW-SLOW-NEXT:    vpshufb %zmm27, %zmm24, %zmm24
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} zmm24 = zmm24[2,2,2,3,6,6,6,7]
 ; AVX512DQBW-SLOW-NEXT:    movabsq $-9076969306111049208, %rcx # imm = 0x8208208208208208
 ; AVX512DQBW-SLOW-NEXT:    kmovq %rcx, %k3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm23, %zmm20 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm24, %ymm16, %ymm23
-; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm24, %ymm17, %ymm24
-; AVX512DQBW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm23 = ymm24[0],ymm23[0],ymm24[1],ymm23[1],ymm24[2],ymm23[2],ymm24[3],ymm23[3],ymm24[4],ymm23[4],ymm24[5],ymm23[5],ymm24[6],ymm23[6],ymm24[7],ymm23[7],ymm24[16],ymm23[16],ymm24[17],ymm23[17],ymm24[18],ymm23[18],ymm24[19],ymm23[19],ymm24[20],ymm23[20],ymm24[21],ymm23[21],ymm24[22],ymm23[22],ymm24[23],ymm23[23]
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm23 = ymm23[2,2,2,3]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm24, %zmm20 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm26, %ymm16, %ymm24
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm26, %ymm17, %ymm26
+; AVX512DQBW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm24 = ymm26[0],ymm24[0],ymm26[1],ymm24[1],ymm26[2],ymm24[2],ymm26[3],ymm24[3],ymm26[4],ymm24[4],ymm26[5],ymm24[5],ymm26[6],ymm24[6],ymm26[7],ymm24[7],ymm26[16],ymm24[16],ymm26[17],ymm24[17],ymm26[18],ymm24[18],ymm26[19],ymm24[19],ymm26[20],ymm24[20],ymm26[21],ymm24[21],ymm26[22],ymm24[22],ymm26[23],ymm24[23]
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm24 = ymm24[2,2,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm16 = ymm17[8],ymm16[8],ymm17[9],ymm16[9],ymm17[10],ymm16[10],ymm17[11],ymm16[11],ymm17[12],ymm16[12],ymm17[13],ymm16[13],ymm17[14],ymm16[14],ymm17[15],ymm16[15],ymm17[24],ymm16[24],ymm17[25],ymm16[25],ymm17[26],ymm16[26],ymm17[27],ymm16[27],ymm17[28],ymm16[28],ymm17[29],ymm16[29],ymm17[30],ymm16[30],ymm17[31],ymm16[31]
-; AVX512DQBW-SLOW-NEXT:    vpermw %ymm16, %ymm26, %ymm16
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm23, %zmm17
-; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm27, %ymm18, %ymm16
-; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm27, %ymm19, %ymm23
-; AVX512DQBW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm16 = ymm23[0],ymm16[0],ymm23[1],ymm16[1],ymm23[2],ymm16[2],ymm23[3],ymm16[3],ymm23[4],ymm16[4],ymm23[5],ymm16[5],ymm23[6],ymm16[6],ymm23[7],ymm16[7],ymm23[16],ymm16[16],ymm23[17],ymm16[17],ymm23[18],ymm16[18],ymm23[19],ymm16[19],ymm23[20],ymm16[20],ymm23[21],ymm16[21],ymm23[22],ymm16[22],ymm23[23],ymm16[23]
+; AVX512DQBW-SLOW-NEXT:    vpermw %ymm16, %ymm25, %ymm16
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm24, %zmm17
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm23, %ymm18, %ymm16
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm23, %ymm19, %ymm24
+; AVX512DQBW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm16 = ymm24[0],ymm16[0],ymm24[1],ymm16[1],ymm24[2],ymm16[2],ymm24[3],ymm16[3],ymm24[4],ymm16[4],ymm24[5],ymm16[5],ymm24[6],ymm16[6],ymm24[7],ymm16[7],ymm24[16],ymm16[16],ymm24[17],ymm16[17],ymm24[18],ymm16[18],ymm24[19],ymm16[19],ymm24[20],ymm16[20],ymm24[21],ymm16[21],ymm24[22],ymm16[22],ymm24[23],ymm16[23]
 ; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm16[2,2,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm18 = ymm19[8],ymm18[8],ymm19[9],ymm18[9],ymm19[10],ymm18[10],ymm19[11],ymm18[11],ymm19[12],ymm18[12],ymm19[13],ymm18[13],ymm19[14],ymm18[14],ymm19[15],ymm18[15],ymm19[24],ymm18[24],ymm19[25],ymm18[25],ymm19[26],ymm18[26],ymm19[27],ymm18[27],ymm19[28],ymm18[28],ymm19[29],ymm18[29],ymm19[30],ymm18[30],ymm19[31],ymm18[31]
 ; AVX512DQBW-SLOW-NEXT:    vpermw %ymm18, %ymm29, %ymm18
@@ -5238,7 +5225,7 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} zmm14 = zmm14[2,2,2,3,6,6,6,7]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %zmm14, %zmm16 {%k2}
 ; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm22, %zmm12, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vpshufb %zmm25, %zmm12, %zmm12
+; AVX512DQBW-SLOW-NEXT:    vpshufb %zmm27, %zmm12, %zmm12
 ; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} zmm12 = zmm12[2,2,2,3,6,6,6,7]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm12, %zmm16 {%k3}
 ; AVX512DQBW-SLOW-NEXT:    vpbroadcastq {{.*#+}} xmm12 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
@@ -5250,13 +5237,12 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
 ; AVX512DQBW-SLOW-NEXT:    vpermw %ymm9, %ymm11, %ymm9
 ; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm9, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vpbroadcastq {{.*#+}} xmm14 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm14, %xmm8, %xmm17
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm14, %xmm10, %xmm18
-; AVX512DQBW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm17 = xmm18[0],xmm17[0],xmm18[1],xmm17[1],xmm18[2],xmm17[2],xmm18[3],xmm17[3],xmm18[4],xmm17[4],xmm18[5],xmm17[5],xmm18[6],xmm17[6],xmm18[7],xmm17[7]
+; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm23, %xmm8, %xmm14
+; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm23, %xmm10, %xmm17
+; AVX512DQBW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm14 = xmm17[0],xmm14[0],xmm17[1],xmm14[1],xmm17[2],xmm14[2],xmm17[3],xmm14[3],xmm17[4],xmm14[4],xmm17[5],xmm14[5],xmm17[6],xmm14[6],xmm17[7],xmm14[7]
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm10[0],xmm8[0],xmm10[1],xmm8[1],xmm10[2],xmm8[2],xmm10[3],xmm8[3],xmm10[4],xmm8[4],xmm10[5],xmm8[5],xmm10[6],xmm8[6],xmm10[7],xmm8[7]
 ; AVX512DQBW-SLOW-NEXT:    vprold $16, %xmm8, %xmm8
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm17, %zmm8, %zmm8
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm8, %zmm8
 ; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} zmm8 = zmm8[0,0,0,1,4,4,4,5]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %zmm8, %zmm9 {%k2}
 ; AVX512DQBW-SLOW-NEXT:    vpmovzxbw {{.*#+}} xmm8 = xmm13[0],zero,xmm13[1],zero,xmm13[2],zero,xmm13[3],zero,xmm13[4],zero,xmm13[5],zero,xmm13[6],zero,xmm13[7],zero
@@ -5280,8 +5266,8 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
 ; AVX512DQBW-SLOW-NEXT:    vpermw %ymm1, %ymm11, %ymm1
 ; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm1, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm14, %xmm2, %xmm3
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm14, %xmm4, %xmm8
+; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm23, %xmm2, %xmm3
+; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm23, %xmm4, %xmm8
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm8[0],xmm3[0],xmm8[1],xmm3[1],xmm8[2],xmm3[2],xmm8[3],xmm3[3],xmm8[4],xmm3[4],xmm8[5],xmm3[5],xmm8[6],xmm3[6],xmm8[7],xmm3[7]
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
 ; AVX512DQBW-SLOW-NEXT:    vprold $16, %xmm2, %xmm2
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
index a7fffc339586896..cb65fe5fa68b0c0 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
@@ -7382,21 +7382,21 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ;
 ; AVX512F-SLOW-LABEL: store_i8_stride7_vf64:
 ; AVX512F-SLOW:       # %bb.0:
-; AVX512F-SLOW-NEXT:    subq $1464, %rsp # imm = 0x5B8
+; AVX512F-SLOW-NEXT:    subq $1448, %rsp # imm = 0x5A8
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %ymm1
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm1[14],zero,zero,zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,ymm1[17],zero,zero,zero,zero,zero,zero,ymm1[18]
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm1, %ymm9
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm1, %ymm10
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm2
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[0,1,14],zero,ymm2[12,13,0,1,14,15],zero,ymm2[3,12,13,2,3,16],zero,ymm2[30,31,28,29,16,17],zero,ymm2[31,18,19,28,29,18],zero
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm2, %ymm10
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm2, %ymm14
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %ymm7
+; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %ymm6
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm7, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm27
+; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm6, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm23
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %ymm8
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm8, %ymm1
@@ -7407,239 +7407,242 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm20
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm18
 ; AVX512F-SLOW-NEXT:    vmovdqa (%r9), %ymm1
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [13,0,0,0,128,16,128,14,0,0,0,128,17,128,15,0,13,0,0,0,128,16,128,14,0,0,0,128,17,128,15,0]
 ; AVX512F-SLOW-NEXT:    # ymm3 = mem[0,1,0,1]
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm1, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm21
-; AVX512F-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%r9), %ymm13
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %ymm14
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm14[27],zero,zero,zero,zero,ymm14[30],zero,ymm14[28],zero,zero,zero,zero,ymm14[31],zero,ymm14[29]
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm13[25],zero,ymm13[23],zero,zero,zero,zero,ymm13[26],zero,ymm13[24],zero,zero
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm6
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm11
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm11[30],zero,ymm11[28],zero,zero,zero,zero,ymm11[31],zero,ymm11[29],zero,zero
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
-; AVX512F-SLOW-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm6, %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm22
+; AVX512F-SLOW-NEXT:    vporq %ymm0, %ymm1, %ymm24
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%r9), %ymm11
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %ymm1
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm1, %ymm13
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm11[25],zero,ymm11[23],zero,zero,zero,zero,ymm11[26],zero,ymm11[24],zero,zero
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm7
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm9
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm9[30],zero,ymm9[28],zero,zero,zero,zero,ymm9[31],zero,ymm9[29],zero,zero
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
+; AVX512F-SLOW-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm7, %ymm2
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm5
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero,zero
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero,zero
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm4
-; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
-; AVX512F-SLOW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm4, %ymm3
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm19
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm0
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
+; AVX512F-SLOW-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm4, %ymm2
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm20
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rax), %ymm4
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
-; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
-; AVX512F-SLOW-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm4, %ymm4
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm3
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm7, %ymm2
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm7[18],zero,zero,zero,zero,ymm7[21],zero,ymm7[19],zero,zero,zero,zero,ymm7[22],zero,ymm7[20]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm8[23],zero,ymm8[21,22,23,26],zero,ymm8[24],zero,ymm8[28,29,26,27]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm8[18,19,20,21],zero,ymm8[19],zero,ymm8[25,26,27,22],zero,ymm8[20],zero
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm8, %ymm18
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm10, %ymm1
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm9[21],zero,ymm9[19],zero,zero,zero,zero,ymm9[22],zero,ymm9[20],zero,zero
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
+; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX512F-SLOW-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm4, %ymm4
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm4, %zmm1
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm14, %ymm1
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm10[21],zero,ymm10[19],zero,zero,zero,zero,ymm10[22],zero,ymm10[20],zero,zero
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa (%rax), %ymm3
-; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm25 = <u,5,4,u,5,u,4,u,20,21,u,23,u,21,u,23>
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm3[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpermi2d %zmm0, %zmm1, %zmm25
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm15
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm15, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm16
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm6, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22],zero,ymm6[20]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm8[23],zero,ymm8[21,22,23,26],zero,ymm8[24],zero,ymm8[28,29,26,27]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm8[18,19,20,21],zero,ymm8[19],zero,ymm8[25,26,27,22],zero,ymm8[20],zero
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm8, %ymm19
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa (%rax), %ymm1
+; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm1, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm16 = <u,5,4,u,5,u,4,u,20,21,u,23,u,21,u,23>
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm1[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpermi2d %zmm0, %zmm2, %zmm16
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm29
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm2, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm23
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm30
-; AVX512F-SLOW-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm3, %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm31
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm30
+; AVX512F-SLOW-NEXT:    vpor %xmm0, %xmm2, %xmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm7
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm1, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm7, %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm7, %xmm22
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm4
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm12, %xmm4, %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = <u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm3, %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm21
 ; AVX512F-SLOW-NEXT:    vpor %xmm0, %xmm2, %xmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <0,u,0,u,2,3,u,1,u,18,u,19,18,u,19,u>
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <0,u,0,u,2,3,u,1,u,18,u,19,18,u,19,u>
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rax), %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, (%rsp) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm2[0,1,2,3,4,5,5,6]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpermi2d %zmm0, %zmm2, %zmm7
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vpermi2d %zmm0, %zmm2, %zmm3
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%r9), %xmm0
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = <128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm7, %xmm0, %xmm9
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm31
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm12, %xmm2, %xmm10
-; AVX512F-SLOW-NEXT:    vporq %xmm9, %xmm10, %xmm24
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm6, %ymm9
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm6, %ymm26
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = <128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm0, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm28
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm2, %xmm10
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, %xmm3
+; AVX512F-SLOW-NEXT:    vporq %xmm8, %xmm10, %xmm26
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm7, %ymm8
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm11, %ymm10
-; AVX512F-SLOW-NEXT:    vpor %ymm9, %ymm10, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm9, %ymm10
+; AVX512F-SLOW-NEXT:    vpor %ymm8, %ymm10, %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm9 = zero,zero,zero,ymm5[14],zero,zero,zero,zero,zero,zero,ymm5[15],zero,zero,zero,zero,zero,zero,ymm5[16],zero,zero,zero,zero,zero,zero,ymm5[17],zero,zero,zero,zero,zero,zero,ymm5[18]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm27
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm19, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = zero,zero,zero,ymm5[14],zero,zero,zero,zero,zero,zero,ymm5[15],zero,zero,zero,zero,zero,zero,ymm5[16],zero,zero,zero,zero,zero,zero,ymm5[17],zero,zero,zero,zero,zero,zero,ymm5[18]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm23
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm0
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm0[0,1,14],zero,ymm0[12,13,0,1,14,15],zero,ymm0[3,12,13,2,3,16],zero,ymm0[30,31,28,29,16,17],zero,ymm0[31,18,19,28,29,18],zero
-; AVX512F-SLOW-NEXT:    vpor %ymm9, %ymm10, %ymm5
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm9
-; AVX512F-SLOW-NEXT:    vpshufb %ymm9, %ymm14, %ymm9
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm6
-; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm13, %ymm10
-; AVX512F-SLOW-NEXT:    vpor %ymm9, %ymm10, %ymm5
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm9
-; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm9, %xmm8
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm10
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm10, %xmm4
-; AVX512F-SLOW-NEXT:    vporq %xmm8, %xmm4, %xmm21
-; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm5
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm4
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm5, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm5, %xmm11
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm6
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm5
-; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm6, %xmm8
-; AVX512F-SLOW-NEXT:    vporq %xmm4, %xmm8, %xmm19
-; AVX512F-SLOW-NEXT:    vmovdqa (%r9), %xmm5
+; AVX512F-SLOW-NEXT:    vpor %ymm8, %ymm10, %ymm2
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm2
+; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm13, %ymm8
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm2
+; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm11, %ymm10
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm11, %ymm27
+; AVX512F-SLOW-NEXT:    vpor %ymm8, %ymm10, %ymm2
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm2
+; AVX512F-SLOW-NEXT:    vpshufb %xmm12, %xmm2, %xmm5
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm25
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm12
+; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm12, %xmm9
+; AVX512F-SLOW-NEXT:    vporq %xmm5, %xmm9, %xmm22
+; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm13
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm2
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm13, %xmm7
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm9
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm31, %xmm2
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm9, %xmm10
+; AVX512F-SLOW-NEXT:    vpor %xmm7, %xmm10, %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa (%r9), %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm2, %xmm6
+; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %xmm5
 ; AVX512F-SLOW-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %xmm7, %xmm5, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %xmm8
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %xmm12, %xmm8, %xmm7
-; AVX512F-SLOW-NEXT:    vpor %xmm4, %xmm7, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm4 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm8[8],xmm5[8],xmm8[9],xmm5[9],xmm8[10],xmm5[10],xmm8[11],xmm5[11],xmm8[12],xmm5[12],xmm8[13],xmm5[13],xmm8[14],xmm5[14],xmm8[15],xmm5[15]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm29
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm7[0,1,0,1],zmm4[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa (%rax), %xmm13
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm13[0,1,2,3,4,5,5,6]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,2,3,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = zero,ymm3[13],zero,zero,zero,zero,zero,zero,ymm3[14],zero,zero,zero,zero,zero,zero,ymm3[15],zero,zero,zero,zero,zero,zero,ymm3[16],zero,zero,zero,zero,zero,zero,ymm3[17],zero,zero
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm4, %zmm23
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm4
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm4[30],zero,ymm4[28],zero,zero,zero,zero,ymm4[31],zero,ymm4[29],zero,zero
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm18
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm5, %xmm11
+; AVX512F-SLOW-NEXT:    vpor %xmm6, %xmm11, %xmm6
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm24, %zmm0, %zmm6
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm11 = xmm5[8],xmm2[8],xmm5[9],xmm2[9],xmm5[10],xmm2[10],xmm5[11],xmm2[11],xmm5[12],xmm2[12],xmm5[13],xmm2[13],xmm5[14],xmm2[14],xmm5[15],xmm2[15]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm7, %xmm11, %xmm11
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm11[0,1,0,1],zmm6[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa (%rax), %xmm10
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm10[0,1,2,3,4,5,5,6]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[2,2,3,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm11, %ymm11
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 = zero,ymm1[13],zero,zero,zero,zero,zero,zero,ymm1[14],zero,zero,zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,ymm1[17],zero,zero
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm11, %zmm24
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm19, %ymm2
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm18
 ; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm5 = [13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14]
+; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %ymm5, %ymm2, %ymm11
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm29
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %ymm5, %ymm14, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm28
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm5[27],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm12
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm31, %xmm7
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm2[0],xmm7[0],xmm2[1],xmm7[1],xmm2[2],xmm7[2],xmm2[3],xmm7[3],xmm2[4],xmm7[4],xmm2[5],xmm7[5],xmm2[6],xmm7[6],xmm2[7],xmm7[7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm12[0,1,2,3],zmm4[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm0[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm17
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,2,3,3,6,6,7,7]
-; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm8
-; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm8, %ymm12
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm12, %zmm20
-; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm3[30],zero,ymm3[28],zero,zero,zero,zero,ymm3[31],zero,ymm3[29],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm3, %ymm3
-; AVX512F-SLOW-NEXT:    vpshuflw $233, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm4 = mem[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[0,0,1,1,4,4,5,5]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm0
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm15 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm14[27],zero,zero,zero,zero,ymm14[30],zero,ymm14[28],zero,zero,zero,zero,ymm14[31],zero,ymm14[29]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [18374967954648269055,71777218572844800,18374967954648269055,71777218572844800,18446463693966278655,18446742978476179455,18446463693966278655,18446742978476179455]
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm31, %ymm11, %ymm15
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm6
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm3, %xmm8
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3],xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm15[0,1,2,3],zmm11[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm0[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm17
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[2,2,3,3,6,6,7,7]
+; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm11 = [9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm5
+; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm5, %ymm15
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm15, %zmm19
+; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm0[30],zero,ymm0[28],zero,zero,zero,zero,ymm0[31],zero,ymm0[29],zero,zero,zero
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm0, %ymm1
+; AVX512F-SLOW-NEXT:    vpshuflw $233, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm11 = mem[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm11[0,0,1,1,4,4,5,5]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm11, %zmm23
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm30, %xmm0
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm0[0],xmm15[0],xmm0[1],xmm15[1],xmm0[2],xmm15[2],xmm0[3],xmm15[3],xmm0[4],xmm15[4],xmm0[5],xmm15[5],xmm0[6],xmm15[6],xmm0[7],xmm15[7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm16
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm15[8],xmm0[8],xmm15[9],xmm0[9],xmm15[10],xmm0[10],xmm15[11],xmm0[11],xmm15[12],xmm0[12],xmm15[13],xmm0[13],xmm15[14],xmm0[14],xmm15[15],xmm0[15]
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm11[8],xmm6[8],xmm11[9],xmm6[9],xmm11[10],xmm6[10],xmm11[11],xmm6[11],xmm11[12],xmm6[12],xmm11[13],xmm6[13],xmm11[14],xmm6[14],xmm11[15],xmm6[15]
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm11, %xmm12
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = <6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm4, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm3, %zmm0, %zmm30
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm0
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm15 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm9[8],xmm10[8],xmm9[9],xmm10[9],xmm9[10],xmm10[10],xmm9[11],xmm10[11],xmm9[12],xmm10[12],xmm9[13],xmm10[13],xmm9[14],xmm10[14],xmm9[15],xmm10[15]
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm15 = xmm13[8],xmm9[8],xmm13[9],xmm9[9],xmm13[10],xmm9[10],xmm13[11],xmm9[11],xmm13[12],xmm9[12],xmm13[13],xmm9[13],xmm13[14],xmm9[14],xmm13[15],xmm9[15]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm15, %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm30
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm0
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm15 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm3
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm3[8],xmm12[8],xmm3[9],xmm12[9],xmm3[10],xmm12[10],xmm3[11],xmm12[11],xmm3[12],xmm12[12],xmm3[13],xmm12[13],xmm3[14],xmm12[14],xmm3[15],xmm12[15]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm3, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm22
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm2[8],xmm7[8],xmm2[9],xmm7[9],xmm2[10],xmm7[10],xmm2[11],xmm7[11],xmm2[12],xmm7[12],xmm2[13],xmm7[13],xmm2[14],xmm7[14],xmm2[15],xmm7[15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm1
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm24 = zmm24[0,1,0,1],zmm0[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12]
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm0, %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm21
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm8[8],xmm6[8],xmm8[9],xmm6[9],xmm8[10],xmm6[10],xmm8[11],xmm6[11],xmm8[12],xmm6[12],xmm8[13],xmm6[13],xmm8[14],xmm6[14],xmm8[15],xmm6[15]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm7, %xmm0, %xmm0
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm26 = zmm26[0,1,0,1],zmm0[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12]
+; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm0, %ymm6
 ; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm0, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm0, %ymm7
+; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm0, %ymm8
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[18],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm29
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm28
 ; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm0[23],zero,ymm0[21,22,23,26],zero,ymm0[24],zero,ymm0[28,29,26,27]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm0[23],zero,ymm0[21,22,23,26],zero,ymm0[24],zero,ymm0[28,29,26,27]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm0[18,19,20,21],zero,ymm0[19],zero,ymm0[25,26,27,22],zero,ymm0[20],zero
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm26
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm8[21],zero,ymm8[19],zero,zero,zero,zero,ymm8[22],zero,ymm8[20],zero,zero
-; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm1, %ymm3
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm14[25],zero,ymm14[23],zero,zero,zero,zero,ymm14[26],zero,ymm14[24],zero,zero
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm20
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm5[21],zero,ymm5[19],zero,zero,zero,zero,ymm5[22],zero,ymm5[20],zero,zero
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm1, %ymm4
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm2, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
 ; AVX512F-SLOW-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm14, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm1, %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -7647,14 +7650,14 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    # ymm2 = mem[0,1,0,1]
 ; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm0, %ymm1
-; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm5, %ymm2
+; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm14, %ymm2
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm27
 ; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128]
 ; AVX512F-SLOW-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm5, %ymm5
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm28
+; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm14, %ymm14
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm14, %ymm25
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm31
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm29
 ; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,ymm0[13],zero,zero,zero,zero,zero,zero,ymm0[14],zero,zero,zero,zero,zero,zero,ymm0[15],zero,zero,zero,zero,zero,zero,ymm0[16],zero,zero,zero,zero,zero,zero,ymm0[17],zero,zero
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm14 = ymm0[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
@@ -7662,157 +7665,155 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,3,2]
 ; AVX512F-SLOW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm14, %ymm14
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm2, %zmm2
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm12[0],xmm3[0],xmm12[1],xmm3[1],xmm12[2],xmm3[2],xmm12[3],xmm3[3],xmm12[4],xmm3[4],xmm12[5],xmm3[5],xmm12[6],xmm3[6],xmm12[7],xmm3[7]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5>
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm15, %xmm15
-; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm21, %zmm10, %zmm0
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm12, %xmm12
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm22, %zmm12, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1],xmm6[2],xmm12[2],xmm6[3],xmm12[3],xmm6[4],xmm12[4],xmm6[5],xmm12[5],xmm6[6],xmm12[6],xmm6[7],xmm12[7]
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm13 = xmm9[0],xmm13[0],xmm9[1],xmm13[1],xmm9[2],xmm13[2],xmm9[3],xmm13[3],xmm9[4],xmm13[4],xmm9[5],xmm13[5],xmm9[6],xmm13[6],xmm9[7],xmm13[7]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9>
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm0
-; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm0, %xmm10
-; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm6, %xmm6
-; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm19, %zmm6, %zmm6
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm4[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm11, %xmm9
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm13, %xmm11
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm6[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm18[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm19 = ymm3[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm1[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm11[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm8[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm1
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm21 = ymm1[0,0,1,1,4,4,5,5]
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3],xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512F-SLOW-NEXT:    vshufi64x2 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # zmm1 = zmm1[0,1,0,1],mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm13[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm13[1,1,0,0,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[0,1,2,0]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm22 = ymm4[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm8[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm7[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm5[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm3
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm3[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[0,0,1,1,4,4,5,5]
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm12 # 16-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # xmm5 = xmm5[0],mem[0],xmm5[1],mem[1],xmm5[2],mem[2],xmm5[3],mem[3],xmm5[4],mem[4],xmm5[5],mem[5],xmm5[6],mem[6],xmm5[7],mem[7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512F-SLOW-NEXT:    vshufi64x2 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm11 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # zmm11 = zmm5[0,1,0,1],mem[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm10[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm10[1,1,0,0,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,1,2,0]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm7, %zmm7
+; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # zmm10 = mem[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm5 = zmm19[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpternlogq $236, %zmm31, %zmm10, %zmm5
+; AVX512F-SLOW-NEXT:    vpandq %ymm31, %ymm22, %ymm10
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm1, %zmm1
+; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # zmm10 = mem[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vporq %zmm10, %zmm1, %zmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [18374966859431673855,18446463693966278655,18374966859431673855,18446463693966278655,18374967954648269055,71777218572844800,18374967954648269055,71777218572844800]
+; AVX512F-SLOW-NEXT:    vpand %ymm6, %ymm10, %ymm6
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # zmm6 = mem[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vporq %zmm6, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm5
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm0, %zmm5
+; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # zmm1 = mem[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm6 = zmm23[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpternlogq $236, %zmm10, %zmm1, %zmm6
+; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # zmm1 = mem[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # zmm17 = mem[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vporq %zmm1, %zmm17, %zmm1
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm6, %zmm0, %zmm1
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm6 = zmm30[0,1,0,1,4,5,4,5]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm17 = zmm21[0,1,0,1,4,5,4,5]
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm6, %zmm0, %zmm17
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm10, %ymm13, %ymm14
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm3[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpternlogq $236, %ymm10, %ymm4, %ymm0
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm9[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm14, %zmm3
+; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm4 = mem[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpshufhw $190, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm6 = mem[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[2,2,3,3,6,6,7,7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm6
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm15[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm6, %zmm4
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm6, %zmm4
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm28[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm20[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpor %ymm3, %ymm8, %ymm3
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm8, %zmm3
-; AVX512F-SLOW-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm19, %ymm8
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm0
-; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # zmm8 = mem[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vporq %zmm8, %zmm0, %zmm0
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm8 = [18374966859431673855,18446463693966278655,18374966859431673855,18446463693966278655]
-; AVX512F-SLOW-NEXT:    vpand %ymm7, %ymm8, %ymm7
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm5, %zmm5
-; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # zmm7 = mem[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vporq %zmm7, %zmm5, %zmm5
-; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # zmm7 = mem[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm9 = zmm20[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm9
-; AVX512F-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm9
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm5, %zmm9
-; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # zmm0 = mem[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # zmm7 = mem[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vporq %zmm0, %zmm7, %zmm0
-; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # zmm7 = mem[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # zmm13 = mem[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm13
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm5, %zmm13
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm0 = zmm30[0,1,0,1,4,5,4,5]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm7 = zmm22[0,1,0,1,4,5,4,5]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm5, %zmm7
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm8, %ymm12, %ymm14
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm21[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpternlogq $236, %ymm8, %ymm4, %ymm0
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm10[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm14, %zmm4
-; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm5 = mem[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpshufhw $190, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm8 = mem[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm8[2,2,3,3,6,6,7,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm8
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm15[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm8, %zmm5
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm4, %zmm8, %zmm5
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm29[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm26[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpor %ymm4, %ymm11, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm10, %zmm4
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm10, %zmm0
-; AVX512F-SLOW-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm11 = mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm14 = mem[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw $5, (%rsp), %xmm15 # 16-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # xmm15 = mem[1,1,0,0,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm15[0,1,2,0]
-; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm17 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm17 = mem[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm8, %zmm0
+; AVX512F-SLOW-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm8 = mem[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm13 = mem[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw $5, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # xmm14 = mem[1,1,0,0,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[0,1,2,0]
+; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm15 = mem[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm18 = ymm27[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm19 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # ymm19 = mem[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm20 = ymm28[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm20 = ymm25[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm21 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # ymm21 = mem[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm22 = ymm31[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm4, %zmm8, %zmm0
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm4 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm8 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm8
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm23
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm23
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm15[0,0,1,0]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm14, %zmm4
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm4
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm22 = ymm29[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm3, %zmm6, %zmm0
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm3 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm6 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm6
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm24
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm24
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm14[0,0,1,0]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm13, %zmm3
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm3
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm3
+; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # zmm4 = mem[2,3,2,3,6,7,6,7]
 ; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm4
-; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # zmm5 = mem[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm5
-; AVX512F-SLOW-NEXT:    vporq %ymm17, %ymm18, %ymm8
-; AVX512F-SLOW-NEXT:    vporq %ymm19, %ymm20, %ymm9
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm8
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm9[0,1,2,3],zmm8[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm25
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm13, %zmm25
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm24
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm24
-; AVX512F-SLOW-NEXT:    vporq %ymm21, %ymm22, %ymm7
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm7
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm8[0,1,2,3],zmm7[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm2
+; AVX512F-SLOW-NEXT:    vporq %ymm15, %ymm18, %ymm5
+; AVX512F-SLOW-NEXT:    vporq %ymm19, %ymm20, %ymm6
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm5
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm6[0,1,2,3],zmm5[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm16
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm16
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm26
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm17, %zmm26
+; AVX512F-SLOW-NEXT:    vporq %ymm21, %ymm22, %ymm1
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm5[0,1,2,3],zmm1[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm2
 ; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2
 ; AVX512F-SLOW-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # zmm0 = mem[0,1,0,1,4,5,4,5]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm6 = zmm6[0,1,0,1,4,5,4,5]
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm6
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm0 = zmm3[0,0,1,0,4,4,5,4]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm1 = zmm12[0,1,0,1,4,5,4,5]
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm0 = zmm7[0,0,1,0,4,4,5,4]
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm11, %zmm0
 ; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm0
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm0, (%rax)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm2, 320(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm24, 256(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm25, 128(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm5, 384(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm4, 192(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm23, 64(%rax)
-; AVX512F-SLOW-NEXT:    addq $1464, %rsp # imm = 0x5B8
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm26, 256(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm16, 128(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm4, 384(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm3, 192(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm24, 64(%rax)
+; AVX512F-SLOW-NEXT:    addq $1448, %rsp # imm = 0x5A8
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
 ;
@@ -8702,28 +8703,27 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermw %ymm15, %ymm1, %ymm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm3
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%r9), %ymm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm20, %ymm10, %ymm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm22 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm22, %ymm10, %ymm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%r8), %ymm11
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm24 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm24, %ymm11, %ymm1
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm23 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm23, %ymm11, %ymm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%r9), %xmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%r9), %xmm4
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%r8), %xmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10>
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm4, %xmm6, %xmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm6, %zmm21
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm6, %zmm20
 ; AVX512BW-ONLY-SLOW-NEXT:    movabsq $2323999253380730912, %r10 # imm = 0x2040810204081020
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovq %r10, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm3, %zmm21 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm3, %zmm20 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rdx), %ymm16
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm14 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm14, %ymm16, %ymm3
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rcx), %ymm17
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm23 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm23, %ymm17, %ymm5
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm25 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm25, %ymm17, %ymm5
 ; AVX512BW-ONLY-SLOW-NEXT:    vpor %ymm3, %ymm5, %ymm3
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %xmm5
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%rcx), %xmm6
@@ -8731,194 +8731,195 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7>
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm12, %xmm7, %xmm7
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm7, %zmm22
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm7, %zmm21
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rdi), %ymm18
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm25 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm25, %ymm18, %ymm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm26 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm26, %ymm18, %ymm3
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rsi), %ymm19
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm26 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm26, %ymm19, %ymm7
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm27 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm27, %ymm19, %ymm7
 ; AVX512BW-ONLY-SLOW-NEXT:    vpor %ymm3, %ymm7, %ymm3
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %xmm7
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%rsi), %xmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm27 = xmm8[8],xmm7[8],xmm8[9],xmm7[9],xmm8[10],xmm7[10],xmm8[11],xmm7[11],xmm8[12],xmm7[12],xmm8[13],xmm7[13],xmm8[14],xmm7[14],xmm8[15],xmm7[15]
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm24 = xmm8[8],xmm7[8],xmm8[9],xmm7[9],xmm8[10],xmm7[10],xmm8[11],xmm7[11],xmm8[12],xmm7[12],xmm8[13],xmm7[13],xmm8[14],xmm7[14],xmm8[15],xmm7[15]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm13 = <2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u>
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm13, %xmm27, %xmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm27 = ymm27[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm27, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm13, %xmm24, %xmm24
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm24 = ymm24[0,1,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm24, %zmm3
 ; AVX512BW-ONLY-SLOW-NEXT:    movabsq $435749860008887046, %r10 # imm = 0x60C183060C18306
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovq %r10, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm22, %zmm3 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm21, %zmm3 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    movabsq $4066998693416279096, %r10 # imm = 0x3870E1C3870E1C38
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovq %r10, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm21, %zmm3 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm21 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermw %ymm2, %ymm21, %ymm21
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm20, %zmm3 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermw %ymm2, %ymm21, %ymm20
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm9, %ymm2, %ymm9
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm21, %zmm9, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 32(%r9), %ymm21
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm20, %ymm21, %ymm20
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm20, %zmm9, %zmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 32(%r9), %ymm20
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm22, %ymm20, %ymm24
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 32(%r8), %ymm22
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm24, %ymm22, %ymm24
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %ymm20, %ymm24, %ymm20
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm24 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm22[20],zero,ymm22[18],zero,ymm22[20,21,20,21],zero,ymm22[19],zero,ymm22[19,20,21,22],zero
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm24 = ymm24[2,3,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm27 = ymm21[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm21[20],zero,ymm21[18],zero,zero,zero,zero,ymm21[21],zero,ymm21[19],zero,zero,zero,zero,ymm21[22]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm27 = ymm27[2,3,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %ymm24, %ymm27, %ymm24
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm24, %zmm20, %zmm24
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm23, %ymm22, %ymm23
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %ymm24, %ymm23, %ymm24
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm23 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128>
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm23, %ymm20, %ymm28
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm28 = ymm28[2,3,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm29 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm22[20],zero,ymm22[18],zero,ymm22[20,21,20,21],zero,ymm22[19],zero,ymm22[19,20,21,22],zero
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm29 = ymm29[2,3,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %ymm29, %ymm28, %ymm28
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm28, %zmm24, %zmm28
 ; AVX512BW-ONLY-SLOW-NEXT:    movabsq $145249953336295682, %r10 # imm = 0x204081020408102
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovq %r10, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm9, %zmm24 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 32(%rsi), %ymm27
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 32(%rdi), %ymm28
-; AVX512BW-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm28[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm9, %zmm28 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 32(%rsi), %ymm29
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 32(%rdi), %ymm30
+; AVX512BW-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm30[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm9[0,0,1,1,4,4,5,5]
-; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastd {{.*#+}} ymm20 = [5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6]
+; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastd {{.*#+}} ymm24 = [5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6]
 ; AVX512BW-ONLY-SLOW-NEXT:    movl $676341840, %r10d # imm = 0x28502850
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %r10d, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm20, %ymm27, %ymm9 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm24, %ymm29, %ymm9 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm25, %ymm28, %ymm25
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm26, %ymm27, %ymm26
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %ymm25, %ymm26, %ymm25
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm25, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 32(%rdx), %ymm26
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm14, %ymm26, %ymm14
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 32(%rcx), %ymm29
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm23, %ymm29, %ymm23
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %ymm14, %ymm23, %ymm14
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm23 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm26[18],zero,ymm26[18,19,20,21],zero,ymm26[19],zero,ymm26[25,26,27,22],zero,ymm26[20],zero
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm23 = ymm23[2,3,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm25 = ymm29[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm29[18],zero,zero,zero,zero,ymm29[21],zero,ymm29[19],zero,zero,zero,zero,ymm29[22],zero,ymm29[20]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm26, %ymm30, %ymm26
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm27, %ymm29, %ymm27
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %ymm26, %ymm27, %ymm26
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm26, %zmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 32(%rdx), %ymm31
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm14, %ymm31, %ymm14
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm25, %ymm0, %ymm25
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %ymm14, %ymm25, %ymm14
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm26 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128>
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm26, %ymm0, %ymm25
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm25 = ymm25[2,3,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %ymm23, %ymm25, %ymm23
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm23, %zmm14, %zmm14
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm27 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm31[18],zero,ymm31[18,19,20,21],zero,ymm31[19],zero,ymm31[25,26,27,22],zero,ymm31[20],zero
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm27 = ymm27[2,3,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %ymm27, %ymm25, %ymm25
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm25, %zmm14, %zmm14
 ; AVX512BW-ONLY-SLOW-NEXT:    movabsq $3485998880071096368, %r10 # imm = 0x3060C183060C1830
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovq %r10, %k2
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm14, %zmm9 {%k2}
 ; AVX512BW-ONLY-SLOW-NEXT:    movabsq $-4357498600088870461, %r10 # imm = 0xC3870E1C3870E1C3
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovq %r10, %k3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm24, %zmm9 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm14 = ymm28[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm28, %zmm9 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm14 = ymm30[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm14[2,2,3,3,6,6,7,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastd {{.*#+}} ymm24 = [13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14]
+; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastd {{.*#+}} ymm25 = [13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14]
 ; AVX512BW-ONLY-SLOW-NEXT:    movl $338170920, %r10d # imm = 0x14281428
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %r10d, %k4
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm24, %ymm27, %ymm14 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm25, %ymm29, %ymm14 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm28 = [25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
+; AVX512BW-ONLY-SLOW-NEXT:    # ymm28 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm28, %ymm29, %ymm27
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm27 = ymm27[2,3,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm29 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm29, %ymm30, %ymm30
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm30 = ymm30[2,3,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %ymm27, %ymm30, %ymm27
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm23 = [25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm23 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm23, %ymm27, %ymm25
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm25 = ymm25[2,3,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm27 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm27, %ymm28, %ymm28
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm28 = ymm28[2,3,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %ymm25, %ymm28, %ymm25
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm25, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm25 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm26[24,25],zero,ymm26[23],zero,ymm26[21,22,23,26],zero,ymm26[24],zero,ymm26[28,29,26,27]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm25 = ymm25[2,3,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm28 = ymm29[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm29[25],zero,ymm29[23],zero,zero,zero,zero,ymm29[26],zero,ymm29[24],zero,zero,zero,zero
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm28 = ymm28[2,3,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %ymm25, %ymm28, %ymm28
-; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm25 = [11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm25, %ymm29, %ymm29
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm26 = ymm26[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm26 = ymm26[0,2,3,3,4,6,7,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm26, %ymm29 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm29 = ymm29[2,3,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm29, %zmm28, %zmm28
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%r8), %zmm29
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm27, %zmm14
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm27 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm31[24,25],zero,ymm31[23],zero,ymm31[21,22,23,26],zero,ymm31[24],zero,ymm31[28,29,26,27]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm27 = ymm27[2,3,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm30 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm0[25],zero,ymm0[23],zero,zero,zero,zero,ymm0[26],zero,ymm0[24],zero,zero,zero,zero
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm30 = ymm30[2,3,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %ymm27, %ymm30, %ymm30
+; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm27 = [11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm27, %ymm0, %ymm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm31 = ymm31[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm31 = ymm31[0,2,3,3,4,6,7,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm31, %ymm0 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm30, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%r8), %zmm30
 ; AVX512BW-ONLY-SLOW-NEXT:    movabsq $1742999440035548184, %r10 # imm = 0x183060C183060C18
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovq %r10, %k3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm28, %zmm14 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%r9), %zmm28
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm22 = zmm22[0,1,2,3],zmm28[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm14 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%r9), %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm22 = zmm22[0,1,2,3],zmm0[4,5,6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} zmm22 = zmm22[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,zmm22[23],zero,zmm22[23,24,25,26],zero,zmm22[24],zero,zmm22[30,31,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,61],zero,zmm22[59],zero,zero,zero,zero,zmm22[62],zero,zmm22[60],zero,zero,zero,zero,zmm22[63],zero
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} zmm22 = zmm22[2,3,2,3,6,7,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm21 = zmm21[0,1,2,3],zmm29[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} zmm21 = zmm21[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm21[25],zero,zmm21[23],zero,zero,zero,zero,zmm21[26],zero,zmm21[24],zero,zero,zmm21[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm21[59],zero,zero,zero,zero,zmm21[62],zero,zmm21[60],zero,zero,zero,zero,zmm21[63],zero,zmm21[61]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} zmm21 = zmm21[2,3,2,3,6,7,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %zmm22, %zmm21, %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rax), %zmm21
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm20 = zmm20[0,1,2,3],zmm30[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} zmm20 = zmm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm20[25],zero,zmm20[23],zero,zero,zero,zero,zmm20[26],zero,zmm20[24],zero,zero,zmm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm20[59],zero,zero,zero,zero,zmm20[62],zero,zmm20[60],zero,zero,zero,zero,zmm20[63],zero,zmm20[61]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} zmm20 = zmm20[2,3,2,3,6,7,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %zmm22, %zmm20, %zmm20
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rax), %zmm22
 ; AVX512BW-ONLY-SLOW-NEXT:    movabsq $6971997760142192736, %rax # imm = 0x60C183060C183060
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovq %rax, %k3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm22, %zmm14 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12,62,61,62,63,63,62,62,63,62,61,62,63,63,62,62,63]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2w %zmm21, %zmm2, %zmm22
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm20, %zmm14 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12,62,61,62,63,63,62,62,63,62,61,62,63,63,62,62,63]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2w %zmm22, %zmm2, %zmm20
 ; AVX512BW-ONLY-SLOW-NEXT:    movabsq $-9150747060186627967, %rax # imm = 0x8102040810204081
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovq %rax, %k5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm22, %zmm14 {%k5}
-; AVX512BW-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm22 = ymm18[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm30 = ymm22[0,0,1,1,4,4,5,5]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm20, %ymm19, %ymm30 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm20, %zmm14 {%k5}
+; AVX512BW-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm20 = ymm18[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm20 = ymm20[0,0,1,1,4,4,5,5]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm24, %ymm19, %ymm20 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm28, %ymm19, %ymm24
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm24 = ymm24[2,3,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm29, %ymm18, %ymm28
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm28 = ymm28[2,3,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %ymm24, %ymm28, %ymm24
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm20 = ymm20[2,3,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm24, %zmm20, %zmm28
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm23, %ymm19, %ymm22
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm22 = ymm22[2,3,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm27, %ymm18, %ymm23
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm23 = ymm23[2,3,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %ymm22, %ymm23, %ymm23
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 32(%r9), %xmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm27 = ymm30[2,3,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm23, %zmm27, %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 32(%r8), %xmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm26, %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} zmm26 = zmm26[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,zmm26[18,19,20,21],zero,zmm26[19],zero,zmm26[25,26,27,22],zero,zmm26[20],zero,zmm26[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,56,57],zero,zmm26[55],zero,zmm26[53,54,55,58],zero,zmm26[56],zero,zmm26[60,61,58,59]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} zmm26 = zmm26[2,3,2,3,6,7,6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm17, %zmm20, %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} zmm20 = zmm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm20[18],zero,zero,zero,zero,zmm20[21],zero,zmm20[19],zero,zero,zero,zero,zmm20[22],zero,zmm20[20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm20[57],zero,zmm20[55],zero,zero,zero,zero,zmm20[58],zero,zmm20[56],zero,zero,zero,zero
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %zmm26, %zmm20, %zmm20
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm24
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm24, %zmm24
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} zmm24 = zmm24[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,zmm24[18,19,20,21],zero,zmm24[19],zero,zmm24[25,26,27,22],zero,zmm24[20],zero,zmm24[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,56,57],zero,zmm24[55],zero,zmm24[53,54,55,58],zero,zmm24[56],zero,zmm24[60,61,58,59]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} zmm24 = zmm24[2,3,2,3,6,7,6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} zmm20 = zmm20[2,3,2,3,6,7,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %zmm26, %zmm20, %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 32(%rdx), %xmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm27, %zmm20 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 32(%rcx), %xmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm29, %zmm29
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} zmm29 = zmm29[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,zmm29[18],zero,zmm29[20,21,20,21],zero,zmm29[19],zero,zmm29[19,20,21,22],zero,zmm29[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,56,57,56,57],zero,zmm29[55],zero,zmm29[55,56,57,58],zero,zmm29[56],zero,zmm29[62,63]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} zmm29 = zmm29[2,3,2,3,6,7,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm28, %zmm28
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} zmm28 = zmm28[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm28[20],zero,zmm28[18],zero,zero,zero,zero,zmm28[21],zero,zmm28[19],zero,zero,zero,zero,zmm28[22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm28[57],zero,zmm28[55],zero,zero,zero,zero,zmm28[58],zero,zmm28[56],zero,zero
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %zmm24, %zmm20, %zmm20
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 32(%r9), %xmm24
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm28, %zmm20 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 32(%r8), %xmm26
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %zmm23, %zmm0, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 32(%rdx), %xmm23
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm30, %zmm28
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} zmm28 = zmm28[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,zmm28[18],zero,zmm28[20,21,20,21],zero,zmm28[19],zero,zmm28[19,20,21,22],zero,zmm28[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,56,57,56,57],zero,zmm28[55],zero,zmm28[55,56,57,58],zero,zmm28[56],zero,zmm28[62,63]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} zmm28 = zmm28[2,3,2,3,6,7,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %zmm29, %zmm28, %zmm29
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm21, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermw %zmm15, %zmm28, %zmm28
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %zmm28, %zmm0, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 32(%rcx), %xmm28
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm22, %zmm15
+; AVX512BW-ONLY-SLOW-NEXT:    vpermw %zmm15, %zmm21, %zmm21
 ; AVX512BW-ONLY-SLOW-NEXT:    movabsq $1161999626690365456, %rax # imm = 0x1020408102040810
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovq %rax, %k5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm28, %zmm29 {%k5}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 32(%rdi), %xmm28
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm21, %zmm0 {%k5}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 32(%rdi), %xmm21
 ; AVX512BW-ONLY-SLOW-NEXT:    movabsq $2033499346708139548, %rax # imm = 0x1C3870E1C3870E1C
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovq %rax, %k5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm29, %zmm20 {%k5}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 32(%rsi), %xmm29
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm20 {%k5}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm18 = ymm18[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm30 = ymm18[2,2,3,3,6,6,7,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm24, %ymm19, %ymm30 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm19 = xmm28[0],xmm29[0],xmm28[1],xmm29[1],xmm28[2],xmm29[2],xmm28[3],xmm29[3],xmm28[4],xmm29[4],xmm28[5],xmm29[5],xmm28[6],xmm29[6],xmm28[7],xmm29[7]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm29 = ymm18[2,2,3,3,6,6,7,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm25, %ymm19, %ymm29 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm19 = xmm21[0],xmm0[0],xmm21[1],xmm0[1],xmm21[2],xmm0[2],xmm21[3],xmm0[3],xmm21[4],xmm0[4],xmm21[5],xmm0[5],xmm21[6],xmm0[6],xmm21[7],xmm0[7]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm18 = <0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5>
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm18, %xmm19, %xmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm19 = zmm30[2,3,2,3],zmm19[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm24 = xmm26[0],xmm27[0],xmm26[1],xmm27[1],xmm26[2],xmm27[2],xmm26[3],xmm27[3],xmm26[4],xmm27[4],xmm26[5],xmm27[5],xmm26[6],xmm27[6],xmm26[7],xmm27[7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm25, %ymm17, %ymm25
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm19 = zmm29[2,3,2,3],zmm19[0,1,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm25 = xmm23[0],xmm28[0],xmm23[1],xmm28[1],xmm23[2],xmm28[2],xmm23[3],xmm28[3],xmm23[4],xmm28[4],xmm23[5],xmm28[5],xmm23[6],xmm28[6],xmm23[7],xmm28[7]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %ymm27, %ymm17, %ymm27
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9>
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm17, %xmm24, %xmm24
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm17, %xmm25, %xmm25
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm16 = ymm16[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm16 = ymm16[0,2,3,3,4,6,7,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm16, %ymm25 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm16 = zmm25[2,3,2,3],zmm24[0,1,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm16, %ymm27 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm16 = zmm27[2,3,2,3],zmm25[0,1,0,1]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm19, %zmm16 {%k2}
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm10[27],zero,zero,zero,zero,ymm10[30],zero,ymm10[28],zero,zero,zero,zero,ymm10[31],zero
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm11[27],zero,zero,zero,zero,ymm11[30],zero,ymm11[28],zero,zero,zero,zero,ymm11[31],zero,ymm11[29]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpor %ymm10, %ymm11, %ymm11
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm19 = xmm23[0],xmm22[0],xmm23[1],xmm22[1],xmm23[2],xmm22[2],xmm23[3],xmm22[3],xmm23[4],xmm22[4],xmm23[5],xmm22[5],xmm23[6],xmm22[6],xmm23[7],xmm22[7]
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm19 = xmm26[0],xmm24[0],xmm26[1],xmm24[1],xmm26[2],xmm24[2],xmm26[3],xmm24[3],xmm26[4],xmm24[4],xmm26[5],xmm24[5],xmm26[6],xmm24[6],xmm26[7],xmm24[7]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u>
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm10, %xmm19, %xmm19
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm19 = ymm19[0,1,0,1]
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm19, %zmm11, %zmm11
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm19 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermw %zmm21, %zmm19, %zmm19
+; AVX512BW-ONLY-SLOW-NEXT:    vpermw %zmm22, %zmm19, %zmm19
 ; AVX512BW-ONLY-SLOW-NEXT:    movabsq $580999813345182728, %rax # imm = 0x810204081020408
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovq %rax, %k1
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm19, %zmm11 {%k1}
@@ -8926,64 +8927,64 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovq %rax, %k1
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm11, %zmm16 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm19 = <u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u>
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm19, %xmm27, %xmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm24 = <u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u>
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm24, %xmm26, %xmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm11, %xmm25, %xmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm25 = xmm27[8],xmm26[8],xmm27[9],xmm26[9],xmm27[10],xmm26[10],xmm27[11],xmm26[11],xmm27[12],xmm26[12],xmm27[13],xmm26[13],xmm27[14],xmm26[14],xmm27[15],xmm26[15]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm12, %xmm25, %xmm12
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm19, %xmm28, %xmm11
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm25 = <u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u>
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm25, %xmm23, %xmm27
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm11, %xmm27, %xmm11
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm23 = xmm28[8],xmm23[8],xmm28[9],xmm23[9],xmm28[10],xmm23[10],xmm28[11],xmm23[11],xmm28[12],xmm23[12],xmm28[13],xmm23[13],xmm28[14],xmm23[14],xmm28[15],xmm23[15]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm12, %xmm23, %xmm12
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm12, %zmm11, %zmm11
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128>
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm12, %xmm29, %xmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm26 = <u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9>
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm26, %xmm28, %xmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm25, %xmm27, %xmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm27 = xmm29[8],xmm28[8],xmm29[9],xmm28[9],xmm29[10],xmm28[10],xmm29[11],xmm28[11],xmm29[12],xmm28[12],xmm29[13],xmm28[13],xmm29[14],xmm28[14],xmm29[15],xmm28[15]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} zmm28 = zmm11[0,1,0,1,4,5,4,5]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm13, %xmm27, %xmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm11, %zmm25, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} zmm11 = zmm11[0,1,0,1,4,5,4,5]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm28, %zmm11 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm13 = <128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6>
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm13, %xmm22, %xmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm27 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4,52,53,52,53,53,54,53,54,52,53,52,53,53,54,53,54]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2w %zmm21, %zmm2, %zmm27
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm12, %xmm0, %xmm23
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm27 = <u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9>
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm27, %xmm21, %xmm28
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm23, %xmm28, %xmm23
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm21[8],xmm0[9],xmm21[9],xmm0[10],xmm21[10],xmm0[11],xmm21[11],xmm0[12],xmm21[12],xmm0[13],xmm21[13],xmm0[14],xmm21[14],xmm0[15],xmm21[15]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} zmm21 = zmm11[0,1,0,1,4,5,4,5]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm13, %xmm0, %xmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm23, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} zmm11 = zmm0[0,1,0,1,4,5,4,5]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm21, %zmm11 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6>
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm0, %xmm24, %xmm13
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4,52,53,52,53,53,54,53,54,52,53,52,53,53,54,53,54]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2w %zmm22, %zmm2, %zmm21
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128>
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm2, %xmm23, %xmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm25, %xmm21, %xmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm22 = xmm23[8],xmm22[8],xmm23[9],xmm22[9],xmm23[10],xmm22[10],xmm23[11],xmm22[11],xmm23[12],xmm22[12],xmm23[13],xmm22[13],xmm23[14],xmm22[14],xmm23[15],xmm22[15]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm4, %xmm22, %xmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm21, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} zmm4 = zmm4[0,1,0,1,4,5,4,5]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm2, %xmm26, %xmm22
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm13, %xmm22, %xmm13
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm22 = xmm26[8],xmm24[8],xmm26[9],xmm24[9],xmm26[10],xmm24[10],xmm26[11],xmm24[11],xmm26[12],xmm24[12],xmm26[13],xmm24[13],xmm26[14],xmm24[14],xmm26[15],xmm24[15]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = xmm22[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm22, %zmm13, %zmm13
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} zmm13 = zmm13[0,1,0,1,4,5,4,5]
 ; AVX512BW-ONLY-SLOW-NEXT:    movabsq $290499906672591364, %rax # imm = 0x408102040810204
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovq %rax, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm27, %zmm4 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm21, %zmm13 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    movabsq $-8714997200177740921, %rax # imm = 0x870E1C3870E1C387
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovq %rax, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm4, %zmm11 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm19, %xmm6, %xmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm24, %xmm5, %xmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm4, %xmm19, %xmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm13, %zmm11 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm19, %xmm6, %xmm13
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm25, %xmm5, %xmm19
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm13, %xmm19, %xmm13
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm17, %xmm5, %xmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm5, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm12, %xmm8, %xmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm26, %xmm7, %xmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm5, %xmm6, %xmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3],xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm18, %xmm6, %xmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm5, %zmm6, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} zmm4 = zmm4[0,1,0,1,4,5,4,5]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm13, %zmm5, %zmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm12, %xmm8, %xmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm27, %xmm7, %xmm12
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm6, %xmm12, %xmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3],xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm18, %xmm7, %xmm7
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm6, %zmm7, %zmm6
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} zmm5 = zmm5[0,1,0,1,4,5,4,5]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} zmm6 = zmm6[0,1,0,1,4,5,4,5]
 ; AVX512BW-ONLY-SLOW-NEXT:    movabsq $871499720017774092, %rax # imm = 0xC183060C183060C
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovq %rax, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm4, %zmm5 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm13, %xmm0, %xmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm5, %zmm6 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm2, %xmm1, %xmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm4, %xmm2, %xmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm10, %xmm0, %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm10, %xmm1, %xmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,18,18,18,20,18,18,18,20,19,19,19,19,18,18,18,20]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermw %zmm15, %zmm1, %zmm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[0,1,0,1,4,5,4,5]
@@ -8992,9 +8993,9 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    movabsq $8133997386832558192, %rax # imm = 0x70E1C3870E1C3870
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovq %rax, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm5 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm6 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, (%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, (%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, 320(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, 256(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, 192(%rax)
@@ -9331,28 +9332,27 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQBW-SLOW-NEXT:    vpermw %ymm15, %ymm1, %ymm1
 ; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm3
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa (%r9), %ymm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
-; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm20, %ymm10, %ymm0
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm22 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm22, %ymm10, %ymm0
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa (%r8), %ymm11
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm24 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
-; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm24, %ymm11, %ymm1
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm23 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm23, %ymm11, %ymm1
 ; AVX512DQBW-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa (%r9), %xmm0
+; AVX512DQBW-SLOW-NEXT:    vmovdqa (%r9), %xmm4
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa (%r8), %xmm1
-; AVX512DQBW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10>
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm4, %xmm6, %xmm6
+; AVX512DQBW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
 ; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm6, %zmm21
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm6, %zmm20
 ; AVX512DQBW-SLOW-NEXT:    movabsq $2323999253380730912, %r10 # imm = 0x2040810204081020
 ; AVX512DQBW-SLOW-NEXT:    kmovq %r10, %k1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm3, %zmm21 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm3, %zmm20 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdx), %ymm16
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm14 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm14, %ymm16, %ymm3
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rcx), %ymm17
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm23 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm23, %ymm17, %ymm5
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm25 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm25, %ymm17, %ymm5
 ; AVX512DQBW-SLOW-NEXT:    vpor %ymm3, %ymm5, %ymm3
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa (%rdx), %xmm5
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa (%rcx), %xmm6
@@ -9360,194 +9360,195 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7>
 ; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm12, %xmm7, %xmm7
 ; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm7, %zmm22
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm7, %zmm21
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdi), %ymm18
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm25 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
-; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm25, %ymm18, %ymm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm26 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm26, %ymm18, %ymm3
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rsi), %ymm19
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm26 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
-; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm26, %ymm19, %ymm7
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm27 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm27, %ymm19, %ymm7
 ; AVX512DQBW-SLOW-NEXT:    vpor %ymm3, %ymm7, %ymm3
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa (%rdi), %xmm7
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa (%rsi), %xmm8
-; AVX512DQBW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm27 = xmm8[8],xmm7[8],xmm8[9],xmm7[9],xmm8[10],xmm7[10],xmm8[11],xmm7[11],xmm8[12],xmm7[12],xmm8[13],xmm7[13],xmm8[14],xmm7[14],xmm8[15],xmm7[15]
+; AVX512DQBW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm24 = xmm8[8],xmm7[8],xmm8[9],xmm7[9],xmm8[10],xmm7[10],xmm8[11],xmm7[11],xmm8[12],xmm7[12],xmm8[13],xmm7[13],xmm8[14],xmm7[14],xmm8[15],xmm7[15]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm13 = <2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u>
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm13, %xmm27, %xmm27
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm27 = ymm27[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm27, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm13, %xmm24, %xmm24
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm24 = ymm24[0,1,0,1]
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm24, %zmm3
 ; AVX512DQBW-SLOW-NEXT:    movabsq $435749860008887046, %r10 # imm = 0x60C183060C18306
 ; AVX512DQBW-SLOW-NEXT:    kmovq %r10, %k1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm22, %zmm3 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm21, %zmm3 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    movabsq $4066998693416279096, %r10 # imm = 0x3870E1C3870E1C38
 ; AVX512DQBW-SLOW-NEXT:    kmovq %r10, %k1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm21, %zmm3 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm21 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10]
-; AVX512DQBW-SLOW-NEXT:    vpermw %ymm2, %ymm21, %ymm21
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm20, %zmm3 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28]
+; AVX512DQBW-SLOW-NEXT:    vpermw %ymm2, %ymm21, %ymm20
 ; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm9, %ymm2, %ymm9
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm21, %zmm9, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 32(%r9), %ymm21
-; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm20, %ymm21, %ymm20
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm20, %zmm9, %zmm9
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 32(%r9), %ymm20
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm22, %ymm20, %ymm24
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 32(%r8), %ymm22
-; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm24, %ymm22, %ymm24
-; AVX512DQBW-SLOW-NEXT:    vporq %ymm20, %ymm24, %ymm20
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm24 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm22[20],zero,ymm22[18],zero,ymm22[20,21,20,21],zero,ymm22[19],zero,ymm22[19,20,21,22],zero
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm24 = ymm24[2,3,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm27 = ymm21[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm21[20],zero,ymm21[18],zero,zero,zero,zero,ymm21[21],zero,ymm21[19],zero,zero,zero,zero,ymm21[22]
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm27 = ymm27[2,3,2,3]
-; AVX512DQBW-SLOW-NEXT:    vporq %ymm24, %ymm27, %ymm24
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm24, %zmm20, %zmm24
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm23, %ymm22, %ymm23
+; AVX512DQBW-SLOW-NEXT:    vporq %ymm24, %ymm23, %ymm24
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm23 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128>
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm23, %ymm20, %ymm28
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm28 = ymm28[2,3,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm29 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm22[20],zero,ymm22[18],zero,ymm22[20,21,20,21],zero,ymm22[19],zero,ymm22[19,20,21,22],zero
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm29 = ymm29[2,3,2,3]
+; AVX512DQBW-SLOW-NEXT:    vporq %ymm29, %ymm28, %ymm28
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm28, %zmm24, %zmm28
 ; AVX512DQBW-SLOW-NEXT:    movabsq $145249953336295682, %r10 # imm = 0x204081020408102
 ; AVX512DQBW-SLOW-NEXT:    kmovq %r10, %k1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm9, %zmm24 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 32(%rsi), %ymm27
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 32(%rdi), %ymm28
-; AVX512DQBW-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm28[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm9, %zmm28 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 32(%rsi), %ymm29
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 32(%rdi), %ymm30
+; AVX512DQBW-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm30[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
 ; AVX512DQBW-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm9[0,0,1,1,4,4,5,5]
-; AVX512DQBW-SLOW-NEXT:    vpbroadcastd {{.*#+}} ymm20 = [5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6]
+; AVX512DQBW-SLOW-NEXT:    vpbroadcastd {{.*#+}} ymm24 = [5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6]
 ; AVX512DQBW-SLOW-NEXT:    movl $676341840, %r10d # imm = 0x28502850
 ; AVX512DQBW-SLOW-NEXT:    kmovd %r10d, %k1
-; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm20, %ymm27, %ymm9 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm24, %ymm29, %ymm9 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm25, %ymm28, %ymm25
-; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm26, %ymm27, %ymm26
-; AVX512DQBW-SLOW-NEXT:    vporq %ymm25, %ymm26, %ymm25
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm25, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 32(%rdx), %ymm26
-; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm14, %ymm26, %ymm14
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 32(%rcx), %ymm29
-; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm23, %ymm29, %ymm23
-; AVX512DQBW-SLOW-NEXT:    vporq %ymm14, %ymm23, %ymm14
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm23 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm26[18],zero,ymm26[18,19,20,21],zero,ymm26[19],zero,ymm26[25,26,27,22],zero,ymm26[20],zero
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm23 = ymm23[2,3,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm25 = ymm29[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm29[18],zero,zero,zero,zero,ymm29[21],zero,ymm29[19],zero,zero,zero,zero,ymm29[22],zero,ymm29[20]
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm26, %ymm30, %ymm26
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm27, %ymm29, %ymm27
+; AVX512DQBW-SLOW-NEXT:    vporq %ymm26, %ymm27, %ymm26
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm26, %zmm9
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 32(%rdx), %ymm31
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm14, %ymm31, %ymm14
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm0
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm25, %ymm0, %ymm25
+; AVX512DQBW-SLOW-NEXT:    vporq %ymm14, %ymm25, %ymm14
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm26 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128>
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm26, %ymm0, %ymm25
 ; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm25 = ymm25[2,3,2,3]
-; AVX512DQBW-SLOW-NEXT:    vporq %ymm23, %ymm25, %ymm23
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm23, %zmm14, %zmm14
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm27 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm31[18],zero,ymm31[18,19,20,21],zero,ymm31[19],zero,ymm31[25,26,27,22],zero,ymm31[20],zero
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm27 = ymm27[2,3,2,3]
+; AVX512DQBW-SLOW-NEXT:    vporq %ymm27, %ymm25, %ymm25
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm25, %zmm14, %zmm14
 ; AVX512DQBW-SLOW-NEXT:    movabsq $3485998880071096368, %r10 # imm = 0x3060C183060C1830
 ; AVX512DQBW-SLOW-NEXT:    kmovq %r10, %k2
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm14, %zmm9 {%k2}
 ; AVX512DQBW-SLOW-NEXT:    movabsq $-4357498600088870461, %r10 # imm = 0xC3870E1C3870E1C3
 ; AVX512DQBW-SLOW-NEXT:    kmovq %r10, %k3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm24, %zmm9 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vpshufhw {{.*#+}} ymm14 = ymm28[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm28, %zmm9 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vpshufhw {{.*#+}} ymm14 = ymm30[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
 ; AVX512DQBW-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm14[2,2,3,3,6,6,7,7]
-; AVX512DQBW-SLOW-NEXT:    vpbroadcastd {{.*#+}} ymm24 = [13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14]
+; AVX512DQBW-SLOW-NEXT:    vpbroadcastd {{.*#+}} ymm25 = [13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14]
 ; AVX512DQBW-SLOW-NEXT:    movl $338170920, %r10d # imm = 0x14281428
 ; AVX512DQBW-SLOW-NEXT:    kmovd %r10d, %k4
-; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm24, %ymm27, %ymm14 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm25, %ymm29, %ymm14 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x2 {{.*#+}} ymm28 = [25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
+; AVX512DQBW-SLOW-NEXT:    # ymm28 = mem[0,1,0,1]
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm28, %ymm29, %ymm27
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm27 = ymm27[2,3,2,3]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm29 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm29, %ymm30, %ymm30
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm30 = ymm30[2,3,2,3]
+; AVX512DQBW-SLOW-NEXT:    vporq %ymm27, %ymm30, %ymm27
 ; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x2 {{.*#+}} ymm23 = [25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
-; AVX512DQBW-SLOW-NEXT:    # ymm23 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm23, %ymm27, %ymm25
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm25 = ymm25[2,3,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm27 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
-; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm27, %ymm28, %ymm28
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm28 = ymm28[2,3,2,3]
-; AVX512DQBW-SLOW-NEXT:    vporq %ymm25, %ymm28, %ymm25
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm25, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm25 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm26[24,25],zero,ymm26[23],zero,ymm26[21,22,23,26],zero,ymm26[24],zero,ymm26[28,29,26,27]
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm25 = ymm25[2,3,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm28 = ymm29[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm29[25],zero,ymm29[23],zero,zero,zero,zero,ymm29[26],zero,ymm29[24],zero,zero,zero,zero
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm28 = ymm28[2,3,2,3]
-; AVX512DQBW-SLOW-NEXT:    vporq %ymm25, %ymm28, %ymm28
-; AVX512DQBW-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm25 = [11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12]
-; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm25, %ymm29, %ymm29
-; AVX512DQBW-SLOW-NEXT:    vpshufhw {{.*#+}} ymm26 = ymm26[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512DQBW-SLOW-NEXT:    vpshufd {{.*#+}} ymm26 = ymm26[0,2,3,3,4,6,7,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm26, %ymm29 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm26
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm29 = ymm29[2,3,2,3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm29, %zmm28, %zmm28
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%r8), %zmm29
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm27, %zmm14
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm27 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm31[24,25],zero,ymm31[23],zero,ymm31[21,22,23,26],zero,ymm31[24],zero,ymm31[28,29,26,27]
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm27 = ymm27[2,3,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm30 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm0[25],zero,ymm0[23],zero,zero,zero,zero,ymm0[26],zero,ymm0[24],zero,zero,zero,zero
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm30 = ymm30[2,3,2,3]
+; AVX512DQBW-SLOW-NEXT:    vporq %ymm27, %ymm30, %ymm30
+; AVX512DQBW-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm27 = [11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12]
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm27, %ymm0, %ymm0
+; AVX512DQBW-SLOW-NEXT:    vpshufhw {{.*#+}} ymm31 = ymm31[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512DQBW-SLOW-NEXT:    vpshufd {{.*#+}} ymm31 = ymm31[0,2,3,3,4,6,7,7]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm31, %ymm0 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm30, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%r8), %zmm30
 ; AVX512DQBW-SLOW-NEXT:    movabsq $1742999440035548184, %r10 # imm = 0x183060C183060C18
 ; AVX512DQBW-SLOW-NEXT:    kmovq %r10, %k3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm28, %zmm14 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%r9), %zmm28
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm22 = zmm22[0,1,2,3],zmm28[4,5,6,7]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm14 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%r9), %zmm0
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm22 = zmm22[0,1,2,3],zmm0[4,5,6,7]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} zmm22 = zmm22[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,zmm22[23],zero,zmm22[23,24,25,26],zero,zmm22[24],zero,zmm22[30,31,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,61],zero,zmm22[59],zero,zero,zero,zero,zmm22[62],zero,zmm22[60],zero,zero,zero,zero,zmm22[63],zero
 ; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} zmm22 = zmm22[2,3,2,3,6,7,6,7]
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm21 = zmm21[0,1,2,3],zmm29[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} zmm21 = zmm21[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm21[25],zero,zmm21[23],zero,zero,zero,zero,zmm21[26],zero,zmm21[24],zero,zero,zmm21[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm21[59],zero,zero,zero,zero,zmm21[62],zero,zmm21[60],zero,zero,zero,zero,zmm21[63],zero,zmm21[61]
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} zmm21 = zmm21[2,3,2,3,6,7,6,7]
-; AVX512DQBW-SLOW-NEXT:    vporq %zmm22, %zmm21, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rax), %zmm21
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm20 = zmm20[0,1,2,3],zmm30[4,5,6,7]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} zmm20 = zmm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm20[25],zero,zmm20[23],zero,zero,zero,zero,zmm20[26],zero,zmm20[24],zero,zero,zmm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm20[59],zero,zero,zero,zero,zmm20[62],zero,zmm20[60],zero,zero,zero,zero,zmm20[63],zero,zmm20[61]
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} zmm20 = zmm20[2,3,2,3,6,7,6,7]
+; AVX512DQBW-SLOW-NEXT:    vporq %zmm22, %zmm20, %zmm20
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rax), %zmm22
 ; AVX512DQBW-SLOW-NEXT:    movabsq $6971997760142192736, %rax # imm = 0x60C183060C183060
 ; AVX512DQBW-SLOW-NEXT:    kmovq %rax, %k3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm22, %zmm14 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12,62,61,62,63,63,62,62,63,62,61,62,63,63,62,62,63]
-; AVX512DQBW-SLOW-NEXT:    vpermi2w %zmm21, %zmm2, %zmm22
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm20, %zmm14 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12,62,61,62,63,63,62,62,63,62,61,62,63,63,62,62,63]
+; AVX512DQBW-SLOW-NEXT:    vpermi2w %zmm22, %zmm2, %zmm20
 ; AVX512DQBW-SLOW-NEXT:    movabsq $-9150747060186627967, %rax # imm = 0x8102040810204081
 ; AVX512DQBW-SLOW-NEXT:    kmovq %rax, %k5
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm22, %zmm14 {%k5}
-; AVX512DQBW-SLOW-NEXT:    vpshuflw {{.*#+}} ymm22 = ymm18[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512DQBW-SLOW-NEXT:    vpshufd {{.*#+}} ymm30 = ymm22[0,0,1,1,4,4,5,5]
-; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm20, %ymm19, %ymm30 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm20, %zmm14 {%k5}
+; AVX512DQBW-SLOW-NEXT:    vpshuflw {{.*#+}} ymm20 = ymm18[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512DQBW-SLOW-NEXT:    vpshufd {{.*#+}} ymm20 = ymm20[0,0,1,1,4,4,5,5]
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm24, %ymm19, %ymm20 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm28, %ymm19, %ymm24
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm24 = ymm24[2,3,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm29, %ymm18, %ymm28
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm28 = ymm28[2,3,2,3]
+; AVX512DQBW-SLOW-NEXT:    vporq %ymm24, %ymm28, %ymm24
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm20 = ymm20[2,3,2,3]
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm24, %zmm20, %zmm28
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm20
-; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm23, %ymm19, %ymm22
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm22 = ymm22[2,3,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm27, %ymm18, %ymm23
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm23 = ymm23[2,3,2,3]
-; AVX512DQBW-SLOW-NEXT:    vporq %ymm22, %ymm23, %ymm23
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 32(%r9), %xmm22
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm27 = ymm30[2,3,2,3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm23, %zmm27, %zmm27
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 32(%r8), %xmm23
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm26, %zmm26
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} zmm26 = zmm26[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,zmm26[18,19,20,21],zero,zmm26[19],zero,zmm26[25,26,27,22],zero,zmm26[20],zero,zmm26[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,56,57],zero,zmm26[55],zero,zmm26[53,54,55,58],zero,zmm26[56],zero,zmm26[60,61,58,59]
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} zmm26 = zmm26[2,3,2,3,6,7,6,7]
 ; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm17, %zmm20, %zmm20
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} zmm20 = zmm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm20[18],zero,zero,zero,zero,zmm20[21],zero,zmm20[19],zero,zero,zero,zero,zmm20[22],zero,zmm20[20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm20[57],zero,zmm20[55],zero,zero,zero,zero,zmm20[58],zero,zmm20[56],zero,zero,zero,zero
+; AVX512DQBW-SLOW-NEXT:    vpshufb %zmm26, %zmm20, %zmm20
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm24
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm24, %zmm24
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} zmm24 = zmm24[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,zmm24[18,19,20,21],zero,zmm24[19],zero,zmm24[25,26,27,22],zero,zmm24[20],zero,zmm24[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,56,57],zero,zmm24[55],zero,zmm24[53,54,55,58],zero,zmm24[56],zero,zmm24[60,61,58,59]
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} zmm24 = zmm24[2,3,2,3,6,7,6,7]
 ; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} zmm20 = zmm20[2,3,2,3,6,7,6,7]
-; AVX512DQBW-SLOW-NEXT:    vporq %zmm26, %zmm20, %zmm20
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 32(%rdx), %xmm26
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm27, %zmm20 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 32(%rcx), %xmm27
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm29, %zmm29
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} zmm29 = zmm29[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,zmm29[18],zero,zmm29[20,21,20,21],zero,zmm29[19],zero,zmm29[19,20,21,22],zero,zmm29[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,56,57,56,57],zero,zmm29[55],zero,zmm29[55,56,57,58],zero,zmm29[56],zero,zmm29[62,63]
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} zmm29 = zmm29[2,3,2,3,6,7,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm28, %zmm28
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} zmm28 = zmm28[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm28[20],zero,zmm28[18],zero,zero,zero,zero,zmm28[21],zero,zmm28[19],zero,zero,zero,zero,zmm28[22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm28[57],zero,zmm28[55],zero,zero,zero,zero,zmm28[58],zero,zmm28[56],zero,zero
+; AVX512DQBW-SLOW-NEXT:    vporq %zmm24, %zmm20, %zmm20
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 32(%r9), %xmm24
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm28, %zmm20 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 32(%r8), %xmm26
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vpshufb %zmm23, %zmm0, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 32(%rdx), %xmm23
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm30, %zmm28
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} zmm28 = zmm28[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,zmm28[18],zero,zmm28[20,21,20,21],zero,zmm28[19],zero,zmm28[19,20,21,22],zero,zmm28[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,56,57,56,57],zero,zmm28[55],zero,zmm28[55,56,57,58],zero,zmm28[56],zero,zmm28[62,63]
 ; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} zmm28 = zmm28[2,3,2,3,6,7,6,7]
-; AVX512DQBW-SLOW-NEXT:    vporq %zmm29, %zmm28, %zmm29
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm21, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28]
-; AVX512DQBW-SLOW-NEXT:    vpermw %zmm15, %zmm28, %zmm28
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
+; AVX512DQBW-SLOW-NEXT:    vporq %zmm28, %zmm0, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 32(%rcx), %xmm28
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm22, %zmm15
+; AVX512DQBW-SLOW-NEXT:    vpermw %zmm15, %zmm21, %zmm21
 ; AVX512DQBW-SLOW-NEXT:    movabsq $1161999626690365456, %rax # imm = 0x1020408102040810
 ; AVX512DQBW-SLOW-NEXT:    kmovq %rax, %k5
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm28, %zmm29 {%k5}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 32(%rdi), %xmm28
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm21, %zmm0 {%k5}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 32(%rdi), %xmm21
 ; AVX512DQBW-SLOW-NEXT:    movabsq $2033499346708139548, %rax # imm = 0x1C3870E1C3870E1C
 ; AVX512DQBW-SLOW-NEXT:    kmovq %rax, %k5
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm29, %zmm20 {%k5}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 32(%rsi), %xmm29
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm20 {%k5}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm0
 ; AVX512DQBW-SLOW-NEXT:    vpshufhw {{.*#+}} ymm18 = ymm18[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
-; AVX512DQBW-SLOW-NEXT:    vpshufd {{.*#+}} ymm30 = ymm18[2,2,3,3,6,6,7,7]
-; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm24, %ymm19, %ymm30 {%k4}
-; AVX512DQBW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm19 = xmm28[0],xmm29[0],xmm28[1],xmm29[1],xmm28[2],xmm29[2],xmm28[3],xmm29[3],xmm28[4],xmm29[4],xmm28[5],xmm29[5],xmm28[6],xmm29[6],xmm28[7],xmm29[7]
+; AVX512DQBW-SLOW-NEXT:    vpshufd {{.*#+}} ymm29 = ymm18[2,2,3,3,6,6,7,7]
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm25, %ymm19, %ymm29 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm19 = xmm21[0],xmm0[0],xmm21[1],xmm0[1],xmm21[2],xmm0[2],xmm21[3],xmm0[3],xmm21[4],xmm0[4],xmm21[5],xmm0[5],xmm21[6],xmm0[6],xmm21[7],xmm0[7]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm18 = <0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5>
 ; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm18, %xmm19, %xmm19
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm19 = zmm30[2,3,2,3],zmm19[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm24 = xmm26[0],xmm27[0],xmm26[1],xmm27[1],xmm26[2],xmm27[2],xmm26[3],xmm27[3],xmm26[4],xmm27[4],xmm26[5],xmm27[5],xmm26[6],xmm27[6],xmm26[7],xmm27[7]
-; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm25, %ymm17, %ymm25
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm19 = zmm29[2,3,2,3],zmm19[0,1,0,1]
+; AVX512DQBW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm25 = xmm23[0],xmm28[0],xmm23[1],xmm28[1],xmm23[2],xmm28[2],xmm23[3],xmm28[3],xmm23[4],xmm28[4],xmm23[5],xmm28[5],xmm23[6],xmm28[6],xmm23[7],xmm28[7]
+; AVX512DQBW-SLOW-NEXT:    vpshufb %ymm27, %ymm17, %ymm27
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9>
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm17, %xmm24, %xmm24
+; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm17, %xmm25, %xmm25
 ; AVX512DQBW-SLOW-NEXT:    vpshufhw {{.*#+}} ymm16 = ymm16[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
 ; AVX512DQBW-SLOW-NEXT:    vpshufd {{.*#+}} ymm16 = ymm16[0,2,3,3,4,6,7,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm16, %ymm25 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm16 = zmm25[2,3,2,3],zmm24[0,1,0,1]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm16, %ymm27 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm16 = zmm27[2,3,2,3],zmm25[0,1,0,1]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm19, %zmm16 {%k2}
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm10[27],zero,zero,zero,zero,ymm10[30],zero,ymm10[28],zero,zero,zero,zero,ymm10[31],zero
 ; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm11[27],zero,zero,zero,zero,ymm11[30],zero,ymm11[28],zero,zero,zero,zero,ymm11[31],zero,ymm11[29]
 ; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vpor %ymm10, %ymm11, %ymm11
-; AVX512DQBW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm19 = xmm23[0],xmm22[0],xmm23[1],xmm22[1],xmm23[2],xmm22[2],xmm23[3],xmm22[3],xmm23[4],xmm22[4],xmm23[5],xmm22[5],xmm23[6],xmm22[6],xmm23[7],xmm22[7]
+; AVX512DQBW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm19 = xmm26[0],xmm24[0],xmm26[1],xmm24[1],xmm26[2],xmm24[2],xmm26[3],xmm24[3],xmm26[4],xmm24[4],xmm26[5],xmm24[5],xmm26[6],xmm24[6],xmm26[7],xmm24[7]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u>
 ; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm10, %xmm19, %xmm19
 ; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm19 = ymm19[0,1,0,1]
 ; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm19, %zmm11, %zmm11
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm19 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
-; AVX512DQBW-SLOW-NEXT:    vpermw %zmm21, %zmm19, %zmm19
+; AVX512DQBW-SLOW-NEXT:    vpermw %zmm22, %zmm19, %zmm19
 ; AVX512DQBW-SLOW-NEXT:    movabsq $580999813345182728, %rax # imm = 0x810204081020408
 ; AVX512DQBW-SLOW-NEXT:    kmovq %rax, %k1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm19, %zmm11 {%k1}
@@ -9555,64 +9556,64 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQBW-SLOW-NEXT:    kmovq %rax, %k1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm11, %zmm16 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm19 = <u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u>
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm19, %xmm27, %xmm11
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm24 = <u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u>
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm24, %xmm26, %xmm25
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm11, %xmm25, %xmm11
-; AVX512DQBW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm25 = xmm27[8],xmm26[8],xmm27[9],xmm26[9],xmm27[10],xmm26[10],xmm27[11],xmm26[11],xmm27[12],xmm26[12],xmm27[13],xmm26[13],xmm27[14],xmm26[14],xmm27[15],xmm26[15]
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm12, %xmm25, %xmm12
+; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm19, %xmm28, %xmm11
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm25 = <u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u>
+; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm25, %xmm23, %xmm27
+; AVX512DQBW-SLOW-NEXT:    vporq %xmm11, %xmm27, %xmm11
+; AVX512DQBW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm23 = xmm28[8],xmm23[8],xmm28[9],xmm23[9],xmm28[10],xmm23[10],xmm28[11],xmm23[11],xmm28[12],xmm23[12],xmm28[13],xmm23[13],xmm28[14],xmm23[14],xmm28[15],xmm23[15]
+; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm12, %xmm23, %xmm12
 ; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm12, %zmm11, %zmm11
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128>
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm12, %xmm29, %xmm25
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm26 = <u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9>
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm26, %xmm28, %xmm27
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm25, %xmm27, %xmm25
-; AVX512DQBW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm27 = xmm29[8],xmm28[8],xmm29[9],xmm28[9],xmm29[10],xmm28[10],xmm29[11],xmm28[11],xmm29[12],xmm28[12],xmm29[13],xmm28[13],xmm29[14],xmm28[14],xmm29[15],xmm28[15]
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} zmm28 = zmm11[0,1,0,1,4,5,4,5]
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm13, %xmm27, %xmm11
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm11, %zmm25, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} zmm11 = zmm11[0,1,0,1,4,5,4,5]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm28, %zmm11 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm13 = <128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6>
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm13, %xmm22, %xmm25
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm27 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4,52,53,52,53,53,54,53,54,52,53,52,53,53,54,53,54]
-; AVX512DQBW-SLOW-NEXT:    vpermi2w %zmm21, %zmm2, %zmm27
+; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm12, %xmm0, %xmm23
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm27 = <u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9>
+; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm27, %xmm21, %xmm28
+; AVX512DQBW-SLOW-NEXT:    vporq %xmm23, %xmm28, %xmm23
+; AVX512DQBW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm21[8],xmm0[9],xmm21[9],xmm0[10],xmm21[10],xmm0[11],xmm21[11],xmm0[12],xmm21[12],xmm0[13],xmm21[13],xmm0[14],xmm21[14],xmm0[15],xmm21[15]
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} zmm21 = zmm11[0,1,0,1,4,5,4,5]
+; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm13, %xmm0, %xmm0
+; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm23, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} zmm11 = zmm0[0,1,0,1,4,5,4,5]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm21, %zmm11 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6>
+; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm0, %xmm24, %xmm13
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4,52,53,52,53,53,54,53,54,52,53,52,53,53,54,53,54]
+; AVX512DQBW-SLOW-NEXT:    vpermi2w %zmm22, %zmm2, %zmm21
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128>
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm2, %xmm23, %xmm21
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm25, %xmm21, %xmm21
-; AVX512DQBW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm22 = xmm23[8],xmm22[8],xmm23[9],xmm22[9],xmm23[10],xmm22[10],xmm23[11],xmm22[11],xmm23[12],xmm22[12],xmm23[13],xmm22[13],xmm23[14],xmm22[14],xmm23[15],xmm22[15]
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm4, %xmm22, %xmm4
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm21, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} zmm4 = zmm4[0,1,0,1,4,5,4,5]
+; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm2, %xmm26, %xmm22
+; AVX512DQBW-SLOW-NEXT:    vporq %xmm13, %xmm22, %xmm13
+; AVX512DQBW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm22 = xmm26[8],xmm24[8],xmm26[9],xmm24[9],xmm26[10],xmm24[10],xmm26[11],xmm24[11],xmm26[12],xmm24[12],xmm26[13],xmm24[13],xmm26[14],xmm24[14],xmm26[15],xmm24[15]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = xmm22[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm22, %zmm13, %zmm13
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} zmm13 = zmm13[0,1,0,1,4,5,4,5]
 ; AVX512DQBW-SLOW-NEXT:    movabsq $290499906672591364, %rax # imm = 0x408102040810204
 ; AVX512DQBW-SLOW-NEXT:    kmovq %rax, %k1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm27, %zmm4 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm21, %zmm13 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    movabsq $-8714997200177740921, %rax # imm = 0x870E1C3870E1C387
 ; AVX512DQBW-SLOW-NEXT:    kmovq %rax, %k1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm4, %zmm11 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm19, %xmm6, %xmm4
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm24, %xmm5, %xmm19
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm4, %xmm19, %xmm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm13, %zmm11 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm19, %xmm6, %xmm13
+; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm25, %xmm5, %xmm19
+; AVX512DQBW-SLOW-NEXT:    vporq %xmm13, %xmm19, %xmm13
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm17, %xmm5, %xmm5
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm5, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm12, %xmm8, %xmm5
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm26, %xmm7, %xmm6
-; AVX512DQBW-SLOW-NEXT:    vpor %xmm5, %xmm6, %xmm5
-; AVX512DQBW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3],xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm18, %xmm6, %xmm6
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm5, %zmm6, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} zmm4 = zmm4[0,1,0,1,4,5,4,5]
+; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm13, %zmm5, %zmm5
+; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm12, %xmm8, %xmm6
+; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm27, %xmm7, %xmm12
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm6, %xmm12, %xmm6
+; AVX512DQBW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3],xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
+; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm18, %xmm7, %xmm7
+; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm6, %zmm7, %zmm6
 ; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} zmm5 = zmm5[0,1,0,1,4,5,4,5]
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} zmm6 = zmm6[0,1,0,1,4,5,4,5]
 ; AVX512DQBW-SLOW-NEXT:    movabsq $871499720017774092, %rax # imm = 0xC183060C183060C
 ; AVX512DQBW-SLOW-NEXT:    kmovq %rax, %k1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm4, %zmm5 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm13, %xmm0, %xmm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm5, %zmm6 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
 ; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm2, %xmm1, %xmm2
-; AVX512DQBW-SLOW-NEXT:    vpor %xmm4, %xmm2, %xmm2
-; AVX512DQBW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm10, %xmm0, %xmm0
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX512DQBW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm10, %xmm1, %xmm1
+; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,18,18,18,20,18,18,18,20,19,19,19,19,18,18,18,20]
 ; AVX512DQBW-SLOW-NEXT:    vpermw %zmm15, %zmm1, %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[0,1,0,1,4,5,4,5]
@@ -9621,9 +9622,9 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    movabsq $8133997386832558192, %rax # imm = 0x70E1C3870E1C3870
 ; AVX512DQBW-SLOW-NEXT:    kmovq %rax, %k1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm5 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm6 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, (%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, (%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, 320(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, 256(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm16, 192(%rax)
diff --git a/llvm/test/CodeGen/X86/vector-sext.ll b/llvm/test/CodeGen/X86/vector-sext.ll
index eae1b1b23bcea6e..4709525e71c279d 100644
--- a/llvm/test/CodeGen/X86/vector-sext.ll
+++ b/llvm/test/CodeGen/X86/vector-sext.ll
@@ -2357,8 +2357,9 @@ define <8 x i32> @load_sext_8i1_to_8i32(ptr%ptr) {
 ; AVX1-NEXT:    vmovd %eax, %xmm0
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX1-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT:    vmovaps {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128]
+; AVX1-NEXT:    vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
@@ -2597,8 +2598,9 @@ define <16 x i16> @load_sext_16i1_to_16i16(ptr%ptr) {
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX1-NEXT:    vpcmpeqw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT:    vmovaps {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
+; AVX1-NEXT:    vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpcmpeqw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll
index b34af730565e4d5..3aafb1af366ba56 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll
@@ -653,12 +653,13 @@ define <16 x i64> @bit_reversal_permutation(<16 x i64> %a0) nounwind {
 ; X86-AVX1-NEXT:    movl %esp, %ebp
 ; X86-AVX1-NEXT:    andl $-32, %esp
 ; X86-AVX1-NEXT:    subl $32, %esp
-; X86-AVX1-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm0[2,3],ymm1[2,3]
-; X86-AVX1-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm2[2,3],mem[2,3]
-; X86-AVX1-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm3[0],ymm5[0],ymm3[2],ymm5[2]
-; X86-AVX1-NEXT:    vunpckhpd {{.*#+}} ymm3 = ymm3[1],ymm5[1],ymm3[3],ymm5[3]
+; X86-AVX1-NEXT:    vmovaps 8(%ebp), %ymm5
+; X86-AVX1-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm2[2,3],ymm5[2,3]
+; X86-AVX1-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm0[2,3],ymm1[2,3]
+; X86-AVX1-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm6[0],ymm3[0],ymm6[2],ymm3[2]
+; X86-AVX1-NEXT:    vunpckhpd {{.*#+}} ymm3 = ymm6[1],ymm3[1],ymm6[3],ymm3[3]
+; X86-AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm2, %ymm2
 ; X86-AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; X86-AVX1-NEXT:    vinsertf128 $1, 8(%ebp), %ymm2, %ymm2
 ; X86-AVX1-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
 ; X86-AVX1-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm1[1],ymm2[1],ymm1[3],ymm2[3]
 ; X86-AVX1-NEXT:    vmovaps %ymm4, %ymm1
@@ -672,17 +673,16 @@ define <16 x i64> @bit_reversal_permutation(<16 x i64> %a0) nounwind {
 ; X86-AVX2-NEXT:    movl %esp, %ebp
 ; X86-AVX2-NEXT:    andl $-32, %esp
 ; X86-AVX2-NEXT:    subl $32, %esp
-; X86-AVX2-NEXT:    vmovaps 8(%ebp), %ymm3
-; X86-AVX2-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm0[2,3],ymm1[2,3]
-; X86-AVX2-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
-; X86-AVX2-NEXT:    vpermpd {{.*#+}} ymm5 = ymm5[0,2,2,3]
-; X86-AVX2-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2,3],ymm4[4,5],ymm5[6,7]
-; X86-AVX2-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm2[2,3],ymm3[2,3]
+; X86-AVX2-NEXT:    vmovaps 8(%ebp), %ymm6
+; X86-AVX2-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm0[2,3],ymm1[2,3]
+; X86-AVX2-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm2[0],ymm6[0],ymm2[2],ymm6[2]
+; X86-AVX2-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[0,2,2,3]
+; X86-AVX2-NEXT:    vblendps {{.*#+}} ymm4 = ymm3[0,1],ymm4[2,3],ymm3[4,5],ymm4[6,7]
+; X86-AVX2-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm2[2,3],ymm6[2,3]
 ; X86-AVX2-NEXT:    vunpckhpd {{.*#+}} ymm5 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
 ; X86-AVX2-NEXT:    vpermpd {{.*#+}} ymm5 = ymm5[2,1,3,3]
 ; X86-AVX2-NEXT:    vblendps {{.*#+}} ymm3 = ymm5[0,1],ymm3[2,3],ymm5[4,5],ymm3[6,7]
 ; X86-AVX2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm5
-; X86-AVX2-NEXT:    vmovaps 8(%ebp), %xmm6
 ; X86-AVX2-NEXT:    vmovlhps {{.*#+}} xmm7 = xmm2[0],xmm6[0]
 ; X86-AVX2-NEXT:    vpermpd {{.*#+}} ymm7 = ymm7[0,0,2,1]
 ; X86-AVX2-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm7[2,3],ymm5[4,5],ymm7[6,7]
diff --git a/llvm/test/CodeGen/X86/viabs.ll b/llvm/test/CodeGen/X86/viabs.ll
index 974af2c089a1fa0..ba8194936977bfa 100644
--- a/llvm/test/CodeGen/X86/viabs.ll
+++ b/llvm/test/CodeGen/X86/viabs.ll
@@ -809,15 +809,15 @@ define <8 x i64> @test_abs_le_v8i64_fold(ptr %a.ptr) nounwind {
 ;
 ; AVX1-LABEL: test_abs_le_v8i64_fold:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovupd (%rdi), %ymm0
-; AVX1-NEXT:    vmovupd 32(%rdi), %ymm1
+; AVX1-NEXT:    vmovdqu (%rdi), %ymm0
+; AVX1-NEXT:    vmovdqu 32(%rdi), %ymm1
 ; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsubq 16(%rdi), %xmm2, %xmm3
-; AVX1-NEXT:    vpsubq (%rdi), %xmm2, %xmm4
+; AVX1-NEXT:    vpsubq %xmm0, %xmm2, %xmm4
 ; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
 ; AVX1-NEXT:    vblendvpd %ymm0, %ymm3, %ymm0, %ymm0
 ; AVX1-NEXT:    vpsubq 48(%rdi), %xmm2, %xmm3
-; AVX1-NEXT:    vpsubq 32(%rdi), %xmm2, %xmm2
+; AVX1-NEXT:    vpsubq %xmm1, %xmm2, %xmm2
 ; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
 ; AVX1-NEXT:    vblendvpd %ymm1, %ymm2, %ymm1, %ymm1
 ; AVX1-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/vselect-avx.ll b/llvm/test/CodeGen/X86/vselect-avx.ll
index 5040232fadad253..529101f4df0b255 100644
--- a/llvm/test/CodeGen/X86/vselect-avx.ll
+++ b/llvm/test/CodeGen/X86/vselect-avx.ll
@@ -328,8 +328,8 @@ define void @vselect_concat_splat() {
 ; AVX512-LABEL: vselect_concat_splat:
 ; AVX512:       ## %bb.0: ## %entry
 ; AVX512-NEXT:    vmovups (%rax), %ymm0
-; AVX512-NEXT:    vmovups (%rax), %xmm1
-; AVX512-NEXT:    vmovaps {{.*#+}} xmm2 = [0,3,6,9]
+; AVX512-NEXT:    vmovaps {{.*#+}} ymm1 = [0,3,6,9,1,4,7,10]
+; AVX512-NEXT:    vmovaps %ymm1, %ymm2
 ; AVX512-NEXT:    vpermi2ps %ymm1, %ymm0, %ymm2
 ; AVX512-NEXT:    vmovups 32, %xmm3
 ; AVX512-NEXT:    vmovups 0, %ymm4
@@ -337,9 +337,8 @@ define void @vselect_concat_splat() {
 ; AVX512-NEXT:    vcmpneqps %xmm5, %xmm2, %k0
 ; AVX512-NEXT:    kshiftlw $4, %k0, %k1
 ; AVX512-NEXT:    korw %k1, %k0, %k1
-; AVX512-NEXT:    vmovaps {{.*#+}} ymm2 = [0,3,6,9,1,4,7,10]
-; AVX512-NEXT:    vpermt2ps %ymm3, %ymm2, %ymm4
-; AVX512-NEXT:    vpermt2ps %ymm1, %ymm2, %ymm0
+; AVX512-NEXT:    vpermt2ps %ymm3, %ymm1, %ymm4
+; AVX512-NEXT:    vpermt2ps %ymm1, %ymm1, %ymm0
 ; AVX512-NEXT:    vmovaps %ymm4, %ymm0 {%k1}
 ; AVX512-NEXT:    vmovups %ymm0, (%rax)
 ; AVX512-NEXT:    vzeroupper
diff --git a/llvm/test/CodeGen/X86/x86-interleaved-access.ll b/llvm/test/CodeGen/X86/x86-interleaved-access.ll
index aaba44c8dc11161..216332943993e37 100644
--- a/llvm/test/CodeGen/X86/x86-interleaved-access.ll
+++ b/llvm/test/CodeGen/X86/x86-interleaved-access.ll
@@ -8,13 +8,15 @@ define <4 x double> @load_factorf64_4(ptr %ptr) nounwind {
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovupd (%rdi), %ymm0
 ; AVX1-NEXT:    vmovupd 32(%rdi), %ymm1
-; AVX1-NEXT:    vinsertf128 $1, 64(%rdi), %ymm0, %ymm2
-; AVX1-NEXT:    vinsertf128 $1, 96(%rdi), %ymm1, %ymm3
-; AVX1-NEXT:    vhaddpd %ymm3, %ymm2, %ymm2
-; AVX1-NEXT:    vperm2f128 $49, 64(%rdi), %ymm0, %ymm0 # ymm0 = ymm0[2,3],mem[2,3]
-; AVX1-NEXT:    vperm2f128 $49, 96(%rdi), %ymm1, %ymm1 # ymm1 = ymm1[2,3],mem[2,3]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX1-NEXT:    vaddpd %ymm3, %ymm2, %ymm2
+; AVX1-NEXT:    vmovupd 64(%rdi), %ymm2
+; AVX1-NEXT:    vmovupd 96(%rdi), %ymm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm4
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm5
+; AVX1-NEXT:    vhaddpd %ymm5, %ymm4, %ymm4
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1-NEXT:    vaddpd %ymm2, %ymm4, %ymm2
 ; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
 ; AVX1-NEXT:    vaddpd %ymm0, %ymm2, %ymm0
 ; AVX1-NEXT:    retq
@@ -51,13 +53,15 @@ define <4 x double> @load_factorf64_2(ptr %ptr) nounwind {
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovupd (%rdi), %ymm0
 ; AVX1-NEXT:    vmovupd 32(%rdi), %ymm1
-; AVX1-NEXT:    vinsertf128 $1, 64(%rdi), %ymm0, %ymm2
-; AVX1-NEXT:    vinsertf128 $1, 96(%rdi), %ymm1, %ymm3
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
-; AVX1-NEXT:    vperm2f128 $49, 64(%rdi), %ymm0, %ymm0 # ymm0 = ymm0[2,3],mem[2,3]
-; AVX1-NEXT:    vperm2f128 $49, 96(%rdi), %ymm1, %ymm1 # ymm1 = ymm1[2,3],mem[2,3]
+; AVX1-NEXT:    vmovupd 64(%rdi), %ymm2
+; AVX1-NEXT:    vmovupd 96(%rdi), %ymm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm4
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm5
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
 ; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX1-NEXT:    vmulpd %ymm0, %ymm2, %ymm0
+; AVX1-NEXT:    vmulpd %ymm0, %ymm4, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2OR512-LABEL: load_factorf64_2:
@@ -113,23 +117,25 @@ define <4 x i64> @load_factori64_4(ptr %ptr) nounwind {
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovups (%rdi), %ymm0
 ; AVX1-NEXT:    vmovups 32(%rdi), %ymm1
-; AVX1-NEXT:    vinsertf128 $1, 64(%rdi), %ymm0, %ymm2
-; AVX1-NEXT:    vinsertf128 $1, 96(%rdi), %ymm1, %ymm3
-; AVX1-NEXT:    vperm2f128 $49, 64(%rdi), %ymm0, %ymm0 # ymm0 = ymm0[2,3],mem[2,3]
-; AVX1-NEXT:    vperm2f128 $49, 96(%rdi), %ymm1, %ymm1 # ymm1 = ymm1[2,3],mem[2,3]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
+; AVX1-NEXT:    vmovups 64(%rdi), %ymm2
+; AVX1-NEXT:    vmovups 96(%rdi), %ymm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm4
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm5
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
 ; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm4, %xmm3
-; AVX1-NEXT:    vpaddq %xmm1, %xmm3, %xmm1
-; AVX1-NEXT:    vpaddq %xmm2, %xmm4, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT:    vpaddq %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpaddq %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vpaddq %xmm0, %xmm5, %xmm0
+; AVX1-NEXT:    vextractf128 $1, %ymm4, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm5
+; AVX1-NEXT:    vpaddq %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpaddq %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpaddq %xmm5, %xmm4, %xmm4
+; AVX1-NEXT:    vpaddq %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddq %xmm0, %xmm3, %xmm0
 ; AVX1-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
@@ -396,23 +402,22 @@ define <8 x i8> @interleaved_load_vf8_i8_stride4(ptr %ptr) nounwind {
 ;
 ; AVX512-LABEL: interleaved_load_vf8_i8_stride4:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512-NEXT:    vmovdqu (%rdi), %ymm0
 ; AVX512-NEXT:    vmovdqa 16(%rdi), %xmm1
 ; AVX512-NEXT:    vmovdqa {{.*#+}} xmm2 = <3,u,7,u,11,u,15,u,7,u,15,u,6,u,7,u>
 ; AVX512-NEXT:    vpshufb %xmm2, %xmm1, %xmm3
 ; AVX512-NEXT:    vpshufb %xmm2, %xmm0, %xmm2
 ; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
 ; AVX512-NEXT:    vpsrld $16, %xmm1, %xmm1
-; AVX512-NEXT:    vpsrld $16, %xmm0, %xmm0
-; AVX512-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
-; AVX512-NEXT:    vmovdqu (%rdi), %ymm1
-; AVX512-NEXT:    vpmovdw %zmm1, %ymm1
+; AVX512-NEXT:    vpsrld $16, %xmm0, %xmm3
+; AVX512-NEXT:    vpackusdw %xmm1, %xmm3, %xmm1
+; AVX512-NEXT:    vpaddb %xmm1, %xmm2, %xmm1
 ; AVX512-NEXT:    vmovdqu (%rdi), %ymm2
-; AVX512-NEXT:    vpsrlw $8, %ymm2, %ymm2
-; AVX512-NEXT:    vpmovwb %zmm2, %ymm2
-; AVX512-NEXT:    vpaddb %xmm2, %xmm1, %xmm1
-; AVX512-NEXT:    vpmullw %xmm0, %xmm1, %xmm0
+; AVX512-NEXT:    vpmovdw %zmm2, %ymm2
+; AVX512-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX512-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
+; AVX512-NEXT:    vpmullw %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -652,29 +657,27 @@ define <32 x i1> @interleaved_load_vf32_i8_stride4(ptr %ptr) nounwind {
 ; AVX2-NEXT:    vpshufb %xmm6, %xmm5, %xmm7
 ; AVX2-NEXT:    vpshufb %xmm6, %xmm4, %xmm6
 ; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
-; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm7 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm7 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
 ; AVX2-NEXT:    vpshufb %xmm7, %xmm3, %xmm8
-; AVX2-NEXT:    vpshufb %xmm7, %xmm2, %xmm7
-; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm7 = xmm7[0,1],xmm6[2,3]
-; AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm8 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
-; AVX2-NEXT:    vpshufb %ymm8, %ymm1, %ymm9
+; AVX2-NEXT:    vpshufb %xmm7, %xmm2, %xmm9
+; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm8 = xmm8[0,1],xmm6[2,3]
+; AVX2-NEXT:    vpshufb %ymm7, %ymm1, %ymm9
 ; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm6 = [0,4,0,4,0,4,0,4]
 ; AVX2-NEXT:    vpermd %ymm9, %ymm6, %ymm9
-; AVX2-NEXT:    vpshufb %ymm8, %ymm0, %ymm8
-; AVX2-NEXT:    vpermd %ymm8, %ymm6, %ymm8
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm9[6,7]
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
+; AVX2-NEXT:    vpshufb %ymm7, %ymm0, %ymm7
+; AVX2-NEXT:    vpermd %ymm7, %ymm6, %ymm7
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm9[6,7]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
 ; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm8 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
 ; AVX2-NEXT:    vpshufb %xmm8, %xmm5, %xmm9
 ; AVX2-NEXT:    vpshufb %xmm8, %xmm4, %xmm8
 ; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1]
-; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm9 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
-; AVX2-NEXT:    vpshufb %xmm9, %xmm3, %xmm10
-; AVX2-NEXT:    vpshufb %xmm9, %xmm2, %xmm9
-; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1]
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm8 = xmm9[0,1],xmm8[2,3]
 ; AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm9 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
+; AVX2-NEXT:    vpshufb %xmm9, %xmm3, %xmm10
+; AVX2-NEXT:    vpshufb %xmm9, %xmm2, %xmm11
+; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm8 = xmm10[0,1],xmm8[2,3]
 ; AVX2-NEXT:    vpshufb %ymm9, %ymm1, %ymm10
 ; AVX2-NEXT:    vpermd %ymm10, %ymm6, %ymm10
 ; AVX2-NEXT:    vpshufb %ymm9, %ymm0, %ymm9
@@ -686,12 +689,11 @@ define <32 x i1> @interleaved_load_vf32_i8_stride4(ptr %ptr) nounwind {
 ; AVX2-NEXT:    vpshufb %xmm8, %xmm5, %xmm9
 ; AVX2-NEXT:    vpshufb %xmm8, %xmm4, %xmm8
 ; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1]
-; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm9 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
-; AVX2-NEXT:    vpshufb %xmm9, %xmm3, %xmm10
-; AVX2-NEXT:    vpshufb %xmm9, %xmm2, %xmm9
-; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1]
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm8 = xmm9[0,1],xmm8[2,3]
 ; AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm9 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
+; AVX2-NEXT:    vpshufb %xmm9, %xmm3, %xmm10
+; AVX2-NEXT:    vpshufb %xmm9, %xmm2, %xmm11
+; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm8 = xmm10[0,1],xmm8[2,3]
 ; AVX2-NEXT:    vpshufb %ymm9, %ymm1, %ymm10
 ; AVX2-NEXT:    vpermd %ymm10, %ymm6, %ymm10
 ; AVX2-NEXT:    vpshufb %ymm9, %ymm0, %ymm9
@@ -702,15 +704,14 @@ define <32 x i1> @interleaved_load_vf32_i8_stride4(ptr %ptr) nounwind {
 ; AVX2-NEXT:    vpshufb %xmm9, %xmm5, %xmm5
 ; AVX2-NEXT:    vpshufb %xmm9, %xmm4, %xmm4
 ; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
-; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm5 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm5 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
 ; AVX2-NEXT:    vpshufb %xmm5, %xmm3, %xmm3
 ; AVX2-NEXT:    vpshufb %xmm5, %xmm2, %xmm2
 ; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
 ; AVX2-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3]
-; AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
-; AVX2-NEXT:    vpshufb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    vpshufb %ymm5, %ymm1, %ymm1
 ; AVX2-NEXT:    vpermd %ymm1, %ymm6, %ymm1
-; AVX2-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpshufb %ymm5, %ymm0, %ymm0
 ; AVX2-NEXT:    vpermd %ymm0, %ymm6, %ymm0
 ; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
diff --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
index 92973274f291909..3ce4bb3306ab0f7 100644
--- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
+++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
@@ -3314,12 +3314,14 @@ define void @vec384_i8_widen_to_i128_factor16_broadcast_to_v3i128_factor3(ptr %i
 ; AVX2-NEXT:    vpaddb 32(%rsi), %ymm1, %ymm1
 ; AVX2-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm2 = ymm0[0,1,0,1]
-; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm2[2,3]
-; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
 ; AVX2-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX2-NEXT:    vpblendvb %ymm3, %ymm1, %ymm2, %ymm1
-; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm2[2,3]
+; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX2-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX2-NEXT:    vpblendvb %ymm4, %ymm1, %ymm2, %ymm1
+; AVX2-NEXT:    vpand %xmm3, %xmm0, %xmm0
 ; AVX2-NEXT:    vpaddb (%rdx), %ymm1, %ymm1
 ; AVX2-NEXT:    vpaddb 32(%rdx), %ymm0, %ymm0
 ; AVX2-NEXT:    vmovdqa %ymm0, 32(%rcx)
diff --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
index bbd641662cc0359..134908f4c7392c7 100644
--- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
+++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
@@ -2665,18 +2665,19 @@ define void @vec384_i8_widen_to_i128_factor16_broadcast_to_v3i128_factor3(ptr %i
 ;
 ; AVX2-LABEL: vec384_i8_widen_to_i128_factor16_broadcast_to_v3i128_factor3:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm2 = mem[2,3],ymm1[2,3]
+; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX2-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm2 = mem[2,3],ymm0[2,3]
 ; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
 ; AVX2-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX2-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
-; AVX2-NEXT:    vpaddb (%rsi), %ymm1, %ymm1
-; AVX2-NEXT:    vpaddb 32(%rsi), %ymm0, %ymm0
-; AVX2-NEXT:    vmovdqa %ymm0, 32(%rdx)
-; AVX2-NEXT:    vmovdqa %ymm1, (%rdx)
+; AVX2-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpand (%rdi), %xmm1, %xmm1
+; AVX2-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
+; AVX2-NEXT:    vpaddb 32(%rsi), %ymm1, %ymm1
+; AVX2-NEXT:    vmovdqa %ymm1, 32(%rdx)
+; AVX2-NEXT:    vmovdqa %ymm0, (%rdx)
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;

From add93524b7bc7cf8c12a347610d84eda0e115f5d Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Thu, 23 Nov 2023 11:01:37 +0000
Subject: [PATCH 2/2] Add an early-out if the load isn't a simple XMM/YMM load,
 to reduce if-nesting depth

---
 llvm/lib/Target/X86/X86ISelDAGToDAG.cpp | 55 +++++++++++++------------
 1 file changed, 28 insertions(+), 27 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 8b9393580dc45bf..d4d152d1947d2b3 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -1041,33 +1041,34 @@ void X86DAGToDAGISel::PreprocessISelDAG() {
       // load, then just extract the lower subvector and avoid the second load.
       auto *Ld = cast<LoadSDNode>(N);
       MVT VT = N->getSimpleValueType(0);
-      if (ISD::isNormalLoad(Ld) && Ld->isSimple() &&
-          (VT.is128BitVector() || VT.is256BitVector())) {
-        SDValue Ptr = Ld->getBasePtr();
-        SDValue Chain = Ld->getChain();
-        for (SDNode *User : Ptr->uses()) {
-          auto *UserLd = dyn_cast<LoadSDNode>(N);
-          MVT UserVT = User->getSimpleValueType(0);
-          if (User != N && UserLd && ISD::isNormalLoad(User) &&
-              UserLd->getBasePtr() == Ptr && UserLd->getChain() == Chain &&
-              !User->hasAnyUseOfValue(1) &&
-              (UserVT.is256BitVector() || UserVT.is512BitVector()) &&
-              UserVT.getSizeInBits() > VT.getSizeInBits()) {
-            SDLoc dl(N);
-            unsigned NumSubElts =
-                VT.getSizeInBits() / UserVT.getScalarSizeInBits();
-            MVT SubVT = MVT::getVectorVT(UserVT.getScalarType(), NumSubElts);
-            SDValue Extract = CurDAG->getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT,
-                                              SDValue(User, 0),
-                                              CurDAG->getIntPtrConstant(0, dl));
-            SDValue Res = CurDAG->getBitcast(VT, Extract);
-            --I;
-            SDValue To[] = {Res, SDValue(UserLd, 1)};
-            CurDAG->ReplaceAllUsesWith(N, To);
-            ++I;
-            MadeChange = true;
-            continue;
-          }
+      if (!ISD::isNormalLoad(Ld) || !Ld->isSimple() ||
+          !(VT.is128BitVector() || VT.is256BitVector()))
+        break;
+
+      SDValue Ptr = Ld->getBasePtr();
+      SDValue Chain = Ld->getChain();
+      for (SDNode *User : Ptr->uses()) {
+        auto *UserLd = dyn_cast<LoadSDNode>(User);
+        MVT UserVT = User->getSimpleValueType(0);
+        if (User != N && UserLd && ISD::isNormalLoad(User) &&
+            UserLd->getBasePtr() == Ptr && UserLd->getChain() == Chain &&
+            !User->hasAnyUseOfValue(1) &&
+            (UserVT.is256BitVector() || UserVT.is512BitVector()) &&
+            UserVT.getSizeInBits() > VT.getSizeInBits()) {
+          SDLoc dl(N);
+          unsigned NumSubElts =
+              VT.getSizeInBits() / UserVT.getScalarSizeInBits();
+          MVT SubVT = MVT::getVectorVT(UserVT.getScalarType(), NumSubElts);
+          SDValue Extract = CurDAG->getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT,
+                                            SDValue(User, 0),
+                                            CurDAG->getIntPtrConstant(0, dl));
+          SDValue Res = CurDAG->getBitcast(VT, Extract);
+          --I;
+          SDValue To[] = {Res, SDValue(UserLd, 1)};
+          CurDAG->ReplaceAllUsesWith(N, To);
+          ++I;
+          MadeChange = true;
+          continue;
         }
       }
       break;
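
For context, a hand-written sketch of the shape of IR this fold targets (illustrative only, not taken from the patch's test suite): two simple loads of the same base pointer at different vector widths. Subject to the checks above (in particular, the wider load's chain result being otherwise unused), the 128-bit load can then be selected as an extract of the low subvector of the 256-bit load instead of a second memory access:

define <4 x i32> @reuse_wider_load(ptr %p) {
  %wide = load <8 x i32>, ptr %p    ; 256-bit (YMM) load
  %narrow = load <4 x i32>, ptr %p  ; 128-bit (XMM) load of the same ptr
  %hi = shufflevector <8 x i32> %wide, <8 x i32> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  %sum = add <4 x i32> %narrow, %hi ; keeps both loaded values live
  ret <4 x i32> %sum
}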



More information about the llvm-commits mailing list