[llvm] [X86] Fold blend(pshufb(x,m1),pshufb(y,m2)) -> blend(pshufb(x,blend(m1,m2)),pshufb(y,blend(m1,m2))) to reduce constant pool (PR #98466)
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Tue Jul 16 03:32:03 PDT 2024
https://github.com/RKSimon updated https://github.com/llvm/llvm-project/pull/98466
From cedb9ebdfb0e684fd241ed099527cac3301327c2 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Wed, 19 Jun 2024 13:10:53 +0100
Subject: [PATCH 1/2] [X86] Fold blend(pshufb(x,m1),pshufb(y,m2)) ->
blend(pshufb(x,blend(m1,m2)),pshufb(y,blend(m1,m2))) to reduce constant pool
Share a single PSHUFB mask between both shuffles when the blend uses non-overlapping elements from each of them.
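
For illustration only (not part of the patch): a rough, self-contained C++ sketch of why sharing the mask is sound. The helpers pshufb16/blend16 and the sample masks below are made up for the example; the real fold operates on X86ISD::PSHUFB/BLENDI SelectionDAG nodes via combineX86ShufflesConstants.

    // Standalone illustration (not LLVM code) of the mask-sharing idea:
    // in blend(pshufb(x,m1), pshufb(y,m2)) only the m1 bytes in lanes taken
    // from the first operand and the m2 bytes in lanes taken from the second
    // operand are observable, so one mask m3 = blend(m1,m2) can feed both
    // PSHUFBs without changing the result, saving a constant-pool entry.
    #include <array>
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    using V16 = std::array<uint8_t, 16>;

    // PSHUFB semantics: bit 7 of a mask byte zeroes that lane, otherwise the
    // low 4 bits index into Src.
    static V16 pshufb16(const V16 &Src, const V16 &Mask) {
      V16 R{};
      for (int I = 0; I != 16; ++I)
        R[I] = (Mask[I] & 0x80) ? 0 : Src[Mask[I] & 0x0F];
      return R;
    }

    // Byte-granularity blend: take B in lanes where Sel is true, else A.
    static V16 blend16(const V16 &A, const V16 &B,
                       const std::array<bool, 16> &Sel) {
      V16 R{};
      for (int I = 0; I != 16; ++I)
        R[I] = Sel[I] ? B[I] : A[I];
      return R;
    }

    int main() {
      // Arbitrary example data; the blend takes even bytes from the first
      // shuffle and odd bytes from the second, so the used elements of the
      // two masks do not overlap.
      V16 X, Y, M1, M2;
      std::array<bool, 16> Sel;
      for (int I = 0; I != 16; ++I) {
        X[I] = uint8_t(I);
        Y[I] = uint8_t(0x40 + I);
        M1[I] = uint8_t(15 - I);       // only its even lanes are observable
        M2[I] = uint8_t((I * 3) % 16); // only its odd lanes are observable
        Sel[I] = (I % 2) != 0;         // blend control
      }

      // Original form: two different shuffle masks (two constant loads).
      V16 Before = blend16(pshufb16(X, M1), pshufb16(Y, M2), Sel);

      // Folded form: one shared mask M3 = blend(M1, M2) feeds both PSHUFBs.
      V16 M3 = blend16(M1, M2, Sel);
      V16 After = blend16(pshufb16(X, M3), pshufb16(Y, M3), Sel);

      assert(Before == After && "mask sharing must preserve the result");
      std::puts("shared-mask form matches the original blend of two pshufbs");
    }

The test-suite churn below shows the same effect at the assembly level: pairs of vpshufb with embedded (different) constants collapse to one vmovdqa/vpbroadcastq mask load feeding two vpshufb instructions.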
---
llvm/lib/Target/X86/X86ISelLowering.cpp | 70 +-
llvm/test/CodeGen/X86/oddshuffles.ll | 14 +-
.../X86/shuffle-strided-with-offset-512.ll | 5 +-
llvm/test/CodeGen/X86/shuffle-vs-trunc-512.ll | 10 +-
.../vector-interleaved-load-i16-stride-2.ll | 99 +-
.../vector-interleaved-load-i16-stride-4.ll | 1338 +++--
.../vector-interleaved-load-i16-stride-5.ll | 116 +-
.../vector-interleaved-load-i16-stride-6.ll | 4568 ++++++++-------
.../vector-interleaved-load-i16-stride-7.ll | 5105 +++++++++--------
.../vector-interleaved-load-i8-stride-2.ll | 84 +-
.../vector-interleaved-load-i8-stride-5.ll | 142 +-
.../vector-interleaved-load-i8-stride-6.ll | 232 +-
.../vector-interleaved-load-i8-stride-7.ll | 392 +-
.../vector-interleaved-store-i16-stride-3.ll | 28 +-
.../vector-interleaved-store-i16-stride-4.ll | 28 +-
.../vector-interleaved-store-i16-stride-5.ll | 86 +-
.../vector-interleaved-store-i16-stride-6.ll | 228 +-
.../vector-interleaved-store-i16-stride-7.ll | 21 +-
.../vector-interleaved-store-i16-stride-8.ll | 30 +-
.../vector-interleaved-store-i8-stride-6.ll | 77 +-
.../vector-interleaved-store-i8-stride-8.ll | 431 +-
.../CodeGen/X86/vector-shuffle-256-v16.ll | 21 +-
.../CodeGen/X86/vector-shuffle-256-v32.ll | 40 +-
23 files changed, 6669 insertions(+), 6496 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 1d947ac2346d0..7f44fb0fa266d 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -41005,23 +41005,59 @@ static SDValue combineTargetShuffle(SDValue N, const SDLoc &DL,
case X86ISD::BLENDI: {
SDValue N0 = N.getOperand(0);
SDValue N1 = N.getOperand(1);
-
- // blend(bitcast(x),bitcast(y)) -> bitcast(blend(x,y)) to narrower types.
- // TODO: Handle MVT::v16i16 repeated blend mask.
- if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST &&
- N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()) {
- MVT SrcVT = N0.getOperand(0).getSimpleValueType();
- if ((VT.getScalarSizeInBits() % SrcVT.getScalarSizeInBits()) == 0 &&
- SrcVT.getScalarSizeInBits() >= 32) {
- unsigned Size = VT.getVectorNumElements();
- unsigned NewSize = SrcVT.getVectorNumElements();
- APInt BlendMask = N.getConstantOperandAPInt(2).zextOrTrunc(Size);
- APInt NewBlendMask = APIntOps::ScaleBitMask(BlendMask, NewSize);
- return DAG.getBitcast(
- VT, DAG.getNode(X86ISD::BLENDI, DL, SrcVT, N0.getOperand(0),
- N1.getOperand(0),
- DAG.getTargetConstant(NewBlendMask.getZExtValue(),
- DL, MVT::i8)));
+ unsigned EltBits = VT.getScalarSizeInBits();
+
+ if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST) {
+ // blend(bitcast(x),bitcast(y)) -> bitcast(blend(x,y)) to narrower types.
+ // TODO: Handle MVT::v16i16 repeated blend mask.
+ if (N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()) {
+ MVT SrcVT = N0.getOperand(0).getSimpleValueType();
+ unsigned SrcBits = SrcVT.getScalarSizeInBits();
+ if ((EltBits % SrcBits) == 0 && SrcBits >= 32) {
+ unsigned Size = VT.getVectorNumElements();
+ unsigned NewSize = SrcVT.getVectorNumElements();
+ APInt BlendMask = N.getConstantOperandAPInt(2).zextOrTrunc(Size);
+ APInt NewBlendMask = APIntOps::ScaleBitMask(BlendMask, NewSize);
+ return DAG.getBitcast(
+ VT, DAG.getNode(X86ISD::BLENDI, DL, SrcVT, N0.getOperand(0),
+ N1.getOperand(0),
+ DAG.getTargetConstant(NewBlendMask.getZExtValue(),
+ DL, MVT::i8)));
+ }
+ }
+ // Share PSHUFB masks:
+ // blend(pshufb(x,m1),pshufb(y,m2))
+ // --> m3 = blend(m1,m2)
+ // blend(pshufb(x,m3),pshufb(y,m3))
+ if (N0.hasOneUse() && N1.hasOneUse()) {
+ SmallVector<int> Mask, ByteMask;
+ SmallVector<SDValue> Ops;
+ SDValue LHS = peekThroughOneUseBitcasts(N0);
+ SDValue RHS = peekThroughOneUseBitcasts(N1);
+ if (LHS.getOpcode() == X86ISD::PSHUFB &&
+ RHS.getOpcode() == X86ISD::PSHUFB &&
+ LHS.getOperand(1) != RHS.getOperand(1) &&
+ (LHS.getOperand(1).hasOneUse() || RHS.getOperand(1).hasOneUse()) &&
+ getTargetShuffleMask(N, false, Ops, Mask)) {
+ assert(Ops.size() == 2 && LHS == peekThroughOneUseBitcasts(Ops[0]) &&
+ RHS == peekThroughOneUseBitcasts(Ops[1]) &&
+ "BLENDI decode mismatch");
+ MVT ShufVT = LHS.getSimpleValueType();
+ SDValue MaskLHS = LHS.getOperand(1);
+ SDValue MaskRHS = RHS.getOperand(1);
+ llvm::narrowShuffleMaskElts(EltBits / 8, Mask, ByteMask);
+ if (SDValue NewMask = combineX86ShufflesConstants(
+ ShufVT, {MaskLHS, MaskRHS}, ByteMask, true, DAG, DL,
+ Subtarget)) {
+ SDValue NewLHS = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT,
+ LHS.getOperand(0), NewMask);
+ SDValue NewRHS = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT,
+ RHS.getOperand(0), NewMask);
+ return DAG.getNode(X86ISD::BLENDI, DL, VT,
+ DAG.getBitcast(VT, NewLHS),
+ DAG.getBitcast(VT, NewRHS), N.getOperand(2));
+ }
+ }
}
}
return SDValue();
diff --git a/llvm/test/CodeGen/X86/oddshuffles.ll b/llvm/test/CodeGen/X86/oddshuffles.ll
index d3a3b1e980db0..b40b2c82843cc 100644
--- a/llvm/test/CodeGen/X86/oddshuffles.ll
+++ b/llvm/test/CodeGen/X86/oddshuffles.ll
@@ -1294,10 +1294,11 @@ define void @interleave_24i16_in(ptr %p, ptr %q1, ptr %q2, ptr %q3) nounwind {
; AVX2-SLOW-NEXT: vmovdqu (%rdx), %xmm1
; AVX2-SLOW-NEXT: vmovdqu (%rcx), %xmm2
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3
-; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[0,1,u,u,6,7,2,3,u,u,8,9,4,5,u,u,16,17,u,u,22,23,18,19,u,u,24,25,20,21,u,u]
+; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,0,1,6,7,2,3,2,3,8,9,4,5,4,5,16,17,6,7,22,23,18,19,8,9,24,25,20,21,10,11]
+; AVX2-SLOW-NEXT: vpshufb %ymm4, %ymm3, %ymm5
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,0,1]
-; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,0,1,u,u,u,u,2,3,u,u,u,u,4,5,u,u,22,23,u,u,u,u,24,25,u,u,u,u,26,27]
-; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7],ymm4[8],ymm3[9],ymm4[10,11],ymm3[12],ymm4[13,14],ymm3[15]
+; AVX2-SLOW-NEXT: vpshufb %ymm4, %ymm3, %ymm3
+; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2,3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8],ymm3[9],ymm5[10,11],ymm3[12],ymm5[13,14],ymm3[15]
; AVX2-SLOW-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,1,1,0,2]
; AVX2-SLOW-NEXT: vpermd %ymm2, %ymm4, %ymm4
; AVX2-SLOW-NEXT: vpmovsxbw {{.*#+}} ymm5 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
@@ -1339,10 +1340,11 @@ define void @interleave_24i16_in(ptr %p, ptr %q1, ptr %q2, ptr %q3) nounwind {
; AVX2-FAST-PERLANE-NEXT: vmovdqu (%rdx), %xmm1
; AVX2-FAST-PERLANE-NEXT: vmovdqu (%rcx), %xmm2
; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3
-; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[0,1,u,u,6,7,2,3,u,u,8,9,4,5,u,u,16,17,u,u,22,23,18,19,u,u,24,25,20,21,u,u]
+; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,0,1,6,7,2,3,2,3,8,9,4,5,4,5,16,17,6,7,22,23,18,19,8,9,24,25,20,21,10,11]
+; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm4, %ymm3, %ymm5
; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,0,1]
-; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,0,1,u,u,u,u,2,3,u,u,u,u,4,5,u,u,22,23,u,u,u,u,24,25,u,u,u,u,26,27]
-; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7],ymm4[8],ymm3[9],ymm4[10,11],ymm3[12],ymm4[13,14],ymm3[15]
+; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm4, %ymm3, %ymm3
+; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2,3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8],ymm3[9],ymm5[10,11],ymm3[12],ymm5[13,14],ymm3[15]
; AVX2-FAST-PERLANE-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,1,1,0,2]
; AVX2-FAST-PERLANE-NEXT: vpermd %ymm2, %ymm4, %ymm4
; AVX2-FAST-PERLANE-NEXT: vpmovsxbw {{.*#+}} ymm5 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
diff --git a/llvm/test/CodeGen/X86/shuffle-strided-with-offset-512.ll b/llvm/test/CodeGen/X86/shuffle-strided-with-offset-512.ll
index e94f51233256c..45842d4148a8b 100644
--- a/llvm/test/CodeGen/X86/shuffle-strided-with-offset-512.ll
+++ b/llvm/test/CodeGen/X86/shuffle-strided-with-offset-512.ll
@@ -12,8 +12,9 @@ define void @shuffle_v64i8_to_v32i8_1(ptr %L, ptr %S) nounwind {
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512F-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31]
-; AVX512F-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT: vpbroadcastq {{.*#+}} ymm2 = [1,3,5,7,9,11,13,15,1,3,5,7,9,11,13,15,1,3,5,7,9,11,13,15,1,3,5,7,9,11,13,15]
+; AVX512F-NEXT: vpshufb %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX512F-NEXT: vmovdqa %ymm0, (%rsi)
diff --git a/llvm/test/CodeGen/X86/shuffle-vs-trunc-512.ll b/llvm/test/CodeGen/X86/shuffle-vs-trunc-512.ll
index 95e249984e184..cf0820aac3262 100644
--- a/llvm/test/CodeGen/X86/shuffle-vs-trunc-512.ll
+++ b/llvm/test/CodeGen/X86/shuffle-vs-trunc-512.ll
@@ -20,8 +20,9 @@ define void @shuffle_v64i8_to_v32i8(ptr %L, ptr %S) nounwind {
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512F-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30]
-; AVX512F-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT: vpbroadcastq {{.*#+}} ymm2 = [0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14]
+; AVX512F-NEXT: vpshufb %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX512F-NEXT: vmovdqa %ymm0, (%rsi)
@@ -44,8 +45,9 @@ define void @shuffle_v64i8_to_v32i8(ptr %L, ptr %S) nounwind {
; AVX512VL-FAST-PERLANE: # %bb.0:
; AVX512VL-FAST-PERLANE-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-FAST-PERLANE-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512VL-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30]
-; AVX512VL-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30,u,u,u,u,u,u,u,u]
+; AVX512VL-FAST-PERLANE-NEXT: vpbroadcastq {{.*#+}} ymm2 = [0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14]
+; AVX512VL-FAST-PERLANE-NEXT: vpshufb %ymm2, %ymm1, %ymm1
+; AVX512VL-FAST-PERLANE-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX512VL-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX512VL-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX512VL-FAST-PERLANE-NEXT: vmovdqa %ymm0, (%rsi)
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-2.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-2.ll
index 3bc97f71f04fb..00e43df15deea 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-2.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-2.ll
@@ -488,8 +488,9 @@ define void @load_i16_stride2_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) no
; AVX2-NEXT: vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
; AVX2-NEXT: vshufps {{.*#+}} ymm2 = ymm3[0,2],ymm2[0,2],ymm3[4,6],ymm2[4,6]
; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [2,3,6,7,10,11,14,15,2,3,6,7,10,11,14,15,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
+; AVX2-NEXT: vpshufb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpshufb %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vmovaps %ymm2, (%rsi)
@@ -506,8 +507,9 @@ define void @load_i16_stride2_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) no
; AVX2-FP-NEXT: vpshufb %ymm2, %ymm0, %ymm2
; AVX2-FP-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,2],ymm3[0,2],ymm2[4,6],ymm3[4,6]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm3 = [2,3,6,7,10,11,14,15,2,3,6,7,10,11,14,15,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
+; AVX2-FP-NEXT: vpshufb %ymm3, %ymm1, %ymm1
+; AVX2-FP-NEXT: vpshufb %ymm3, %ymm0, %ymm0
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-FP-NEXT: vmovaps %ymm2, (%rsi)
@@ -524,8 +526,9 @@ define void @load_i16_stride2_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) no
; AVX2-FCP-NEXT: vpshufb %ymm2, %ymm0, %ymm2
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,2],ymm3[0,2],ymm2[4,6],ymm3[4,6]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm3 = [2,3,6,7,10,11,14,15,2,3,6,7,10,11,14,15,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
+; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm1, %ymm1
+; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm0, %ymm0
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-FCP-NEXT: vmovaps %ymm2, (%rsi)
@@ -736,14 +739,13 @@ define void @load_i16_stride2_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) no
; AVX2-NEXT: vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
; AVX2-NEXT: vshufps {{.*#+}} ymm5 = ymm6[0,2],ymm5[0,2],ymm6[4,6],ymm5[4,6]
; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,1,3]
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm6 = [u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm6 = [2,3,6,7,10,11,14,15,2,3,6,7,10,11,14,15,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
; AVX2-NEXT: vpshufb %ymm6, %ymm3, %ymm3
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm7 = [2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u]
-; AVX2-NEXT: vpshufb %ymm7, %ymm2, %ymm2
+; AVX2-NEXT: vpshufb %ymm6, %ymm2, %ymm2
; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5],ymm3[6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
; AVX2-NEXT: vpshufb %ymm6, %ymm1, %ymm1
-; AVX2-NEXT: vpshufb %ymm7, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb %ymm6, %ymm0, %ymm0
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vmovaps %ymm5, (%rsi)
@@ -768,14 +770,13 @@ define void @load_i16_stride2_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) no
; AVX2-FP-NEXT: vpshufb %ymm4, %ymm0, %ymm4
; AVX2-FP-NEXT: vshufps {{.*#+}} ymm4 = ymm4[0,2],ymm6[0,2],ymm4[4,6],ymm6[4,6]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,2,1,3]
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm6 = [u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm6 = [2,3,6,7,10,11,14,15,2,3,6,7,10,11,14,15,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
; AVX2-FP-NEXT: vpshufb %ymm6, %ymm3, %ymm3
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm7 = [2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u]
-; AVX2-FP-NEXT: vpshufb %ymm7, %ymm2, %ymm2
+; AVX2-FP-NEXT: vpshufb %ymm6, %ymm2, %ymm2
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5],ymm3[6,7]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
; AVX2-FP-NEXT: vpshufb %ymm6, %ymm1, %ymm1
-; AVX2-FP-NEXT: vpshufb %ymm7, %ymm0, %ymm0
+; AVX2-FP-NEXT: vpshufb %ymm6, %ymm0, %ymm0
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-FP-NEXT: vmovaps %ymm4, (%rsi)
@@ -800,14 +801,13 @@ define void @load_i16_stride2_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) no
; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm0, %ymm4
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm4 = ymm4[0,2],ymm6[0,2],ymm4[4,6],ymm6[4,6]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,2,1,3]
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [2,3,6,7,10,11,14,15,2,3,6,7,10,11,14,15,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm3, %ymm3
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vpshufb %ymm7, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm2, %ymm2
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5],ymm3[6,7]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm1, %ymm1
-; AVX2-FCP-NEXT: vpshufb %ymm7, %ymm0, %ymm0
+; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm0
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-FCP-NEXT: vmovaps %ymm4, (%rsi)
@@ -1180,20 +1180,20 @@ define void @load_i16_stride2_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) no
; AVX2-NEXT: vmovdqa 224(%rdi), %ymm6
; AVX2-NEXT: vmovdqa (%rdi), %ymm3
; AVX2-NEXT: vmovdqa 32(%rdi), %ymm5
-; AVX2-NEXT: vmovdqa 64(%rdi), %ymm8
-; AVX2-NEXT: vmovdqa 96(%rdi), %ymm9
-; AVX2-NEXT: vpshuflw {{.*#+}} ymm2 = ymm9[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
+; AVX2-NEXT: vmovdqa 64(%rdi), %ymm7
+; AVX2-NEXT: vmovdqa 96(%rdi), %ymm8
+; AVX2-NEXT: vpshuflw {{.*#+}} ymm2 = ymm8[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
; AVX2-NEXT: vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
-; AVX2-NEXT: vpshuflw {{.*#+}} ymm7 = ymm8[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX2-NEXT: vpshufhw {{.*#+}} ymm7 = ymm7[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
-; AVX2-NEXT: vshufps {{.*#+}} ymm2 = ymm7[0,2],ymm2[0,2],ymm7[4,6],ymm2[4,6]
+; AVX2-NEXT: vpshuflw {{.*#+}} ymm9 = ymm7[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
+; AVX2-NEXT: vpshufhw {{.*#+}} ymm9 = ymm9[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
+; AVX2-NEXT: vshufps {{.*#+}} ymm2 = ymm9[0,2],ymm2[0,2],ymm9[4,6],ymm2[4,6]
; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3]
-; AVX2-NEXT: vpshuflw {{.*#+}} ymm7 = ymm6[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX2-NEXT: vpshufhw {{.*#+}} ymm7 = ymm7[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
+; AVX2-NEXT: vpshuflw {{.*#+}} ymm9 = ymm6[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
+; AVX2-NEXT: vpshufhw {{.*#+}} ymm9 = ymm9[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
; AVX2-NEXT: vpshuflw {{.*#+}} ymm10 = ymm4[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
; AVX2-NEXT: vpshufhw {{.*#+}} ymm10 = ymm10[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
-; AVX2-NEXT: vshufps {{.*#+}} ymm7 = ymm10[0,2],ymm7[0,2],ymm10[4,6],ymm7[4,6]
-; AVX2-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,2,1,3]
+; AVX2-NEXT: vshufps {{.*#+}} ymm9 = ymm10[0,2],ymm9[0,2],ymm10[4,6],ymm9[4,6]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,2,1,3]
; AVX2-NEXT: vpshuflw {{.*#+}} ymm10 = ymm5[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
; AVX2-NEXT: vpshufhw {{.*#+}} ymm10 = ymm10[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
; AVX2-NEXT: vpshuflw {{.*#+}} ymm11 = ymm3[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
@@ -1206,32 +1206,31 @@ define void @load_i16_stride2_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) no
; AVX2-NEXT: vpshufhw {{.*#+}} ymm12 = ymm12[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
; AVX2-NEXT: vshufps {{.*#+}} ymm11 = ymm12[0,2],ymm11[0,2],ymm12[4,6],ymm11[4,6]
; AVX2-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,2,1,3]
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm12 = [u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31]
-; AVX2-NEXT: vpshufb %ymm12, %ymm9, %ymm9
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm13 = [2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u]
-; AVX2-NEXT: vpshufb %ymm13, %ymm8, %ymm8
-; AVX2-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3],ymm8[4,5],ymm9[6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,2,1,3]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm12 = [2,3,6,7,10,11,14,15,2,3,6,7,10,11,14,15,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
+; AVX2-NEXT: vpshufb %ymm12, %ymm8, %ymm8
+; AVX2-NEXT: vpshufb %ymm12, %ymm7, %ymm7
+; AVX2-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm8[2,3],ymm7[4,5],ymm8[6,7]
+; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,2,1,3]
; AVX2-NEXT: vpshufb %ymm12, %ymm6, %ymm6
-; AVX2-NEXT: vpshufb %ymm13, %ymm4, %ymm4
+; AVX2-NEXT: vpshufb %ymm12, %ymm4, %ymm4
; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm6[2,3],ymm4[4,5],ymm6[6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,2,1,3]
; AVX2-NEXT: vpshufb %ymm12, %ymm5, %ymm5
-; AVX2-NEXT: vpshufb %ymm13, %ymm3, %ymm3
+; AVX2-NEXT: vpshufb %ymm12, %ymm3, %ymm3
; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm5[2,3],ymm3[4,5],ymm5[6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,1,3]
; AVX2-NEXT: vpshufb %ymm12, %ymm1, %ymm1
-; AVX2-NEXT: vpshufb %ymm13, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb %ymm12, %ymm0, %ymm0
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vmovaps %ymm11, 64(%rsi)
; AVX2-NEXT: vmovaps %ymm10, (%rsi)
-; AVX2-NEXT: vmovaps %ymm7, 96(%rsi)
+; AVX2-NEXT: vmovaps %ymm9, 96(%rsi)
; AVX2-NEXT: vmovaps %ymm2, 32(%rsi)
; AVX2-NEXT: vmovdqa %ymm0, 64(%rdx)
; AVX2-NEXT: vmovdqa %ymm3, (%rdx)
; AVX2-NEXT: vmovdqa %ymm4, 96(%rdx)
-; AVX2-NEXT: vmovdqa %ymm8, 32(%rdx)
+; AVX2-NEXT: vmovdqa %ymm7, 32(%rdx)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1262,22 +1261,21 @@ define void @load_i16_stride2_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) no
; AVX2-FP-NEXT: vpshufb %ymm9, %ymm0, %ymm9
; AVX2-FP-NEXT: vshufps {{.*#+}} ymm9 = ymm9[0,2],ymm12[0,2],ymm9[4,6],ymm12[4,6]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,2,1,3]
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm12 = [u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm12 = [2,3,6,7,10,11,14,15,2,3,6,7,10,11,14,15,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
; AVX2-FP-NEXT: vpshufb %ymm12, %ymm8, %ymm8
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm13 = [2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u]
-; AVX2-FP-NEXT: vpshufb %ymm13, %ymm7, %ymm7
+; AVX2-FP-NEXT: vpshufb %ymm12, %ymm7, %ymm7
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm8[2,3],ymm7[4,5],ymm8[6,7]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,2,1,3]
; AVX2-FP-NEXT: vpshufb %ymm12, %ymm4, %ymm4
-; AVX2-FP-NEXT: vpshufb %ymm13, %ymm3, %ymm3
+; AVX2-FP-NEXT: vpshufb %ymm12, %ymm3, %ymm3
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5],ymm4[6,7]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,1,3]
; AVX2-FP-NEXT: vpshufb %ymm12, %ymm6, %ymm4
-; AVX2-FP-NEXT: vpshufb %ymm13, %ymm5, %ymm5
+; AVX2-FP-NEXT: vpshufb %ymm12, %ymm5, %ymm5
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,2,1,3]
; AVX2-FP-NEXT: vpshufb %ymm12, %ymm1, %ymm1
-; AVX2-FP-NEXT: vpshufb %ymm13, %ymm0, %ymm0
+; AVX2-FP-NEXT: vpshufb %ymm12, %ymm0, %ymm0
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-FP-NEXT: vmovaps %ymm9, 64(%rsi)
@@ -1318,22 +1316,21 @@ define void @load_i16_stride2_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) no
; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm0, %ymm9
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm9 = ymm9[0,2],ymm12[0,2],ymm9[4,6],ymm12[4,6]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,2,1,3]
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [2,3,6,7,10,11,14,15,2,3,6,7,10,11,14,15,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
; AVX2-FCP-NEXT: vpshufb %ymm12, %ymm8, %ymm8
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm13 = [2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vpshufb %ymm13, %ymm7, %ymm7
+; AVX2-FCP-NEXT: vpshufb %ymm12, %ymm7, %ymm7
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm8[2,3],ymm7[4,5],ymm8[6,7]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,2,1,3]
; AVX2-FCP-NEXT: vpshufb %ymm12, %ymm4, %ymm4
-; AVX2-FCP-NEXT: vpshufb %ymm13, %ymm3, %ymm3
+; AVX2-FCP-NEXT: vpshufb %ymm12, %ymm3, %ymm3
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5],ymm4[6,7]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,1,3]
; AVX2-FCP-NEXT: vpshufb %ymm12, %ymm6, %ymm4
-; AVX2-FCP-NEXT: vpshufb %ymm13, %ymm5, %ymm5
+; AVX2-FCP-NEXT: vpshufb %ymm12, %ymm5, %ymm5
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,2,1,3]
; AVX2-FCP-NEXT: vpshufb %ymm12, %ymm1, %ymm1
-; AVX2-FCP-NEXT: vpshufb %ymm13, %ymm0, %ymm0
+; AVX2-FCP-NEXT: vpshufb %ymm12, %ymm0, %ymm0
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-FCP-NEXT: vmovaps %ymm9, 64(%rsi)
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll
index 3f77e50260c8d..df28ac14a30c0 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll
@@ -1235,64 +1235,62 @@ define void @load_i16_stride4_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,2,2,3,0,2,4,6]
; AVX2-FCP-NEXT: vpermd %ymm2, %ymm3, %ymm4
-; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm5 = [16,17,20,21,24,25,28,29,16,17,20,21,24,25,28,29,16,17,20,21,24,25,28,29,16,17,20,21,24,25,28,29]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,16,17,20,21,24,25,28,29]
; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm4, %ymm6
-; AVX2-FCP-NEXT: vpermd %ymm1, %ymm3, %ymm7
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,u,u,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm7, %ymm3
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm6[6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqa (%rdi), %xmm3
-; AVX2-FCP-NEXT: vmovdqa 16(%rdi), %xmm6
-; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %xmm9
-; AVX2-FCP-NEXT: vmovdqa 48(%rdi), %xmm10
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [0,1,2,3,2,3,10,11,8,9,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpshufb %xmm11, %xmm10, %xmm12
-; AVX2-FCP-NEXT: vpshufb %xmm11, %xmm9, %xmm11
+; AVX2-FCP-NEXT: vpermd %ymm1, %ymm3, %ymm3
+; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm3, %ymm7
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3,4,5],ymm6[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqa (%rdi), %xmm6
+; AVX2-FCP-NEXT: vmovdqa 16(%rdi), %xmm7
+; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %xmm8
+; AVX2-FCP-NEXT: vmovdqa 48(%rdi), %xmm9
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [0,1,2,3,2,3,10,11,8,9,10,11,12,13,14,15]
+; AVX2-FCP-NEXT: vpshufb %xmm10, %xmm9, %xmm11
+; AVX2-FCP-NEXT: vpshufb %xmm10, %xmm8, %xmm10
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm10 = xmm10[0],xmm11[0],xmm10[1],xmm11[1]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [2,3,10,11,8,9,10,11,8,9,10,11,12,13,14,15]
+; AVX2-FCP-NEXT: vpshufb %xmm11, %xmm7, %xmm12
+; AVX2-FCP-NEXT: vpshufb %xmm11, %xmm6, %xmm11
; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm11 = xmm11[0],xmm12[0],xmm11[1],xmm12[1]
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [2,3,10,11,8,9,10,11,8,9,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm6, %xmm13
-; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm3, %xmm12
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm12 = xmm12[0],xmm13[0],xmm12[1],xmm13[1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm11 = xmm12[0,1],xmm11[2,3]
-; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm12 = [18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
-; AVX2-FCP-NEXT: vpshufb %ymm12, %ymm4, %ymm4
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm13 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vpshufb %ymm13, %ymm7, %ymm7
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm7[0,1,2,3,4,5],ymm4[6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm11[0,1,2,3],ymm4[4,5,6,7]
-; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [1,3,2,3,1,3,5,7]
-; AVX2-FCP-NEXT: vpermd %ymm2, %ymm7, %ymm2
-; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm2, %ymm5
-; AVX2-FCP-NEXT: vpermd %ymm1, %ymm7, %ymm1
-; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm1, %ymm7
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm7[0,1,2,3,4,5],ymm5[6,7]
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm7 = xmm10[3,1,2,3]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm8 = xmm7[0,1,2,0,4,5,6,7]
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[3,1,2,3]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm10 = xmm9[0,1,2,0,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm8 = xmm10[0],xmm8[0],xmm10[1],xmm8[1]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm10 = xmm11[0,1],xmm10[2,3]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
+; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm4, %ymm4
+; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm3
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm4[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm10[0,1,2,3],ymm3[4,5,6,7]
+; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [1,3,2,3,1,3,5,7]
+; AVX2-FCP-NEXT: vpermd %ymm2, %ymm4, %ymm2
+; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm2, %ymm10
+; AVX2-FCP-NEXT: vpermd %ymm1, %ymm4, %ymm1
+; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm1, %ymm4
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm10[6,7]
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm5 = xmm9[3,1,2,3]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm9 = xmm5[0,1,2,0,4,5,6,7]
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm8 = xmm8[3,1,2,3]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm10 = xmm8[0,1,2,0,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm9 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[3,1,2,3]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm10 = xmm7[2,0,2,3,4,5,6,7]
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[3,1,2,3]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm10 = xmm6[2,0,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[3,1,2,3]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm11 = xmm3[2,0,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm8 = xmm10[0,1],xmm8[2,3]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3],ymm5[4,5,6,7]
-; AVX2-FCP-NEXT: vpshufb %ymm12, %ymm2, %ymm2
-; AVX2-FCP-NEXT: vpshufb %ymm13, %ymm1, %ymm1
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm12 = xmm6[2,0,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm10 = xmm12[0],xmm10[0],xmm12[1],xmm10[1]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm9 = xmm10[0,1],xmm9[2,3]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm9[0,1,2,3],ymm4[4,5,6,7]
+; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm1, %ymm1
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm7[0,1,3,1,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm9[0,1,3,1,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm7[0],xmm2[0],xmm7[1],xmm2[1]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm5[0,1,3,1,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm8[0,1,3,1,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm7[3,1,2,3,4,5,6,7]
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm6 = xmm6[3,1,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm5[0,1],xmm2[2,3]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqa %ymm0, (%rsi)
-; AVX2-FCP-NEXT: vmovdqa %ymm4, (%rdx)
-; AVX2-FCP-NEXT: vmovdqa %ymm5, (%rcx)
+; AVX2-FCP-NEXT: vmovdqa %ymm3, (%rdx)
+; AVX2-FCP-NEXT: vmovdqa %ymm4, (%rcx)
; AVX2-FCP-NEXT: vmovdqa %ymm1, (%r8)
; AVX2-FCP-NEXT: vzeroupper
; AVX2-FCP-NEXT: retq
@@ -1382,10 +1380,9 @@ define void @load_i16_stride4_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vmovdqa64 (%rdi), %zmm4
; AVX512-FCP-NEXT: vpmovqw %zmm4, %xmm9
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} ymm9 = [18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm3, %ymm3
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm1, %ymm1
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm1, %ymm1
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm3[6,7]
; AVX512-FCP-NEXT: vpsrlq $16, %zmm4, %zmm3
; AVX512-FCP-NEXT: vpmovqw %zmm3, %xmm3
@@ -1400,7 +1397,7 @@ define void @load_i16_stride4_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vpmovqw %zmm0, %xmm0
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm2, %ymm2
-; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm3, %ymm3
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm3, %ymm3
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
; AVX512-FCP-NEXT: vpsrlq $48, %zmm4, %zmm3
; AVX512-FCP-NEXT: vpmovqw %zmm3, %xmm3
@@ -1497,10 +1494,9 @@ define void @load_i16_stride4_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdi), %zmm4
; AVX512DQ-FCP-NEXT: vpmovqw %zmm4, %xmm9
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} ymm9 = [18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm3, %ymm3
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm1, %ymm1
+; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm1, %ymm1
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm3[6,7]
; AVX512DQ-FCP-NEXT: vpsrlq $16, %zmm4, %zmm3
; AVX512DQ-FCP-NEXT: vpmovqw %zmm3, %xmm3
@@ -1515,7 +1511,7 @@ define void @load_i16_stride4_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vpmovqw %zmm0, %xmm0
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm2, %ymm2
-; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm3, %ymm3
+; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm3, %ymm3
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
; AVX512DQ-FCP-NEXT: vpsrlq $48, %zmm4, %zmm3
; AVX512DQ-FCP-NEXT: vpmovqw %zmm3, %xmm3
@@ -2563,145 +2559,140 @@ define void @load_i16_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-LABEL: load_i16_stride4_vf32:
; AVX2-FCP: # %bb.0:
; AVX2-FCP-NEXT: subq $104, %rsp
-; AVX2-FCP-NEXT: vmovdqa 192(%rdi), %ymm5
-; AVX2-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqa 192(%rdi), %ymm3
+; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 224(%rdi), %ymm6
; AVX2-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 64(%rdi), %ymm7
-; AVX2-FCP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 96(%rdi), %ymm8
+; AVX2-FCP-NEXT: vmovdqa 64(%rdi), %ymm9
+; AVX2-FCP-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqa 96(%rdi), %ymm7
; AVX2-FCP-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-FCP-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-FCP-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm2, %xmm4
+; AVX2-FCP-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX2-FCP-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,2,2,3,0,2,4,6]
-; AVX2-FCP-NEXT: vpermd %ymm8, %ymm2, %ymm10
-; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm4 = [16,17,20,21,24,25,28,29,16,17,20,21,24,25,28,29,16,17,20,21,24,25,28,29,16,17,20,21,24,25,28,29]
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm10, %ymm3
-; AVX2-FCP-NEXT: vmovdqa %ymm4, %ymm9
-; AVX2-FCP-NEXT: vpermd %ymm7, %ymm2, %ymm11
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,u,u,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vpshufb %ymm7, %ymm11, %ymm4
-; AVX2-FCP-NEXT: vmovdqa %ymm7, %ymm12
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
+; AVX2-FCP-NEXT: vpermd %ymm7, %ymm2, %ymm5
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,16,17,20,21,24,25,28,29]
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm5, %ymm8
+; AVX2-FCP-NEXT: vpermd %ymm9, %ymm2, %ymm10
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm10, %ymm9
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3,4,5],ymm8[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX2-FCP-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm8
+; AVX2-FCP-NEXT: vpackusdw %xmm8, %xmm1, %xmm1
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX2-FCP-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm8
+; AVX2-FCP-NEXT: vpackusdw %xmm8, %xmm0, %xmm0
; AVX2-FCP-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
-; AVX2-FCP-NEXT: vpermd %ymm6, %ymm2, %ymm7
-; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm7, %ymm1
-; AVX2-FCP-NEXT: vpermd %ymm5, %ymm2, %ymm4
-; AVX2-FCP-NEXT: vpshufb %ymm12, %ymm4, %ymm3
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm1[6,7]
+; AVX2-FCP-NEXT: vpermd %ymm6, %ymm2, %ymm9
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm9, %ymm1
+; AVX2-FCP-NEXT: vpermd %ymm3, %ymm2, %ymm6
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm6, %ymm8
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %xmm15
-; AVX2-FCP-NEXT: vmovdqa 48(%rdi), %xmm5
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3,2,3,10,11,8,9,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm5, %xmm0
-; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm15, %xmm9
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1]
-; AVX2-FCP-NEXT: vmovdqa (%rdi), %xmm14
-; AVX2-FCP-NEXT: vmovdqa 16(%rdi), %xmm6
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [2,3,10,11,8,9,10,11,8,9,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm6, %xmm12
-; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm14, %xmm13
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm12 = xmm12[0,1],xmm9[2,3]
-; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm0 = [18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
-; AVX2-FCP-NEXT: vpshufb %ymm0, %ymm10, %ymm13
-; AVX2-FCP-NEXT: vmovdqa %ymm0, %ymm9
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vpshufb %ymm0, %ymm11, %ymm11
-; AVX2-FCP-NEXT: vmovdqa %ymm0, %ymm10
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5],ymm13[6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm12[0,1,2,3],ymm11[4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %xmm14
+; AVX2-FCP-NEXT: vmovdqa 48(%rdi), %xmm3
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [0,1,2,3,2,3,10,11,8,9,10,11,12,13,14,15]
+; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm3, %xmm0
+; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm14, %xmm8
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1]
+; AVX2-FCP-NEXT: vmovdqa (%rdi), %xmm13
+; AVX2-FCP-NEXT: vmovdqa 16(%rdi), %xmm2
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [2,3,10,11,8,9,10,11,8,9,10,11,12,13,14,15]
+; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm2, %xmm4
+; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm13, %xmm11
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm11[0],xmm4[0],xmm11[1],xmm4[1]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0,1],xmm8[2,3]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
+; AVX2-FCP-NEXT: vpshufb %ymm0, %ymm5, %ymm5
+; AVX2-FCP-NEXT: vpshufb %ymm0, %ymm10, %ymm10
+; AVX2-FCP-NEXT: vmovdqa %ymm0, %ymm8
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm10[0,1,2,3,4,5],ymm5[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm5[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 160(%rdi), %xmm13
-; AVX2-FCP-NEXT: vmovdqa 176(%rdi), %xmm11
-; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm11, %xmm12
-; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm13, %xmm3
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm12[0],xmm3[1],xmm12[1]
-; AVX2-FCP-NEXT: vmovdqa 128(%rdi), %xmm12
-; AVX2-FCP-NEXT: vmovdqa 144(%rdi), %xmm3
-; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm3, %xmm0
-; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm12, %xmm2
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm7, %ymm1
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm4, %ymm2
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
+; AVX2-FCP-NEXT: vmovdqa 160(%rdi), %xmm11
+; AVX2-FCP-NEXT: vmovdqa 176(%rdi), %xmm15
+; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm15, %xmm4
+; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm11, %xmm10
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm10[0],xmm4[0],xmm10[1],xmm4[1]
+; AVX2-FCP-NEXT: vmovdqa 128(%rdi), %xmm10
+; AVX2-FCP-NEXT: vmovdqa 144(%rdi), %xmm12
+; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm12, %xmm0
+; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm10, %xmm1
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3]
+; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm9, %ymm1
+; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm6, %ymm4
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [1,3,2,3,1,3,5,7]
-; AVX2-FCP-NEXT: vpermd %ymm8, %ymm7, %ymm4
-; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm8 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm10 = [16,17,20,21,24,25,28,29,16,17,20,21,24,25,28,29,16,17,20,21,24,25,28,29,16,17,20,21,24,25,28,29]
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm4, %ymm0
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm8[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,u,u,u,u,u,u,u,u]
+; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [1,3,2,3,1,3,5,7]
+; AVX2-FCP-NEXT: vpermd %ymm7, %ymm6, %ymm4
+; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm7 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,16,17,20,21,24,25,28,29]
+; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm4, %ymm0
+; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm7, %ymm1
+; AVX2-FCP-NEXT: vmovdqa %ymm5, %ymm8
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm1 = xmm5[3,1,2,3]
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm15 = xmm15[3,1,2,3]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm1[0,1,2,0,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm15[0,1,2,0,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[3,1,2,3]
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm1 = xmm3[3,1,2,3]
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm14 = xmm14[3,1,2,3]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm6[2,0,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm9 = xmm14[2,0,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm1[0,1,2,0,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm14[0,1,2,0,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm13 = xmm13[3,1,2,3]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm2[2,0,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm9 = xmm13[2,0,2,3,4,5,6,7]
; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm9[0],xmm5[0],xmm9[1],xmm5[1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm5[0,1],xmm2[2,3]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm5[0,1],xmm3[2,3]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm5 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm2 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm5, %ymm7
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,u,u,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm9[0,1,2,3,4,5],ymm7[6,7]
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm9 = xmm11[3,1,2,3]
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm11 = xmm13[3,1,2,3]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm13 = xmm9[0,1,2,0,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm10 = xmm11[0,1,2,0,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm10 = xmm10[0],xmm13[0],xmm10[1],xmm13[1]
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm3 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm5 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm3, %ymm6
+; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm5, %ymm9
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm9[0,1,2,3,4,5],ymm6[6,7]
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm9 = xmm15[3,1,2,3]
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm11 = xmm11[3,1,2,3]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm15 = xmm9[0,1,2,0,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm0 = xmm11[0,1,2,0,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1]
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm12 = xmm12[3,1,2,3]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm13 = xmm3[2,0,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm0 = xmm12[2,0,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm13[0],xmm0[1],xmm13[1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm10[2,3]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm7[4,5,6,7]
-; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm10 = [18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm4, %ymm4
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm13 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vpshufb %ymm13, %ymm8, %ymm7
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm7[0,1,2,3,4,5],ymm4[6,7]
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm10 = xmm10[3,1,2,3]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm15 = xmm12[2,0,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm8 = xmm10[2,0,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm8 = xmm8[0],xmm15[0],xmm8[1],xmm15[1]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm8[0,1],xmm0[2,3]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
+; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm4, %ymm4
+; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm7, %ymm6
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm6[0,1,2,3,4,5],ymm4[6,7]
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,1,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm15[0,1,3,1,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm7[0],xmm1[0],xmm7[1],xmm1[1]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm6 = xmm6[3,1,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm14[3,1,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm6[0,1],xmm1[2,3]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm6 = xmm14[0,1,3,1,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm6[0],xmm1[0],xmm6[1],xmm1[1]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm6 = xmm13[3,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm6[0],xmm2[0],xmm6[1],xmm2[1]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm4[4,5,6,7]
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm5, %ymm4
-; AVX2-FCP-NEXT: vpshufb %ymm13, %ymm2, %ymm2
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm4[6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm9[0,1,3,1,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm11[0,1,3,1,4,5,6,7]
+; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm3, %ymm2
+; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm5, %ymm3
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm9[0,1,3,1,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm11[0,1,3,1,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm12[3,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm10[3,1,2,3,4,5,6,7]
; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm12[3,1,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm3, 32(%rsi)
@@ -2858,9 +2849,9 @@ define void @load_i16_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [16,17,20,21,0,0,0,0,24,25,28,29,0,0,0,0,16,17,20,21,0,0,0,0,24,25,28,29,0,0,0,0]
; AVX512-FCP-NEXT: # ymm2 = mem[0,1,0,1]
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,2,2,3,0,2,4,6]
-; AVX512-FCP-NEXT: vmovdqa 224(%rdi), %ymm6
-; AVX512-FCP-NEXT: vpermd %ymm6, %ymm4, %ymm5
-; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm5, %ymm7
+; AVX512-FCP-NEXT: vmovdqa 224(%rdi), %ymm5
+; AVX512-FCP-NEXT: vpermd %ymm5, %ymm4, %ymm6
+; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm6, %ymm7
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,u,u,u,u,8,9,12,13,12,13,14,15,16,17,20,21,u,u,u,u,24,25,28,29,u,u,u,u]
; AVX512-FCP-NEXT: vmovdqa 192(%rdi), %ymm8
; AVX512-FCP-NEXT: vpermd %ymm8, %ymm4, %ymm9
@@ -2879,54 +2870,53 @@ define void @load_i16_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vpmovqw %zmm0, %xmm13
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm13[0,1,2,3],ymm4[4,5,6,7]
; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm4[0,1,2,3],zmm7[0,1,2,3]
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} ymm4 = [18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
-; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm5, %ymm13
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm9, %ymm9
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],ymm13[6,7]
-; AVX512-FCP-NEXT: vpsrlq $16, %zmm1, %zmm13
-; AVX512-FCP-NEXT: vpmovqw %zmm13, %xmm13
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm13[0,1,2,3],ymm9[4,5,6,7]
-; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm12, %ymm12
-; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm15, %ymm13
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm13[0,1,2,3,4,5],ymm12[6,7]
-; AVX512-FCP-NEXT: vpsrlq $16, %zmm0, %zmm13
-; AVX512-FCP-NEXT: vpmovqw %zmm13, %xmm13
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm13[0,1,2,3],ymm12[4,5,6,7]
-; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm12[0,1,2,3],zmm9[0,1,2,3]
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm12 = [1,3,2,3,1,3,5,7]
-; AVX512-FCP-NEXT: vpermd %ymm6, %ymm12, %ymm6
-; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm6, %ymm13
-; AVX512-FCP-NEXT: vpermd %ymm8, %ymm12, %ymm8
-; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm8, %ymm15
-; AVX512-FCP-NEXT: vpermt2d %ymm13, %ymm11, %ymm15
-; AVX512-FCP-NEXT: vpsrlq $32, %zmm1, %zmm13
-; AVX512-FCP-NEXT: vpmovqw %zmm13, %xmm13
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm15[4,5,6,7]
-; AVX512-FCP-NEXT: vpermd %ymm10, %ymm12, %ymm10
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm6, %ymm6
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm9, %ymm9
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm9[0,1,2,3,4,5],ymm6[6,7]
+; AVX512-FCP-NEXT: vpsrlq $16, %zmm1, %zmm9
+; AVX512-FCP-NEXT: vpmovqw %zmm9, %xmm9
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm9[0,1,2,3],ymm6[4,5,6,7]
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm12, %ymm9
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm15, %ymm12
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm12[0,1,2,3,4,5],ymm9[6,7]
+; AVX512-FCP-NEXT: vpsrlq $16, %zmm0, %zmm12
+; AVX512-FCP-NEXT: vpmovqw %zmm12, %xmm12
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm12[0,1,2,3],ymm9[4,5,6,7]
+; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm6 = zmm9[0,1,2,3],zmm6[0,1,2,3]
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [1,3,2,3,1,3,5,7]
+; AVX512-FCP-NEXT: vpermd %ymm5, %ymm9, %ymm5
+; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm5, %ymm12
+; AVX512-FCP-NEXT: vpermd %ymm8, %ymm9, %ymm8
+; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm8, %ymm13
+; AVX512-FCP-NEXT: vpermt2d %ymm12, %ymm11, %ymm13
+; AVX512-FCP-NEXT: vpsrlq $32, %zmm1, %zmm12
+; AVX512-FCP-NEXT: vpmovqw %zmm12, %xmm12
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm13[4,5,6,7]
+; AVX512-FCP-NEXT: vpermd %ymm10, %ymm9, %ymm10
; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm10, %ymm2
-; AVX512-FCP-NEXT: vpermd %ymm14, %ymm12, %ymm12
-; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm12, %ymm3
+; AVX512-FCP-NEXT: vpermd %ymm14, %ymm9, %ymm9
+; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm9, %ymm3
; AVX512-FCP-NEXT: vpermt2d %ymm2, %ymm11, %ymm3
; AVX512-FCP-NEXT: vpsrlq $32, %zmm0, %zmm2
; AVX512-FCP-NEXT: vpmovqw %zmm2, %xmm2
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm13[0,1,2,3]
-; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm6, %ymm3
-; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm8, %ymm6
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3,4,5],ymm3[6,7]
+; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm12[0,1,2,3]
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm5, %ymm3
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm8, %ymm5
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm5[0,1,2,3,4,5],ymm3[6,7]
; AVX512-FCP-NEXT: vpsrlq $48, %zmm1, %zmm1
; AVX512-FCP-NEXT: vpmovqw %zmm1, %xmm1
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm10, %ymm3
-; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm12, %ymm4
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm9, %ymm4
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
; AVX512-FCP-NEXT: vpsrlq $48, %zmm0, %zmm0
; AVX512-FCP-NEXT: vpmovqw %zmm0, %xmm0
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[0,1,2,3]
; AVX512-FCP-NEXT: vmovdqa64 %zmm7, (%rsi)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm9, (%rdx)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm6, (%rdx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm2, (%rcx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm0, (%r8)
; AVX512-FCP-NEXT: vzeroupper
@@ -3070,9 +3060,9 @@ define void @load_i16_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [16,17,20,21,0,0,0,0,24,25,28,29,0,0,0,0,16,17,20,21,0,0,0,0,24,25,28,29,0,0,0,0]
; AVX512DQ-FCP-NEXT: # ymm2 = mem[0,1,0,1]
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,2,2,3,0,2,4,6]
-; AVX512DQ-FCP-NEXT: vmovdqa 224(%rdi), %ymm6
-; AVX512DQ-FCP-NEXT: vpermd %ymm6, %ymm4, %ymm5
-; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm5, %ymm7
+; AVX512DQ-FCP-NEXT: vmovdqa 224(%rdi), %ymm5
+; AVX512DQ-FCP-NEXT: vpermd %ymm5, %ymm4, %ymm6
+; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm6, %ymm7
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,u,u,u,u,8,9,12,13,12,13,14,15,16,17,20,21,u,u,u,u,24,25,28,29,u,u,u,u]
; AVX512DQ-FCP-NEXT: vmovdqa 192(%rdi), %ymm8
; AVX512DQ-FCP-NEXT: vpermd %ymm8, %ymm4, %ymm9
@@ -3091,54 +3081,53 @@ define void @load_i16_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vpmovqw %zmm0, %xmm13
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm13[0,1,2,3],ymm4[4,5,6,7]
; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm4[0,1,2,3],zmm7[0,1,2,3]
-; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} ymm4 = [18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm5, %ymm13
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm9, %ymm9
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],ymm13[6,7]
-; AVX512DQ-FCP-NEXT: vpsrlq $16, %zmm1, %zmm13
-; AVX512DQ-FCP-NEXT: vpmovqw %zmm13, %xmm13
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm13[0,1,2,3],ymm9[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm12, %ymm12
-; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm15, %ymm13
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm13[0,1,2,3,4,5],ymm12[6,7]
-; AVX512DQ-FCP-NEXT: vpsrlq $16, %zmm0, %zmm13
-; AVX512DQ-FCP-NEXT: vpmovqw %zmm13, %xmm13
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm13[0,1,2,3],ymm12[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm12[0,1,2,3],zmm9[0,1,2,3]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm12 = [1,3,2,3,1,3,5,7]
-; AVX512DQ-FCP-NEXT: vpermd %ymm6, %ymm12, %ymm6
-; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm6, %ymm13
-; AVX512DQ-FCP-NEXT: vpermd %ymm8, %ymm12, %ymm8
-; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm8, %ymm15
-; AVX512DQ-FCP-NEXT: vpermt2d %ymm13, %ymm11, %ymm15
-; AVX512DQ-FCP-NEXT: vpsrlq $32, %zmm1, %zmm13
-; AVX512DQ-FCP-NEXT: vpmovqw %zmm13, %xmm13
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm15[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpermd %ymm10, %ymm12, %ymm10
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm6, %ymm6
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm9, %ymm9
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm9[0,1,2,3,4,5],ymm6[6,7]
+; AVX512DQ-FCP-NEXT: vpsrlq $16, %zmm1, %zmm9
+; AVX512DQ-FCP-NEXT: vpmovqw %zmm9, %xmm9
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm9[0,1,2,3],ymm6[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm12, %ymm9
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm15, %ymm12
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm12[0,1,2,3,4,5],ymm9[6,7]
+; AVX512DQ-FCP-NEXT: vpsrlq $16, %zmm0, %zmm12
+; AVX512DQ-FCP-NEXT: vpmovqw %zmm12, %xmm12
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm12[0,1,2,3],ymm9[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm6 = zmm9[0,1,2,3],zmm6[0,1,2,3]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [1,3,2,3,1,3,5,7]
+; AVX512DQ-FCP-NEXT: vpermd %ymm5, %ymm9, %ymm5
+; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm5, %ymm12
+; AVX512DQ-FCP-NEXT: vpermd %ymm8, %ymm9, %ymm8
+; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm8, %ymm13
+; AVX512DQ-FCP-NEXT: vpermt2d %ymm12, %ymm11, %ymm13
+; AVX512DQ-FCP-NEXT: vpsrlq $32, %zmm1, %zmm12
+; AVX512DQ-FCP-NEXT: vpmovqw %zmm12, %xmm12
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm13[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpermd %ymm10, %ymm9, %ymm10
; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm10, %ymm2
-; AVX512DQ-FCP-NEXT: vpermd %ymm14, %ymm12, %ymm12
-; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm12, %ymm3
+; AVX512DQ-FCP-NEXT: vpermd %ymm14, %ymm9, %ymm9
+; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm9, %ymm3
; AVX512DQ-FCP-NEXT: vpermt2d %ymm2, %ymm11, %ymm3
; AVX512DQ-FCP-NEXT: vpsrlq $32, %zmm0, %zmm2
; AVX512DQ-FCP-NEXT: vpmovqw %zmm2, %xmm2
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm13[0,1,2,3]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm6, %ymm3
-; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm8, %ymm6
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3,4,5],ymm3[6,7]
+; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm12[0,1,2,3]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm5, %ymm3
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm8, %ymm5
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm5[0,1,2,3,4,5],ymm3[6,7]
; AVX512DQ-FCP-NEXT: vpsrlq $48, %zmm1, %zmm1
; AVX512DQ-FCP-NEXT: vpmovqw %zmm1, %xmm1
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm10, %ymm3
-; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm12, %ymm4
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm9, %ymm4
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
; AVX512DQ-FCP-NEXT: vpsrlq $48, %zmm0, %zmm0
; AVX512DQ-FCP-NEXT: vpmovqw %zmm0, %xmm0
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[0,1,2,3]
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm7, (%rsi)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm9, (%rdx)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm6, (%rdx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm2, (%rcx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, (%r8)
; AVX512DQ-FCP-NEXT: vzeroupper
@@ -5349,10 +5338,10 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-LABEL: load_i16_stride4_vf64:
; AVX2-FCP: # %bb.0:
; AVX2-FCP-NEXT: subq $680, %rsp # imm = 0x2A8
-; AVX2-FCP-NEXT: vmovdqa 320(%rdi), %ymm6
-; AVX2-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 352(%rdi), %ymm7
+; AVX2-FCP-NEXT: vmovdqa 320(%rdi), %ymm7
; AVX2-FCP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqa 352(%rdi), %ymm6
+; AVX2-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 64(%rdi), %ymm5
; AVX2-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 96(%rdi), %ymm4
@@ -5364,252 +5353,243 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm1[1,2,3],mem[4],ymm1[5,6,7],mem[8],ymm1[9,10,11],mem[12],ymm1[13,14,15]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX2-FCP-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-FCP-NEXT: vpackusdw %xmm0, %xmm2, %xmm0
-; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,2,2,3,0,2,4,6]
-; AVX2-FCP-NEXT: vpermd %ymm4, %ymm2, %ymm3
+; AVX2-FCP-NEXT: vpackusdw %xmm0, %xmm2, %xmm2
+; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm0 = [0,2,2,3,0,2,4,6]
+; AVX2-FCP-NEXT: vpermd %ymm4, %ymm0, %ymm3
; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm4 = [16,17,20,21,24,25,28,29,16,17,20,21,24,25,28,29,16,17,20,21,24,25,28,29,16,17,20,21,24,25,28,29]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,16,17,20,21,24,25,28,29]
; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm3
-; AVX2-FCP-NEXT: vmovdqa %ymm4, %ymm8
-; AVX2-FCP-NEXT: vpermd %ymm5, %ymm2, %ymm10
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,u,u,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm10, %ymm4
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm1[1,2,3],mem[4],ymm1[5,6,7],mem[8],ymm1[9,10,11],mem[12],ymm1[13,14,15]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX2-FCP-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
+; AVX2-FCP-NEXT: vpermd %ymm5, %ymm0, %ymm5
+; AVX2-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm5, %ymm5
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm5[0,1,2,3,4,5],ymm3[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm1[1,2,3],mem[4],ymm1[5,6,7],mem[8],ymm1[9,10,11],mem[12],ymm1[13,14,15]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
+; AVX2-FCP-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm3 = mem[0],ymm1[1,2,3],mem[4],ymm1[5,6,7],mem[8],ymm1[9,10,11],mem[12],ymm1[13,14,15]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm3, %xmm4
-; AVX2-FCP-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
-; AVX2-FCP-NEXT: vpackusdw %xmm0, %xmm3, %xmm0
-; AVX2-FCP-NEXT: vpermd %ymm7, %ymm2, %ymm3
-; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm3, %ymm4
-; AVX2-FCP-NEXT: vpermd %ymm6, %ymm2, %ymm7
-; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm7, %ymm6
-; AVX2-FCP-NEXT: vmovdqa %ymm5, %ymm9
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm6[0,1,2,3,4,5],ymm4[6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm1[1,2,3],mem[4],ymm1[5,6,7],mem[8],ymm1[9,10,11],mem[12],ymm1[13,14,15]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm4
-; AVX2-FCP-NEXT: vpackusdw %xmm4, %xmm0, %xmm0
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm4 = mem[0],ymm1[1,2,3],mem[4],ymm1[5,6,7],mem[8],ymm1[9,10,11],mem[12],ymm1[13,14,15]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm4, %xmm6
-; AVX2-FCP-NEXT: vpackusdw %xmm6, %xmm4, %xmm4
-; AVX2-FCP-NEXT: vpackusdw %xmm0, %xmm4, %xmm0
-; AVX2-FCP-NEXT: vmovdqa 448(%rdi), %ymm5
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm3, %xmm5
+; AVX2-FCP-NEXT: vpackusdw %xmm5, %xmm3, %xmm3
+; AVX2-FCP-NEXT: vpackusdw %xmm2, %xmm3, %xmm3
+; AVX2-FCP-NEXT: vpermd %ymm6, %ymm0, %ymm2
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm5
+; AVX2-FCP-NEXT: vpermd %ymm7, %ymm0, %ymm6
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm6, %ymm7
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm7[0,1,2,3,4,5],ymm5[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm5[4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm3 = mem[0],ymm1[1,2,3],mem[4],ymm1[5,6,7],mem[8],ymm1[9,10,11],mem[12],ymm1[13,14,15]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm3, %xmm5
+; AVX2-FCP-NEXT: vpackusdw %xmm5, %xmm3, %xmm3
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm5 = mem[0],ymm1[1,2,3],mem[4],ymm1[5,6,7],mem[8],ymm1[9,10,11],mem[12],ymm1[13,14,15]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm5, %xmm7
+; AVX2-FCP-NEXT: vpackusdw %xmm7, %xmm5, %xmm5
+; AVX2-FCP-NEXT: vmovdqa 448(%rdi), %ymm7
+; AVX2-FCP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpackusdw %xmm3, %xmm5, %xmm3
+; AVX2-FCP-NEXT: vmovdqa 480(%rdi), %ymm5
; AVX2-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 480(%rdi), %ymm4
-; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpermd %ymm4, %ymm2, %ymm4
-; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm4, %ymm4
-; AVX2-FCP-NEXT: vpermd %ymm5, %ymm2, %ymm12
-; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm12, %ymm6
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm6[0,1,2,3,4,5],ymm4[6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm1[1,2,3],mem[4],ymm1[5,6,7],mem[8],ymm1[9,10,11],mem[12],ymm1[13,14,15]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm4
-; AVX2-FCP-NEXT: vpackusdw %xmm4, %xmm0, %xmm0
+; AVX2-FCP-NEXT: vpermd %ymm5, %ymm0, %ymm5
+; AVX2-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm5, %ymm5
+; AVX2-FCP-NEXT: vpermd %ymm7, %ymm0, %ymm14
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm14, %ymm7
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm7[0,1,2,3,4,5],ymm5[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm5[4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm3 = mem[0],ymm1[1,2,3],mem[4],ymm1[5,6,7],mem[8],ymm1[9,10,11],mem[12],ymm1[13,14,15]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm3, %xmm5
+; AVX2-FCP-NEXT: vpackusdw %xmm5, %xmm3, %xmm3
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = mem[0],ymm1[1,2,3],mem[4],ymm1[5,6,7],mem[8],ymm1[9,10,11],mem[12],ymm1[13,14,15]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm4
-; AVX2-FCP-NEXT: vpackusdw %xmm4, %xmm1, %xmm1
-; AVX2-FCP-NEXT: vpackusdw %xmm0, %xmm1, %xmm0
-; AVX2-FCP-NEXT: vmovdqa 192(%rdi), %ymm1
-; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 224(%rdi), %ymm4
-; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpermd %ymm4, %ymm2, %ymm5
-; AVX2-FCP-NEXT: vpermd %ymm1, %ymm2, %ymm11
-; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm5, %ymm2
-; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm11, %ymm4
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3,4,5],ymm2[6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm5
+; AVX2-FCP-NEXT: vpackusdw %xmm5, %xmm1, %xmm1
+; AVX2-FCP-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
+; AVX2-FCP-NEXT: vmovdqa 192(%rdi), %ymm5
+; AVX2-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqa 224(%rdi), %ymm3
+; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpermd %ymm3, %ymm0, %ymm9
+; AVX2-FCP-NEXT: vpermd %ymm5, %ymm0, %ymm3
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm9, %ymm0
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm5
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3,4,5],ymm0[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 288(%rdi), %xmm1
; AVX2-FCP-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 304(%rdi), %xmm0
-; AVX2-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,2,3,2,3,10,11,8,9,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm0, %xmm0
-; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm1, %xmm6
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm13 = xmm6[0],xmm0[0],xmm6[1],xmm0[1]
-; AVX2-FCP-NEXT: vmovdqa 256(%rdi), %xmm0
-; AVX2-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 272(%rdi), %xmm9
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [2,3,10,11,8,9,10,11,8,9,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm9, %xmm14
-; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm15
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm14 = xmm15[0],xmm14[0],xmm15[1],xmm14[1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm13 = xmm14[0,1],xmm13[2,3]
-; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm6 = [18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
-; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm3, %ymm3
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm7, %ymm7
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm7[0,1,2,3,4,5],ymm3[6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm13[0,1,2,3],ymm3[4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %xmm13
-; AVX2-FCP-NEXT: vmovdqa 48(%rdi), %xmm7
-; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm7, %xmm3
-; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm13, %xmm14
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm15 = xmm14[0],xmm3[0],xmm14[1],xmm3[1]
-; AVX2-FCP-NEXT: vmovdqa (%rdi), %xmm3
-; AVX2-FCP-NEXT: vmovdqa 16(%rdi), %xmm14
-; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm14, %xmm0
-; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm3, %xmm2
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm15[2,3]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm2, %ymm2
-; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm10, %ymm10
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm10[0,1,2,3,4,5],ymm2[6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 160(%rdi), %xmm10
+; AVX2-FCP-NEXT: vmovdqa 304(%rdi), %xmm12
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [0,1,2,3,2,3,10,11,8,9,10,11,12,13,14,15]
+; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm12, %xmm0
+; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm1, %xmm1
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; AVX2-FCP-NEXT: vmovdqa 256(%rdi), %xmm10
; AVX2-FCP-NEXT: vmovdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 176(%rdi), %xmm0
-; AVX2-FCP-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
-; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm0, %xmm0
-; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm10, %xmm2
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; AVX2-FCP-NEXT: vmovdqa 128(%rdi), %xmm15
-; AVX2-FCP-NEXT: vmovdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 144(%rdi), %xmm2
-; AVX2-FCP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm2, %xmm2
-; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm15, %xmm10
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm10[0],xmm2[0],xmm10[1],xmm2[1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
-; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm5, %ymm2
-; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm11, %ymm5
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3,4,5],ymm2[6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 416(%rdi), %xmm5
-; AVX2-FCP-NEXT: vmovdqa 432(%rdi), %xmm10
-; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm10, %xmm0
-; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm5, %xmm2
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; AVX2-FCP-NEXT: vmovdqa 384(%rdi), %xmm4
-; AVX2-FCP-NEXT: vmovdqa 400(%rdi), %xmm2
-; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm2, %xmm11
-; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm4, %xmm1
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm11[0],xmm1[1],xmm11[1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm1, %ymm1
-; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm12, %ymm11
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm11[0,1,2,3,4,5],ymm1[6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm0 = [1,3,2,3,1,3,5,7]
-; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: vmovdqa 272(%rdi), %xmm7
+; AVX2-FCP-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [2,3,10,11,8,9,10,11,8,9,10,11,12,13,14,15]
+; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm7, %xmm7
+; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm10, %xmm8
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm7 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm7[0,1],xmm1[2,3]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
+; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm6, %ymm6
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0,1,2,3,4,5],ymm2[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %xmm2
+; AVX2-FCP-NEXT: vmovdqa 48(%rdi), %xmm1
+; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm1, %xmm6
+; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm2, %xmm7
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
+; AVX2-FCP-NEXT: vmovdqa (%rdi), %xmm7
+; AVX2-FCP-NEXT: vmovdqa 16(%rdi), %xmm15
+; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm15, %xmm11
+; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm7, %xmm13
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm11 = xmm13[0],xmm11[0],xmm13[1],xmm11[1]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm6 = xmm11[0,1],xmm6[2,3]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm8, %ymm11
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm8, %ymm13
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm13[0,1,2,3,4,5],ymm11[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm11[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm12 = [16,17,20,21,24,25,28,29,16,17,20,21,24,25,28,29,16,17,20,21,24,25,28,29,16,17,20,21,24,25,28,29]
-; AVX2-FCP-NEXT: vpshufb %ymm12, %ymm1, %ymm1
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm15 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,u,u,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm6, %ymm11
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm11[0,1,2,3,4,5],ymm1[6,7]
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm7[3,1,2,3]
-; AVX2-FCP-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm8 = xmm13[3,1,2,3]
+; AVX2-FCP-NEXT: vmovdqa 160(%rdi), %xmm8
; AVX2-FCP-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm6[0,1,2,0,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm11 = xmm8[0,1,2,0,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm7 = xmm11[0],xmm7[0],xmm11[1],xmm7[1]
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm14[3,1,2,3]
-; AVX2-FCP-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm8 = xmm3[3,1,2,3]
+; AVX2-FCP-NEXT: vmovdqa 176(%rdi), %xmm6
+; AVX2-FCP-NEXT: vmovdqa %xmm6, (%rsp) # 16-byte Spill
+; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm6, %xmm6
+; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm8, %xmm11
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm11[0],xmm6[0],xmm11[1],xmm6[1]
+; AVX2-FCP-NEXT: vmovdqa 128(%rdi), %xmm8
; AVX2-FCP-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm6[2,0,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm11 = xmm8[2,0,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm11[0],xmm3[0],xmm11[1],xmm3[1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0,1],xmm7[2,3]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqa 144(%rdi), %xmm11
+; AVX2-FCP-NEXT: vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm11, %xmm11
+; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm8, %xmm13
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm11 = xmm13[0],xmm11[0],xmm13[1],xmm11[1]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm6 = xmm11[0,1],xmm6[2,3]
+; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm9, %ymm9
+; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm3, %ymm3
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm9[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqa 416(%rdi), %xmm6
+; AVX2-FCP-NEXT: vmovdqa 432(%rdi), %xmm13
+; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm13, %xmm3
+; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm6, %xmm5
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm9 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
+; AVX2-FCP-NEXT: vmovdqa 384(%rdi), %xmm5
+; AVX2-FCP-NEXT: vmovdqa 400(%rdi), %xmm3
+; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm3, %xmm11
+; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm5, %xmm0
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm11[0],xmm0[1],xmm11[1]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm9[2,3]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm8, %ymm9
+; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm14, %ymm11
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm11[0,1,2,3,4,5],ymm9[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm0 = [1,3,2,3,1,3,5,7]
+; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm8, %ymm9
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm11, %ymm11
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm11[0,1,2,3,4,5],ymm9[6,7]
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX2-FCP-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; AVX2-FCP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,2,0,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,2,0,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm2 = xmm15[3,1,2,3]
+; AVX2-FCP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[3,1,2,3]
+; AVX2-FCP-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[2,0,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm7[2,0,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm7[0],xmm2[0],xmm7[1],xmm2[1]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm9[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpshufb %ymm12, %ymm1, %ymm1
-; AVX2-FCP-NEXT: vmovdqa %ymm12, %ymm6
-; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm3, %ymm3
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm1[6,7]
-; AVX2-FCP-NEXT: vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
-; AVX2-FCP-NEXT: # xmm3 = mem[3,1,2,3]
-; AVX2-FCP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm1, %ymm1
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm2 = xmm12[3,1,2,3]
+; AVX2-FCP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FCP-NEXT: vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
; AVX2-FCP-NEXT: # xmm14 = mem[3,1,2,3]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[0,1,2,0,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,2,0,4,5,6,7]
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm14[0,1,2,0,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm7[0],xmm3[0],xmm7[1],xmm3[1]
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm13 = xmm9[3,1,2,3]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm7[0],xmm2[0],xmm7[1],xmm2[1]
; AVX2-FCP-NEXT: vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
; AVX2-FCP-NEXT: # xmm11 = mem[3,1,2,3]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm9 = xmm13[2,0,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
+; AVX2-FCP-NEXT: # xmm9 = mem[3,1,2,3]
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm12 = xmm11[2,0,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm9 = xmm12[0],xmm9[0],xmm12[1],xmm9[1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm9[0,1],xmm3[2,3]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm15 = xmm9[2,0,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm12 = xmm15[0],xmm12[0],xmm15[1],xmm12[1]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm12[0,1],xmm2[2,3]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm1, %ymm1
-; AVX2-FCP-NEXT: vmovdqa %ymm15, %ymm7
-; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm3, %ymm3
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm1[6,7]
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm3 = xmm10[3,1,2,3]
-; AVX2-FCP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[3,1,2,3]
-; AVX2-FCP-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[0,1,2,0,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm5[0,1,2,0,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm1, %ymm1
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm2 = xmm13[3,1,2,3]
; AVX2-FCP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm15 = xmm4[3,1,2,3]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[2,0,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm15[2,0,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3]
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[3,1,2,3]
+; AVX2-FCP-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,2,0,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm6 = xmm6[0,1,2,0,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm6[0],xmm2[0],xmm6[1],xmm2[1]
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm13 = xmm3[3,1,2,3]
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm12 = xmm5[3,1,2,3]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm13[2,0,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm12[2,0,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm9 # 32-byte Folded Reload
; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm9, %ymm0
-; AVX2-FCP-NEXT: vpshufb %ymm7, %ymm8, %ymm1
+; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm8, %ymm0
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm7, %ymm1
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX2-FCP-NEXT: vpshufd $231, (%rsp), %xmm7 # 16-byte Folded Reload
-; AVX2-FCP-NEXT: # xmm7 = mem[3,1,2,3]
+; AVX2-FCP-NEXT: vpshufd $231, (%rsp), %xmm6 # 16-byte Folded Reload
+; AVX2-FCP-NEXT: # xmm6 = mem[3,1,2,3]
; AVX2-FCP-NEXT: vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
; AVX2-FCP-NEXT: # xmm5 = mem[3,1,2,3]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm1 = xmm7[0,1,2,0,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm1 = xmm6[0,1,2,0,4,5,6,7]
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm5[0,1,2,0,4,5,6,7]
; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX2-FCP-NEXT: vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; AVX2-FCP-NEXT: # xmm4 = mem[3,1,2,3]
-; AVX2-FCP-NEXT: vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
-; AVX2-FCP-NEXT: # xmm6 = mem[3,1,2,3]
+; AVX2-FCP-NEXT: vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
+; AVX2-FCP-NEXT: # xmm2 = mem[3,1,2,3]
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm0 = xmm4[2,0,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm12 = xmm6[2,0,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm12[0],xmm0[0],xmm12[1],xmm0[1]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm15 = xmm2[2,0,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm10 = [18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm0, %ymm0
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm2, %ymm1, %ymm1
+; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm1, %ymm1
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT: vpshuflw $116, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; AVX2-FCP-NEXT: # xmm1 = mem[0,1,3,1,4,5,6,7]
@@ -5618,51 +5598,48 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; AVX2-FCP-NEXT: vpshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
; AVX2-FCP-NEXT: # xmm3 = mem[3,1,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
-; AVX2-FCP-NEXT: # xmm12 = mem[3,1,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm12[0],xmm3[0],xmm12[1],xmm3[1]
+; AVX2-FCP-NEXT: vpshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
+; AVX2-FCP-NEXT: # xmm15 = mem[3,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm15[0],xmm3[0],xmm15[1],xmm3[1]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm15 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm0, %ymm0
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm2, %ymm1, %ymm1
+; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm1, %ymm1
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT: vpshuflw $116, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; AVX2-FCP-NEXT: # xmm1 = mem[0,1,3,1,4,5,6,7]
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm14[0,1,3,1,4,5,6,7]
; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm13[3,1,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm11 = xmm11[3,1,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm11[0],xmm3[0],xmm11[1],xmm3[1]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm11[3,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm9 = xmm9[3,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm9[0],xmm3[0],xmm9[1],xmm3[1]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm9, %ymm1
-; AVX2-FCP-NEXT: vmovdqa %ymm2, %ymm9
-; AVX2-FCP-NEXT: vpshufb %ymm2, %ymm8, %ymm3
+; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm8, %ymm1
+; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm7, %ymm3
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm1[6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm7[0,1,3,1,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm6[0,1,3,1,4,5,6,7]
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm5[0,1,3,1,4,5,6,7]
; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm4[3,1,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm6[3,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7]
; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm2, %ymm2
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm3, %ymm3
+; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm3, %ymm3
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
; AVX2-FCP-NEXT: vpshuflw $116, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
; AVX2-FCP-NEXT: # xmm3 = mem[0,1,3,1,4,5,6,7]
; AVX2-FCP-NEXT: vpshuflw $116, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; AVX2-FCP-NEXT: # xmm4 = mem[0,1,3,1,4,5,6,7]
; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
-; AVX2-FCP-NEXT: vpshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
-; AVX2-FCP-NEXT: # xmm4 = mem[3,1,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm15[3,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm13[3,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm12[3,1,2,3,4,5,6,7]
; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
@@ -5693,8 +5670,7 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vmovdqa %ymm2, 96(%r8)
; AVX2-FCP-NEXT: vmovdqa %ymm1, 32(%r8)
; AVX2-FCP-NEXT: vmovdqa %ymm0, 64(%r8)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm0, (%r8)
+; AVX2-FCP-NEXT: vmovdqa %ymm15, (%r8)
; AVX2-FCP-NEXT: addq $680, %rsp # imm = 0x2A8
; AVX2-FCP-NEXT: vzeroupper
; AVX2-FCP-NEXT: retq
@@ -5976,148 +5952,147 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
;
; AVX512-FCP-LABEL: load_i16_stride4_vf64:
; AVX512-FCP: # %bb.0:
-; AVX512-FCP-NEXT: vmovdqa64 256(%rdi), %zmm23
-; AVX512-FCP-NEXT: vmovdqa64 384(%rdi), %zmm26
-; AVX512-FCP-NEXT: vmovdqa64 (%rdi), %zmm30
+; AVX512-FCP-NEXT: vmovdqa64 256(%rdi), %zmm22
+; AVX512-FCP-NEXT: vmovdqa64 384(%rdi), %zmm25
+; AVX512-FCP-NEXT: vmovdqa64 (%rdi), %zmm2
; AVX512-FCP-NEXT: vmovdqa64 128(%rdi), %zmm4
; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [16,17,20,21,0,0,0,0,24,25,28,29,0,0,0,0,16,17,20,21,0,0,0,0,24,25,28,29,0,0,0,0]
; AVX512-FCP-NEXT: # ymm5 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,2,2,3,0,2,4,6]
-; AVX512-FCP-NEXT: vmovdqa64 224(%rdi), %ymm24
-; AVX512-FCP-NEXT: vpermd %ymm24, %ymm1, %ymm10
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [0,2,2,3,0,2,4,6]
+; AVX512-FCP-NEXT: vmovdqa64 224(%rdi), %ymm23
+; AVX512-FCP-NEXT: vpermd %ymm23, %ymm9, %ymm10
; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm10, %ymm0
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,4,5,u,u,u,u,8,9,12,13,12,13,14,15,16,17,20,21,u,u,u,u,24,25,28,29,u,u,u,u]
-; AVX512-FCP-NEXT: vmovdqa64 192(%rdi), %ymm25
-; AVX512-FCP-NEXT: vpermd %ymm25, %ymm1, %ymm11
-; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm11, %ymm3
+; AVX512-FCP-NEXT: vmovdqa64 192(%rdi), %ymm24
+; AVX512-FCP-NEXT: vpermd %ymm24, %ymm9, %ymm3
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm3, %ymm1
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,2,2,3,4,6,12,14]
-; AVX512-FCP-NEXT: vpermt2d %ymm0, %ymm7, %ymm3
+; AVX512-FCP-NEXT: vpermt2d %ymm0, %ymm7, %ymm1
; AVX512-FCP-NEXT: vpmovqw %zmm4, %xmm0
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
-; AVX512-FCP-NEXT: vmovdqa64 96(%rdi), %ymm27
-; AVX512-FCP-NEXT: vpermd %ymm27, %ymm1, %ymm3
-; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm3, %ymm9
-; AVX512-FCP-NEXT: vmovdqa64 64(%rdi), %ymm28
-; AVX512-FCP-NEXT: vpermd %ymm28, %ymm1, %ymm8
-; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm8, %ymm12
-; AVX512-FCP-NEXT: vpermt2d %ymm9, %ymm7, %ymm12
-; AVX512-FCP-NEXT: vpmovqw %zmm30, %xmm9
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm12[4,5,6,7]
-; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm21 = zmm9[0,1,2,3],zmm0[0,1,2,3]
-; AVX512-FCP-NEXT: vmovdqa64 480(%rdi), %ymm16
-; AVX512-FCP-NEXT: vpermd %ymm16, %ymm1, %ymm0
-; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm0, %ymm9
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512-FCP-NEXT: vmovdqa64 96(%rdi), %ymm26
+; AVX512-FCP-NEXT: vpermd %ymm26, %ymm9, %ymm8
+; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm8, %ymm11
+; AVX512-FCP-NEXT: vmovdqa64 64(%rdi), %ymm27
+; AVX512-FCP-NEXT: vpermd %ymm27, %ymm9, %ymm0
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm12
+; AVX512-FCP-NEXT: vpermt2d %ymm11, %ymm7, %ymm12
+; AVX512-FCP-NEXT: vpmovqw %zmm2, %xmm11
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm12[4,5,6,7]
+; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm20 = zmm11[0,1,2,3],zmm1[0,1,2,3]
+; AVX512-FCP-NEXT: vmovdqa64 480(%rdi), %ymm28
+; AVX512-FCP-NEXT: vpermd %ymm28, %ymm9, %ymm11
+; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm11, %ymm1
; AVX512-FCP-NEXT: vmovdqa64 448(%rdi), %ymm17
-; AVX512-FCP-NEXT: vpermd %ymm17, %ymm1, %ymm12
+; AVX512-FCP-NEXT: vpermd %ymm17, %ymm9, %ymm12
; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm12, %ymm13
-; AVX512-FCP-NEXT: vpermt2d %ymm9, %ymm7, %ymm13
-; AVX512-FCP-NEXT: vpmovqw %zmm26, %xmm9
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm13[4,5,6,7]
+; AVX512-FCP-NEXT: vpermt2d %ymm1, %ymm7, %ymm13
+; AVX512-FCP-NEXT: vpmovqw %zmm25, %xmm1
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm1[0,1,2,3],ymm13[4,5,6,7]
; AVX512-FCP-NEXT: vmovdqa64 352(%rdi), %ymm18
-; AVX512-FCP-NEXT: vpermd %ymm18, %ymm1, %ymm13
-; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm13, %ymm14
-; AVX512-FCP-NEXT: vmovdqa64 320(%rdi), %ymm20
-; AVX512-FCP-NEXT: vpermd %ymm20, %ymm1, %ymm1
-; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm1, %ymm15
-; AVX512-FCP-NEXT: vpermt2d %ymm14, %ymm7, %ymm15
-; AVX512-FCP-NEXT: vpmovqw %zmm23, %xmm14
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm15[4,5,6,7]
-; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm22 = zmm14[0,1,2,3],zmm9[0,1,2,3]
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} ymm9 = [18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
-; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm10, %ymm14
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm11, %ymm11
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5],ymm14[6,7]
-; AVX512-FCP-NEXT: vpsrlq $16, %zmm4, %zmm14
-; AVX512-FCP-NEXT: vpmovqw %zmm14, %xmm14
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm14[0,1,2,3],ymm11[4,5,6,7]
+; AVX512-FCP-NEXT: vpermd %ymm18, %ymm9, %ymm1
+; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm1, %ymm15
+; AVX512-FCP-NEXT: vmovdqa64 320(%rdi), %ymm19
+; AVX512-FCP-NEXT: vpermd %ymm19, %ymm9, %ymm13
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm13, %ymm9
+; AVX512-FCP-NEXT: vpermt2d %ymm15, %ymm7, %ymm9
+; AVX512-FCP-NEXT: vpmovqw %zmm22, %xmm15
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm15[0,1,2,3],ymm9[4,5,6,7]
+; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm21 = zmm9[0,1,2,3],zmm14[0,1,2,3]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm10, %ymm10
; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm3, %ymm3
-; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm8, %ymm8
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm8[0,1,2,3,4,5],ymm3[6,7]
-; AVX512-FCP-NEXT: vpsrlq $16, %zmm30, %zmm8
-; AVX512-FCP-NEXT: vpmovqw %zmm8, %xmm8
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm8[0,1,2,3],ymm3[4,5,6,7]
-; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm29 = zmm3[0,1,2,3],zmm11[0,1,2,3]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm10[6,7]
+; AVX512-FCP-NEXT: vpsrlq $16, %zmm4, %zmm10
+; AVX512-FCP-NEXT: vpmovqw %zmm10, %xmm10
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm10[0,1,2,3],ymm3[4,5,6,7]
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm8, %ymm8
; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm12, %ymm3
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm8[6,7]
+; AVX512-FCP-NEXT: vpsrlq $16, %zmm2, %zmm8
+; AVX512-FCP-NEXT: vpmovqw %zmm8, %xmm8
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
+; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm29 = zmm0[0,1,2,3],zmm3[0,1,2,3]
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm11, %ymm0
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm12, %ymm3
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512-FCP-NEXT: vpsrlq $16, %zmm26, %zmm3
+; AVX512-FCP-NEXT: vpsrlq $16, %zmm25, %zmm3
; AVX512-FCP-NEXT: vpmovqw %zmm3, %xmm3
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm1, %ymm1
; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm13, %ymm3
-; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm1, %ymm1
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm3[6,7]
-; AVX512-FCP-NEXT: vpsrlq $16, %zmm23, %zmm3
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm1[6,7]
+; AVX512-FCP-NEXT: vpsrlq $16, %zmm22, %zmm3
; AVX512-FCP-NEXT: vpmovqw %zmm3, %xmm3
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
-; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm19 = zmm1[0,1,2,3],zmm0[0,1,2,3]
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm15 = [1,3,2,3,1,3,5,7]
-; AVX512-FCP-NEXT: vpermd %ymm24, %ymm15, %ymm3
+; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm16 = zmm1[0,1,2,3],zmm0[0,1,2,3]
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm14 = [1,3,2,3,1,3,5,7]
+; AVX512-FCP-NEXT: vpermd %ymm23, %ymm14, %ymm3
; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm3, %ymm0
-; AVX512-FCP-NEXT: vpermd %ymm25, %ymm15, %ymm8
+; AVX512-FCP-NEXT: vpermd %ymm24, %ymm14, %ymm8
; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm8, %ymm1
; AVX512-FCP-NEXT: vpermt2d %ymm0, %ymm7, %ymm1
; AVX512-FCP-NEXT: vpsrlq $32, %zmm4, %zmm0
; AVX512-FCP-NEXT: vpmovqw %zmm0, %xmm0
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512-FCP-NEXT: vpermd %ymm27, %ymm15, %ymm0
-; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm0, %ymm13
-; AVX512-FCP-NEXT: vpermd %ymm28, %ymm15, %ymm12
-; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm12, %ymm14
-; AVX512-FCP-NEXT: vpermt2d %ymm13, %ymm7, %ymm14
-; AVX512-FCP-NEXT: vpsrlq $32, %zmm30, %zmm13
-; AVX512-FCP-NEXT: vpmovqw %zmm13, %xmm13
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm14[4,5,6,7]
-; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm24 = zmm13[0,1,2,3],zmm1[0,1,2,3]
-; AVX512-FCP-NEXT: vpermd %ymm16, %ymm15, %ymm13
-; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm13, %ymm1
-; AVX512-FCP-NEXT: vpermd %ymm17, %ymm15, %ymm14
-; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm14, %ymm11
-; AVX512-FCP-NEXT: vpermt2d %ymm1, %ymm7, %ymm11
-; AVX512-FCP-NEXT: vpsrlq $32, %zmm26, %zmm1
+; AVX512-FCP-NEXT: vpermd %ymm26, %ymm14, %ymm0
+; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm0, %ymm12
+; AVX512-FCP-NEXT: vpermd %ymm27, %ymm14, %ymm11
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm11, %ymm13
+; AVX512-FCP-NEXT: vpermt2d %ymm12, %ymm7, %ymm13
+; AVX512-FCP-NEXT: vpsrlq $32, %zmm2, %zmm12
+; AVX512-FCP-NEXT: vpmovqw %zmm12, %xmm12
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm13[4,5,6,7]
+; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm23 = zmm12[0,1,2,3],zmm1[0,1,2,3]
+; AVX512-FCP-NEXT: vpermd %ymm28, %ymm14, %ymm12
+; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm12, %ymm1
+; AVX512-FCP-NEXT: vpermd %ymm17, %ymm14, %ymm13
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm13, %ymm10
+; AVX512-FCP-NEXT: vpermt2d %ymm1, %ymm7, %ymm10
+; AVX512-FCP-NEXT: vpsrlq $32, %zmm25, %zmm1
; AVX512-FCP-NEXT: vpmovqw %zmm1, %xmm1
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm1[0,1,2,3],ymm11[4,5,6,7]
-; AVX512-FCP-NEXT: vpermd %ymm18, %ymm15, %ymm1
-; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm1, %ymm2
-; AVX512-FCP-NEXT: vpermd %ymm20, %ymm15, %ymm5
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm1[0,1,2,3],ymm10[4,5,6,7]
+; AVX512-FCP-NEXT: vpermd %ymm18, %ymm14, %ymm1
+; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm1, %ymm15
+; AVX512-FCP-NEXT: vpermd %ymm19, %ymm14, %ymm5
; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm5, %ymm6
-; AVX512-FCP-NEXT: vpermt2d %ymm2, %ymm7, %ymm6
-; AVX512-FCP-NEXT: vpsrlq $32, %zmm23, %zmm2
-; AVX512-FCP-NEXT: vpmovqw %zmm2, %xmm2
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm6[4,5,6,7]
-; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm11[0,1,2,3]
+; AVX512-FCP-NEXT: vpermt2d %ymm15, %ymm7, %ymm6
+; AVX512-FCP-NEXT: vpsrlq $32, %zmm22, %zmm7
+; AVX512-FCP-NEXT: vpmovqw %zmm7, %xmm7
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm6 = zmm6[0,1,2,3],zmm10[0,1,2,3]
; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm3, %ymm3
-; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm8, %ymm6
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3,4,5],ymm3[6,7]
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm8, %ymm7
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm7[0,1,2,3,4,5],ymm3[6,7]
; AVX512-FCP-NEXT: vpsrlq $48, %zmm4, %zmm4
; AVX512-FCP-NEXT: vpmovqw %zmm4, %xmm4
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm12, %ymm4
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm11, %ymm4
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4,5],ymm0[6,7]
-; AVX512-FCP-NEXT: vpsrlq $48, %zmm30, %zmm4
-; AVX512-FCP-NEXT: vpmovqw %zmm4, %xmm4
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
+; AVX512-FCP-NEXT: vpsrlq $48, %zmm2, %zmm2
+; AVX512-FCP-NEXT: vpmovqw %zmm2, %xmm2
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm3[0,1,2,3]
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm12, %ymm2
; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm13, %ymm3
-; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm14, %ymm4
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
-; AVX512-FCP-NEXT: vpsrlq $48, %zmm26, %zmm4
-; AVX512-FCP-NEXT: vpmovqw %zmm4, %xmm4
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
+; AVX512-FCP-NEXT: vpsrlq $48, %zmm25, %zmm3
+; AVX512-FCP-NEXT: vpmovqw %zmm3, %xmm3
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm1, %ymm1
-; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm5, %ymm4
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3,4,5],ymm1[6,7]
-; AVX512-FCP-NEXT: vpsrlq $48, %zmm23, %zmm4
-; AVX512-FCP-NEXT: vpmovqw %zmm4, %xmm4
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3],ymm1[4,5,6,7]
-; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm3[0,1,2,3]
-; AVX512-FCP-NEXT: vmovdqa64 %zmm22, 64(%rsi)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm21, (%rsi)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm19, 64(%rdx)
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm5, %ymm3
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm1[6,7]
+; AVX512-FCP-NEXT: vpsrlq $48, %zmm22, %zmm3
+; AVX512-FCP-NEXT: vpmovqw %zmm3, %xmm3
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm2[0,1,2,3]
+; AVX512-FCP-NEXT: vmovdqa64 %zmm21, 64(%rsi)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm20, (%rsi)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm16, 64(%rdx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm29, (%rdx)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm2, 64(%rcx)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm24, (%rcx)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm6, 64(%rcx)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm23, (%rcx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm1, 64(%r8)
; AVX512-FCP-NEXT: vmovdqa64 %zmm0, (%r8)
; AVX512-FCP-NEXT: vzeroupper
@@ -6400,148 +6375,147 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
;
; AVX512DQ-FCP-LABEL: load_i16_stride4_vf64:
; AVX512DQ-FCP: # %bb.0:
-; AVX512DQ-FCP-NEXT: vmovdqa64 256(%rdi), %zmm23
-; AVX512DQ-FCP-NEXT: vmovdqa64 384(%rdi), %zmm26
-; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdi), %zmm30
+; AVX512DQ-FCP-NEXT: vmovdqa64 256(%rdi), %zmm22
+; AVX512DQ-FCP-NEXT: vmovdqa64 384(%rdi), %zmm25
+; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdi), %zmm2
; AVX512DQ-FCP-NEXT: vmovdqa64 128(%rdi), %zmm4
; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [16,17,20,21,0,0,0,0,24,25,28,29,0,0,0,0,16,17,20,21,0,0,0,0,24,25,28,29,0,0,0,0]
; AVX512DQ-FCP-NEXT: # ymm5 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,2,2,3,0,2,4,6]
-; AVX512DQ-FCP-NEXT: vmovdqa64 224(%rdi), %ymm24
-; AVX512DQ-FCP-NEXT: vpermd %ymm24, %ymm1, %ymm10
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [0,2,2,3,0,2,4,6]
+; AVX512DQ-FCP-NEXT: vmovdqa64 224(%rdi), %ymm23
+; AVX512DQ-FCP-NEXT: vpermd %ymm23, %ymm9, %ymm10
; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm10, %ymm0
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,4,5,u,u,u,u,8,9,12,13,12,13,14,15,16,17,20,21,u,u,u,u,24,25,28,29,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vmovdqa64 192(%rdi), %ymm25
-; AVX512DQ-FCP-NEXT: vpermd %ymm25, %ymm1, %ymm11
-; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm11, %ymm3
+; AVX512DQ-FCP-NEXT: vmovdqa64 192(%rdi), %ymm24
+; AVX512DQ-FCP-NEXT: vpermd %ymm24, %ymm9, %ymm3
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm3, %ymm1
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,2,2,3,4,6,12,14]
-; AVX512DQ-FCP-NEXT: vpermt2d %ymm0, %ymm7, %ymm3
+; AVX512DQ-FCP-NEXT: vpermt2d %ymm0, %ymm7, %ymm1
; AVX512DQ-FCP-NEXT: vpmovqw %zmm4, %xmm0
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 96(%rdi), %ymm27
-; AVX512DQ-FCP-NEXT: vpermd %ymm27, %ymm1, %ymm3
-; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm3, %ymm9
-; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rdi), %ymm28
-; AVX512DQ-FCP-NEXT: vpermd %ymm28, %ymm1, %ymm8
-; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm8, %ymm12
-; AVX512DQ-FCP-NEXT: vpermt2d %ymm9, %ymm7, %ymm12
-; AVX512DQ-FCP-NEXT: vpmovqw %zmm30, %xmm9
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm12[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm21 = zmm9[0,1,2,3],zmm0[0,1,2,3]
-; AVX512DQ-FCP-NEXT: vmovdqa64 480(%rdi), %ymm16
-; AVX512DQ-FCP-NEXT: vpermd %ymm16, %ymm1, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm0, %ymm9
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 96(%rdi), %ymm26
+; AVX512DQ-FCP-NEXT: vpermd %ymm26, %ymm9, %ymm8
+; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm8, %ymm11
+; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rdi), %ymm27
+; AVX512DQ-FCP-NEXT: vpermd %ymm27, %ymm9, %ymm0
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm12
+; AVX512DQ-FCP-NEXT: vpermt2d %ymm11, %ymm7, %ymm12
+; AVX512DQ-FCP-NEXT: vpmovqw %zmm2, %xmm11
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm12[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm20 = zmm11[0,1,2,3],zmm1[0,1,2,3]
+; AVX512DQ-FCP-NEXT: vmovdqa64 480(%rdi), %ymm28
+; AVX512DQ-FCP-NEXT: vpermd %ymm28, %ymm9, %ymm11
+; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm11, %ymm1
; AVX512DQ-FCP-NEXT: vmovdqa64 448(%rdi), %ymm17
-; AVX512DQ-FCP-NEXT: vpermd %ymm17, %ymm1, %ymm12
+; AVX512DQ-FCP-NEXT: vpermd %ymm17, %ymm9, %ymm12
; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm12, %ymm13
-; AVX512DQ-FCP-NEXT: vpermt2d %ymm9, %ymm7, %ymm13
-; AVX512DQ-FCP-NEXT: vpmovqw %zmm26, %xmm9
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm13[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpermt2d %ymm1, %ymm7, %ymm13
+; AVX512DQ-FCP-NEXT: vpmovqw %zmm25, %xmm1
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm1[0,1,2,3],ymm13[4,5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa64 352(%rdi), %ymm18
-; AVX512DQ-FCP-NEXT: vpermd %ymm18, %ymm1, %ymm13
-; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm13, %ymm14
-; AVX512DQ-FCP-NEXT: vmovdqa64 320(%rdi), %ymm20
-; AVX512DQ-FCP-NEXT: vpermd %ymm20, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm1, %ymm15
-; AVX512DQ-FCP-NEXT: vpermt2d %ymm14, %ymm7, %ymm15
-; AVX512DQ-FCP-NEXT: vpmovqw %zmm23, %xmm14
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm15[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm22 = zmm14[0,1,2,3],zmm9[0,1,2,3]
-; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} ymm9 = [18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm10, %ymm14
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm11, %ymm11
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5],ymm14[6,7]
-; AVX512DQ-FCP-NEXT: vpsrlq $16, %zmm4, %zmm14
-; AVX512DQ-FCP-NEXT: vpmovqw %zmm14, %xmm14
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm14[0,1,2,3],ymm11[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpermd %ymm18, %ymm9, %ymm1
+; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm1, %ymm15
+; AVX512DQ-FCP-NEXT: vmovdqa64 320(%rdi), %ymm19
+; AVX512DQ-FCP-NEXT: vpermd %ymm19, %ymm9, %ymm13
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm13, %ymm9
+; AVX512DQ-FCP-NEXT: vpermt2d %ymm15, %ymm7, %ymm9
+; AVX512DQ-FCP-NEXT: vpmovqw %zmm22, %xmm15
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm15[0,1,2,3],ymm9[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm21 = zmm9[0,1,2,3],zmm14[0,1,2,3]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm10, %ymm10
; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm3, %ymm3
-; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm8, %ymm8
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm8[0,1,2,3,4,5],ymm3[6,7]
-; AVX512DQ-FCP-NEXT: vpsrlq $16, %zmm30, %zmm8
-; AVX512DQ-FCP-NEXT: vpmovqw %zmm8, %xmm8
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm8[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm29 = zmm3[0,1,2,3],zmm11[0,1,2,3]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm10[6,7]
+; AVX512DQ-FCP-NEXT: vpsrlq $16, %zmm4, %zmm10
+; AVX512DQ-FCP-NEXT: vpmovqw %zmm10, %xmm10
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm10[0,1,2,3],ymm3[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm8, %ymm8
; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm12, %ymm3
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm8[6,7]
+; AVX512DQ-FCP-NEXT: vpsrlq $16, %zmm2, %zmm8
+; AVX512DQ-FCP-NEXT: vpmovqw %zmm8, %xmm8
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm29 = zmm0[0,1,2,3],zmm3[0,1,2,3]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm11, %ymm0
+; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm12, %ymm3
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512DQ-FCP-NEXT: vpsrlq $16, %zmm26, %zmm3
+; AVX512DQ-FCP-NEXT: vpsrlq $16, %zmm25, %zmm3
; AVX512DQ-FCP-NEXT: vpmovqw %zmm3, %xmm3
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm1, %ymm1
; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm13, %ymm3
-; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm3[6,7]
-; AVX512DQ-FCP-NEXT: vpsrlq $16, %zmm23, %zmm3
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm1[6,7]
+; AVX512DQ-FCP-NEXT: vpsrlq $16, %zmm22, %zmm3
; AVX512DQ-FCP-NEXT: vpmovqw %zmm3, %xmm3
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm19 = zmm1[0,1,2,3],zmm0[0,1,2,3]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm15 = [1,3,2,3,1,3,5,7]
-; AVX512DQ-FCP-NEXT: vpermd %ymm24, %ymm15, %ymm3
+; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm16 = zmm1[0,1,2,3],zmm0[0,1,2,3]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm14 = [1,3,2,3,1,3,5,7]
+; AVX512DQ-FCP-NEXT: vpermd %ymm23, %ymm14, %ymm3
; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm3, %ymm0
-; AVX512DQ-FCP-NEXT: vpermd %ymm25, %ymm15, %ymm8
+; AVX512DQ-FCP-NEXT: vpermd %ymm24, %ymm14, %ymm8
; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm8, %ymm1
; AVX512DQ-FCP-NEXT: vpermt2d %ymm0, %ymm7, %ymm1
; AVX512DQ-FCP-NEXT: vpsrlq $32, %zmm4, %zmm0
; AVX512DQ-FCP-NEXT: vpmovqw %zmm0, %xmm0
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpermd %ymm27, %ymm15, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm0, %ymm13
-; AVX512DQ-FCP-NEXT: vpermd %ymm28, %ymm15, %ymm12
-; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm12, %ymm14
-; AVX512DQ-FCP-NEXT: vpermt2d %ymm13, %ymm7, %ymm14
-; AVX512DQ-FCP-NEXT: vpsrlq $32, %zmm30, %zmm13
-; AVX512DQ-FCP-NEXT: vpmovqw %zmm13, %xmm13
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm14[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm24 = zmm13[0,1,2,3],zmm1[0,1,2,3]
-; AVX512DQ-FCP-NEXT: vpermd %ymm16, %ymm15, %ymm13
-; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm13, %ymm1
-; AVX512DQ-FCP-NEXT: vpermd %ymm17, %ymm15, %ymm14
-; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm14, %ymm11
-; AVX512DQ-FCP-NEXT: vpermt2d %ymm1, %ymm7, %ymm11
-; AVX512DQ-FCP-NEXT: vpsrlq $32, %zmm26, %zmm1
+; AVX512DQ-FCP-NEXT: vpermd %ymm26, %ymm14, %ymm0
+; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm0, %ymm12
+; AVX512DQ-FCP-NEXT: vpermd %ymm27, %ymm14, %ymm11
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm11, %ymm13
+; AVX512DQ-FCP-NEXT: vpermt2d %ymm12, %ymm7, %ymm13
+; AVX512DQ-FCP-NEXT: vpsrlq $32, %zmm2, %zmm12
+; AVX512DQ-FCP-NEXT: vpmovqw %zmm12, %xmm12
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm13[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm23 = zmm12[0,1,2,3],zmm1[0,1,2,3]
+; AVX512DQ-FCP-NEXT: vpermd %ymm28, %ymm14, %ymm12
+; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm12, %ymm1
+; AVX512DQ-FCP-NEXT: vpermd %ymm17, %ymm14, %ymm13
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm13, %ymm10
+; AVX512DQ-FCP-NEXT: vpermt2d %ymm1, %ymm7, %ymm10
+; AVX512DQ-FCP-NEXT: vpsrlq $32, %zmm25, %zmm1
; AVX512DQ-FCP-NEXT: vpmovqw %zmm1, %xmm1
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm1[0,1,2,3],ymm11[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpermd %ymm18, %ymm15, %ymm1
-; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm1, %ymm2
-; AVX512DQ-FCP-NEXT: vpermd %ymm20, %ymm15, %ymm5
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm1[0,1,2,3],ymm10[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpermd %ymm18, %ymm14, %ymm1
+; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm1, %ymm15
+; AVX512DQ-FCP-NEXT: vpermd %ymm19, %ymm14, %ymm5
; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm5, %ymm6
-; AVX512DQ-FCP-NEXT: vpermt2d %ymm2, %ymm7, %ymm6
-; AVX512DQ-FCP-NEXT: vpsrlq $32, %zmm23, %zmm2
-; AVX512DQ-FCP-NEXT: vpmovqw %zmm2, %xmm2
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm6[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm11[0,1,2,3]
+; AVX512DQ-FCP-NEXT: vpermt2d %ymm15, %ymm7, %ymm6
+; AVX512DQ-FCP-NEXT: vpsrlq $32, %zmm22, %zmm7
+; AVX512DQ-FCP-NEXT: vpmovqw %zmm7, %xmm7
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm6 = zmm6[0,1,2,3],zmm10[0,1,2,3]
; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm3, %ymm3
-; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm8, %ymm6
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3,4,5],ymm3[6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm8, %ymm7
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm7[0,1,2,3,4,5],ymm3[6,7]
; AVX512DQ-FCP-NEXT: vpsrlq $48, %zmm4, %zmm4
; AVX512DQ-FCP-NEXT: vpmovqw %zmm4, %xmm4
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm12, %ymm4
+; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm11, %ymm4
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4,5],ymm0[6,7]
-; AVX512DQ-FCP-NEXT: vpsrlq $48, %zmm30, %zmm4
-; AVX512DQ-FCP-NEXT: vpmovqw %zmm4, %xmm4
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpsrlq $48, %zmm2, %zmm2
+; AVX512DQ-FCP-NEXT: vpmovqw %zmm2, %xmm2
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm3[0,1,2,3]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm12, %ymm2
; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm13, %ymm3
-; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm14, %ymm4
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
-; AVX512DQ-FCP-NEXT: vpsrlq $48, %zmm26, %zmm4
-; AVX512DQ-FCP-NEXT: vpmovqw %zmm4, %xmm4
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
+; AVX512DQ-FCP-NEXT: vpsrlq $48, %zmm25, %zmm3
+; AVX512DQ-FCP-NEXT: vpmovqw %zmm3, %xmm3
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm5, %ymm4
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3,4,5],ymm1[6,7]
-; AVX512DQ-FCP-NEXT: vpsrlq $48, %zmm23, %zmm4
-; AVX512DQ-FCP-NEXT: vpmovqw %zmm4, %xmm4
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm3[0,1,2,3]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm22, 64(%rsi)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm21, (%rsi)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm19, 64(%rdx)
+; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm5, %ymm3
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm1[6,7]
+; AVX512DQ-FCP-NEXT: vpsrlq $48, %zmm22, %zmm3
+; AVX512DQ-FCP-NEXT: vpmovqw %zmm3, %xmm3
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm2[0,1,2,3]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm21, 64(%rsi)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm20, (%rsi)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm16, 64(%rdx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm29, (%rdx)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm2, 64(%rcx)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm24, (%rcx)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm6, 64(%rcx)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm23, (%rcx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm1, 64(%r8)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, (%r8)
; AVX512DQ-FCP-NEXT: vzeroupper
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
index 8e55cb48cf7a2..b18f08b62f0d4 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
@@ -937,24 +937,27 @@ define void @load_i16_stride5_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX2-FP-NEXT: vmovdqa 64(%rdi), %xmm4
; AVX2-FP-NEXT: vpsllq $48, %xmm4, %xmm5
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,6],xmm5[7]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm5 = ymm1[0,1],ymm0[2],ymm1[3],ymm0[4],ymm1[5,6],ymm0[7],ymm1[8,9],ymm0[10],ymm1[11],ymm0[12],ymm1[13,14],ymm0[15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm5, %xmm6
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3,4],xmm5[5,6,7]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[4,5,14,15,8,9,2,3,12,13,6,7,u,u,u,u]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm6 = xmm4[u,u,u,u,u,u,u,u,u,u,u,u,0,1,10,11]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm5 = [4,5,14,15,8,9,2,3,12,13,6,7,0,1,10,11]
+; AVX2-FP-NEXT: vpshufb %xmm5, %xmm4, %xmm6
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm7 = ymm1[0,1],ymm0[2],ymm1[3],ymm0[4],ymm1[5,6],ymm0[7],ymm1[8,9],ymm0[10],ymm1[11],ymm0[12],ymm1[13,14],ymm0[15]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm7, %xmm8
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2],xmm8[3,4],xmm7[5,6,7]
+; AVX2-FP-NEXT: vpshufb %xmm5, %xmm7, %xmm5
; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm6 = ymm0[0],ymm1[1,2],ymm0[3],ymm1[4],ymm0[5],ymm1[6,7],ymm0[8],ymm1[9,10],ymm0[11],ymm1[12],ymm0[13],ymm1[14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm6, %xmm7
-; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm6 = xmm7[0],xmm6[1],xmm7[2],xmm6[3]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[6,7,0,1,10,11,4,5,14,15,8,9,u,u,u,u]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm7 = xmm4[u,u,u,u,u,u,u,u,u,u,u,u,2,3,12,13]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm6 = [6,7,0,1,10,11,4,5,14,15,8,9,2,3,12,13]
+; AVX2-FP-NEXT: vpshufb %xmm6, %xmm4, %xmm7
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm8 = ymm0[0],ymm1[1,2],ymm0[3],ymm1[4],ymm0[5],ymm1[6,7],ymm0[8],ymm1[9,10],ymm0[11],ymm1[12],ymm0[13],ymm1[14,15]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm8, %xmm9
+; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm8 = xmm9[0],xmm8[1],xmm9[2],xmm8[3]
+; AVX2-FP-NEXT: vpshufb %xmm6, %xmm8, %xmm6
; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm6 = xmm6[0,1,2],xmm7[3]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm7 = [8,9,2,3,12,13,6,7,0,1,10,11,4,5,14,15]
+; AVX2-FP-NEXT: vpshufb %xmm7, %xmm4, %xmm4
; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
; AVX2-FP-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4],xmm1[5,6,7]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9,2,3,12,13,6,7,0,1,10,11,u,u,u,u]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm1 = xmm4[u,u,u,u,u,u,u,u,u,u,u,u,4,5,14,15]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX2-FP-NEXT: vpshufb %xmm7, %xmm0, %xmm0
+; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[3]
; AVX2-FP-NEXT: vmovdqa %xmm2, (%rsi)
; AVX2-FP-NEXT: vmovdqa %xmm3, (%rdx)
; AVX2-FP-NEXT: vmovdqa %xmm5, (%rcx)
@@ -980,24 +983,27 @@ define void @load_i16_stride5_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX2-FCP-NEXT: vmovdqa 64(%rdi), %xmm4
; AVX2-FCP-NEXT: vpsllq $48, %xmm4, %xmm5
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,6],xmm5[7]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm1[0,1],ymm0[2],ymm1[3],ymm0[4],ymm1[5,6],ymm0[7],ymm1[8,9],ymm0[10],ymm1[11],ymm0[12],ymm1[13,14],ymm0[15]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm5, %xmm6
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3,4],xmm5[5,6,7]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[4,5,14,15,8,9,2,3,12,13,6,7,u,u,u,u]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm4[u,u,u,u,u,u,u,u,u,u,u,u,0,1,10,11]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [4,5,14,15,8,9,2,3,12,13,6,7,0,1,10,11]
+; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm4, %xmm6
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm1[0,1],ymm0[2],ymm1[3],ymm0[4],ymm1[5,6],ymm0[7],ymm1[8,9],ymm0[10],ymm1[11],ymm0[12],ymm1[13,14],ymm0[15]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm7, %xmm8
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2],xmm8[3,4],xmm7[5,6,7]
+; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm7, %xmm5
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm0[0],ymm1[1,2],ymm0[3],ymm1[4],ymm0[5],ymm1[6,7],ymm0[8],ymm1[9,10],ymm0[11],ymm1[12],ymm0[13],ymm1[14,15]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm6, %xmm7
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm6 = xmm7[0],xmm6[1],xmm7[2],xmm6[3]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[6,7,0,1,10,11,4,5,14,15,8,9,u,u,u,u]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm4[u,u,u,u,u,u,u,u,u,u,u,u,2,3,12,13]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [6,7,0,1,10,11,4,5,14,15,8,9,2,3,12,13]
+; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm4, %xmm7
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm8 = ymm0[0],ymm1[1,2],ymm0[3],ymm1[4],ymm0[5],ymm1[6,7],ymm0[8],ymm1[9,10],ymm0[11],ymm1[12],ymm0[13],ymm1[14,15]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm8, %xmm9
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm8 = xmm9[0],xmm8[1],xmm9[2],xmm8[3]
+; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm8, %xmm6
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm6 = xmm6[0,1,2],xmm7[3]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [8,9,2,3,12,13,6,7,0,1,10,11,4,5,14,15]
+; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm4, %xmm4
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4],xmm1[5,6,7]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9,2,3,12,13,6,7,0,1,10,11,u,u,u,u]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm4[u,u,u,u,u,u,u,u,u,u,u,u,4,5,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm0, %xmm0
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[3]
; AVX2-FCP-NEXT: vmovdqa %xmm2, (%rsi)
; AVX2-FCP-NEXT: vmovdqa %xmm3, (%rdx)
; AVX2-FCP-NEXT: vmovdqa %xmm5, (%rcx)
@@ -1069,24 +1075,27 @@ define void @load_i16_stride5_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[2,3,12,13,6,7,0,1,10,11,4,5,14,15,u,u]
; AVX512-FCP-NEXT: vpsllq $48, %xmm3, %xmm5
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,6],xmm5[7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm1[0,1],ymm0[2],ymm1[3],ymm0[4],ymm1[5,6],ymm0[7],ymm1[8,9],ymm0[10],ymm1[11],ymm0[12],ymm1[13,14],ymm0[15]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm5, %xmm6
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3,4],xmm5[5,6,7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[4,5,14,15,8,9,2,3,12,13,6,7,u,u,u,u]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm3[u,u,u,u,u,u,u,u,u,u,u,u,0,1,10,11]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [4,5,14,15,8,9,2,3,12,13,6,7,0,1,10,11]
+; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm3, %xmm6
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm1[0,1],ymm0[2],ymm1[3],ymm0[4],ymm1[5,6],ymm0[7],ymm1[8,9],ymm0[10],ymm1[11],ymm0[12],ymm1[13,14],ymm0[15]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm7, %xmm8
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2],xmm8[3,4],xmm7[5,6,7]
+; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm7, %xmm5
; AVX512-FCP-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm0[0],ymm1[1,2],ymm0[3],ymm1[4],ymm0[5],ymm1[6,7],ymm0[8],ymm1[9,10],ymm0[11],ymm1[12],ymm0[13],ymm1[14,15]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm6, %xmm7
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} xmm6 = xmm7[0],xmm6[1],xmm7[2],xmm6[3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[6,7,0,1,10,11,4,5,14,15,8,9,u,u,u,u]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm3[u,u,u,u,u,u,u,u,u,u,u,u,2,3,12,13]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [6,7,0,1,10,11,4,5,14,15,8,9,2,3,12,13]
+; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm3, %xmm7
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm8 = ymm0[0],ymm1[1,2],ymm0[3],ymm1[4],ymm0[5],ymm1[6,7],ymm0[8],ymm1[9,10],ymm0[11],ymm1[12],ymm0[13],ymm1[14,15]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm8, %xmm9
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} xmm8 = xmm9[0],xmm8[1],xmm9[2],xmm8[3]
+; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm8, %xmm6
; AVX512-FCP-NEXT: vpblendd {{.*#+}} xmm6 = xmm6[0,1,2],xmm7[3]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [8,9,2,3,12,13,6,7,0,1,10,11,4,5,14,15]
+; AVX512-FCP-NEXT: vpshufb %xmm7, %xmm3, %xmm3
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4],xmm1[5,6,7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9,2,3,12,13,6,7,0,1,10,11,u,u,u,u]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm3[u,u,u,u,u,u,u,u,u,u,u,u,4,5,14,15]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX512-FCP-NEXT: vpshufb %xmm7, %xmm0, %xmm0
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[3]
; AVX512-FCP-NEXT: vmovdqa %xmm2, (%rsi)
; AVX512-FCP-NEXT: vmovdqa %xmm4, (%rdx)
; AVX512-FCP-NEXT: vmovdqa %xmm5, (%rcx)
@@ -1158,24 +1167,27 @@ define void @load_i16_stride5_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[2,3,12,13,6,7,0,1,10,11,4,5,14,15,u,u]
; AVX512DQ-FCP-NEXT: vpsllq $48, %xmm3, %xmm5
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,6],xmm5[7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm1[0,1],ymm0[2],ymm1[3],ymm0[4],ymm1[5,6],ymm0[7],ymm1[8,9],ymm0[10],ymm1[11],ymm0[12],ymm1[13,14],ymm0[15]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm5, %xmm6
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3,4],xmm5[5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[4,5,14,15,8,9,2,3,12,13,6,7,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm3[u,u,u,u,u,u,u,u,u,u,u,u,0,1,10,11]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [4,5,14,15,8,9,2,3,12,13,6,7,0,1,10,11]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm3, %xmm6
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm1[0,1],ymm0[2],ymm1[3],ymm0[4],ymm1[5,6],ymm0[7],ymm1[8,9],ymm0[10],ymm1[11],ymm0[12],ymm1[13,14],ymm0[15]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm7, %xmm8
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2],xmm8[3,4],xmm7[5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm7, %xmm5
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm0[0],ymm1[1,2],ymm0[3],ymm1[4],ymm0[5],ymm1[6,7],ymm0[8],ymm1[9,10],ymm0[11],ymm1[12],ymm0[13],ymm1[14,15]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm6, %xmm7
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} xmm6 = xmm7[0],xmm6[1],xmm7[2],xmm6[3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[6,7,0,1,10,11,4,5,14,15,8,9,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm3[u,u,u,u,u,u,u,u,u,u,u,u,2,3,12,13]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [6,7,0,1,10,11,4,5,14,15,8,9,2,3,12,13]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm3, %xmm7
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm8 = ymm0[0],ymm1[1,2],ymm0[3],ymm1[4],ymm0[5],ymm1[6,7],ymm0[8],ymm1[9,10],ymm0[11],ymm1[12],ymm0[13],ymm1[14,15]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm8, %xmm9
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} xmm8 = xmm9[0],xmm8[1],xmm9[2],xmm8[3]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm8, %xmm6
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} xmm6 = xmm6[0,1,2],xmm7[3]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [8,9,2,3,12,13,6,7,0,1,10,11,4,5,14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm3, %xmm3
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4],xmm1[5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9,2,3,12,13,6,7,0,1,10,11,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm3[u,u,u,u,u,u,u,u,u,u,u,u,4,5,14,15]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm0, %xmm0
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[3]
; AVX512DQ-FCP-NEXT: vmovdqa %xmm2, (%rsi)
; AVX512DQ-FCP-NEXT: vmovdqa %xmm4, (%rdx)
; AVX512DQ-FCP-NEXT: vmovdqa %xmm5, (%rcx)
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
index 1ddd8166c998e..605deed6536bf 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
@@ -1023,11 +1023,12 @@ define void @load_i16_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX2-NEXT: vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,6,6,7]
; AVX2-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm8[2],xmm6[3],xmm8[4,5],xmm6[6,7]
; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm6[0,1,2],xmm2[3]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm6 = [2,3,14,15,10,11,6,7,2,3,14,15,12,13,14,15]
+; AVX2-NEXT: vpshufb %xmm6, %xmm7, %xmm7
+; AVX2-NEXT: vpshufb %xmm6, %xmm5, %xmm5
+; AVX2-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm7[2],xmm5[3],xmm7[4,5],xmm5[6,7]
; AVX2-NEXT: vpbroadcastw 74(%rdi), %xmm6
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
-; AVX2-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u]
-; AVX2-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
-; AVX2-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm7[2],xmm5[3],xmm7[4,5],xmm5[6,7]
; AVX2-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3]
; AVX2-NEXT: vpblendd {{.*#+}} ymm6 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
; AVX2-NEXT: vpshufd {{.*#+}} xmm7 = xmm6[2,1,2,3]
@@ -1072,34 +1073,36 @@ define void @load_i16_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX2-FP-LABEL: load_i16_stride6_vf8:
; AVX2-FP: # %bb.0:
; AVX2-FP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX2-FP-NEXT: vmovdqa (%rdi), %ymm2
-; AVX2-FP-NEXT: vmovdqa 32(%rdi), %ymm4
-; AVX2-FP-NEXT: vmovdqa 80(%rdi), %xmm0
-; AVX2-FP-NEXT: vpslld $16, %xmm0, %xmm3
-; AVX2-FP-NEXT: vmovdqa 64(%rdi), %xmm1
-; AVX2-FP-NEXT: vpsrldq {{.*#+}} xmm5 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; AVX2-FP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm5 = ymm2[0],ymm4[1],ymm2[2,3],ymm4[4],ymm2[5,6],ymm4[7]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm5, %xmm7
-; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[2,1,0,3]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm8 = xmm7[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm8[2],xmm6[3],xmm8[4,5],xmm6[6,7]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm3 = xmm6[0,1,2],xmm3[3]
+; AVX2-FP-NEXT: vmovdqa (%rdi), %ymm0
+; AVX2-FP-NEXT: vmovdqa 32(%rdi), %ymm2
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm5 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4],ymm0[5,6],ymm2[7]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,12,13,0,1,4,5,8,9,12,13,12,13,14,15]
+; AVX2-FP-NEXT: vpshufb %xmm1, %xmm5, %xmm3
+; AVX2-FP-NEXT: vextracti128 $1, %ymm5, %xmm4
+; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm6 = xmm4[2,1,0,3]
+; AVX2-FP-NEXT: vpshufb %xmm1, %xmm6, %xmm1
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1],xmm1[2],xmm3[3],xmm1[4,5],xmm3[6,7]
+; AVX2-FP-NEXT: vmovdqa 80(%rdi), %xmm3
+; AVX2-FP-NEXT: vpslld $16, %xmm3, %xmm7
+; AVX2-FP-NEXT: vmovdqa 64(%rdi), %xmm4
+; AVX2-FP-NEXT: vpsrldq {{.*#+}} xmm8 = xmm4[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; AVX2-FP-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm7[3]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm7 = [2,3,14,15,2,3,6,7,10,11,14,15,12,13,14,15]
+; AVX2-FP-NEXT: vpshufb %xmm7, %xmm5, %xmm5
+; AVX2-FP-NEXT: vpshufb %xmm7, %xmm6, %xmm6
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm6[2],xmm5[3],xmm6[4,5],xmm5[6,7]
; AVX2-FP-NEXT: vpbroadcastw 74(%rdi), %xmm6
-; AVX2-FP-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm7[2],xmm5[3],xmm7[4,5],xmm5[6,7]
+; AVX2-FP-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm6 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm6 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7]
; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm7 = xmm6[2,1,2,3]
; AVX2-FP-NEXT: vpshuflw {{.*#+}} xmm8 = xmm7[2,1,2,0,4,5,6,7]
; AVX2-FP-NEXT: vextracti128 $1, %ymm6, %xmm6
; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,3,2,1]
; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm9 = xmm6[u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u]
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0],xmm9[1,2],xmm8[3],xmm9[4,5,6,7]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm9 = xmm1[0,1],xmm0[2],xmm1[3]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm9 = xmm4[0,1],xmm3[2],xmm4[3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm10 = xmm9[u,u,u,u,u,u,u,u,u,u,0,1,12,13,8,9]
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3,4],xmm10[5,6,7]
; AVX2-FP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm7[3,1,2,1,4,5,6,7]
@@ -1107,24 +1110,25 @@ define void @load_i16_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0],xmm6[1,2],xmm7[3],xmm6[4,5,6,7]
; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm7 = xmm9[u,u,u,u,u,u,u,u,u,u,2,3,14,15,10,11]
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm7[5,6,7]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2],ymm4[3,4],ymm2[5],ymm4[6,7]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm2, %xmm4
-; AVX2-FP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm4[2,2,2,2,4,5,6,7]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm9 = xmm2[8,9,u,u,0,1,12,13,u,u,u,u,u,u,u,u]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-FP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm2[2,2,2,2,4,5,6,7]
+; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm9 = xmm0[8,9,u,u,0,1,12,13,u,u,u,u,u,u,u,u]
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm7 = xmm9[0],xmm7[1],xmm9[2,3],xmm7[4],xmm9[5,6,7]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[u,u,u,u,u,u,u,u,u,u,4,5,0,1,12,13]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm1 = xmm7[0,1,2,3,4],xmm1[5,6,7]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[10,11,u,u,2,3,14,15,u,u,u,u,u,u,u,u]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2,3],xmm4[4],xmm2[5,6,7]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,6,7,2,3,14,15]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3,4],xmm0[5,6,7]
-; AVX2-FP-NEXT: vmovdqa %xmm3, (%rsi)
+; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3]
+; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm4 = xmm3[u,u,u,u,u,u,u,u,u,u,4,5,0,1,12,13]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm4 = xmm7[0,1,2,3,4],xmm4[5,6,7]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm7 = [10,11,6,7,2,3,14,15,10,11,10,11,12,13,14,15]
+; AVX2-FP-NEXT: vpshufb %xmm7, %xmm2, %xmm2
+; AVX2-FP-NEXT: vpshufb %xmm7, %xmm0, %xmm0
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
+; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm2 = xmm3[u,u,u,u,u,u,u,u,u,u,6,7,2,3,14,15]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm2[5,6,7]
+; AVX2-FP-NEXT: vmovdqa %xmm1, (%rsi)
; AVX2-FP-NEXT: vmovdqa %xmm5, (%rdx)
; AVX2-FP-NEXT: vmovdqa %xmm8, (%rcx)
; AVX2-FP-NEXT: vmovdqa %xmm6, (%r8)
-; AVX2-FP-NEXT: vmovdqa %xmm1, (%r9)
+; AVX2-FP-NEXT: vmovdqa %xmm4, (%r9)
; AVX2-FP-NEXT: vmovdqa %xmm0, (%rax)
; AVX2-FP-NEXT: vzeroupper
; AVX2-FP-NEXT: retq
@@ -1132,34 +1136,36 @@ define void @load_i16_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX2-FCP-LABEL: load_i16_stride6_vf8:
; AVX2-FCP: # %bb.0:
; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX2-FCP-NEXT: vmovdqa (%rdi), %ymm2
-; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %ymm4
-; AVX2-FCP-NEXT: vmovdqa 80(%rdi), %xmm0
-; AVX2-FCP-NEXT: vpslld $16, %xmm0, %xmm3
-; AVX2-FCP-NEXT: vmovdqa 64(%rdi), %xmm1
-; AVX2-FCP-NEXT: vpsrldq {{.*#+}} xmm5 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; AVX2-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm2[0],ymm4[1],ymm2[2,3],ymm4[4],ymm2[5,6],ymm4[7]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm5, %xmm7
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[2,1,0,3]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm7[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm8[2],xmm6[3],xmm8[4,5],xmm6[6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm6[0,1,2],xmm3[3]
+; AVX2-FCP-NEXT: vmovdqa (%rdi), %ymm0
+; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4],ymm0[5,6],ymm2[7]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,12,13,0,1,4,5,8,9,12,13,12,13,14,15]
+; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm5, %xmm3
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm5, %xmm4
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm4[2,1,0,3]
+; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm6, %xmm1
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1],xmm1[2],xmm3[3],xmm1[4,5],xmm3[6,7]
+; AVX2-FCP-NEXT: vmovdqa 80(%rdi), %xmm3
+; AVX2-FCP-NEXT: vpslld $16, %xmm3, %xmm7
+; AVX2-FCP-NEXT: vmovdqa 64(%rdi), %xmm4
+; AVX2-FCP-NEXT: vpsrldq {{.*#+}} xmm8 = xmm4[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; AVX2-FCP-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm7[3]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [2,3,14,15,2,3,6,7,10,11,14,15,12,13,14,15]
+; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm5, %xmm5
+; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm6, %xmm6
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm6[2],xmm5[3],xmm6[4,5],xmm5[6,7]
; AVX2-FCP-NEXT: vpbroadcastw 74(%rdi), %xmm6
-; AVX2-FCP-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm7[2],xmm5[3],xmm7[4,5],xmm5[6,7]
+; AVX2-FCP-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7]
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm7 = xmm6[2,1,2,3]
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm8 = xmm7[2,1,2,0,4,5,6,7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm6, %xmm6
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,3,2,1]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm6[u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u]
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0],xmm9[1,2],xmm8[3],xmm9[4,5,6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm9 = xmm1[0,1],xmm0[2],xmm1[3]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm9 = xmm4[0,1],xmm3[2],xmm4[3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm9[u,u,u,u,u,u,u,u,u,u,0,1,12,13,8,9]
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3,4],xmm10[5,6,7]
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm7[3,1,2,1,4,5,6,7]
@@ -1167,24 +1173,25 @@ define void @load_i16_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0],xmm6[1,2],xmm7[3],xmm6[4,5,6,7]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm9[u,u,u,u,u,u,u,u,u,u,2,3,14,15,10,11]
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm7[5,6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2],ymm4[3,4],ymm2[5],ymm4[6,7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm2, %xmm4
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm4[2,2,2,2,4,5,6,7]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm2[8,9,u,u,0,1,12,13,u,u,u,u,u,u,u,u]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm2[2,2,2,2,4,5,6,7]
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm0[8,9,u,u,0,1,12,13,u,u,u,u,u,u,u,u]
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm9[0],xmm7[1],xmm9[2,3],xmm7[4],xmm9[5,6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[u,u,u,u,u,u,u,u,u,u,4,5,0,1,12,13]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm7[0,1,2,3,4],xmm1[5,6,7]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[10,11,u,u,2,3,14,15,u,u,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2,3],xmm4[4],xmm2[5,6,7]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,6,7,2,3,14,15]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3,4],xmm0[5,6,7]
-; AVX2-FCP-NEXT: vmovdqa %xmm3, (%rsi)
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3]
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm3[u,u,u,u,u,u,u,u,u,u,4,5,0,1,12,13]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm7[0,1,2,3,4],xmm4[5,6,7]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [10,11,6,7,2,3,14,15,10,11,10,11,12,13,14,15]
+; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm2, %xmm2
+; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm0, %xmm0
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm3[u,u,u,u,u,u,u,u,u,u,6,7,2,3,14,15]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm2[5,6,7]
+; AVX2-FCP-NEXT: vmovdqa %xmm1, (%rsi)
; AVX2-FCP-NEXT: vmovdqa %xmm5, (%rdx)
; AVX2-FCP-NEXT: vmovdqa %xmm8, (%rcx)
; AVX2-FCP-NEXT: vmovdqa %xmm6, (%r8)
-; AVX2-FCP-NEXT: vmovdqa %xmm1, (%r9)
+; AVX2-FCP-NEXT: vmovdqa %xmm4, (%r9)
; AVX2-FCP-NEXT: vmovdqa %xmm0, (%rax)
; AVX2-FCP-NEXT: vzeroupper
; AVX2-FCP-NEXT: retq
@@ -1259,35 +1266,36 @@ define void @load_i16_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-LABEL: load_i16_stride6_vf8:
; AVX512-FCP: # %bb.0:
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512-FCP-NEXT: vmovdqa 80(%rdi), %xmm0
-; AVX512-FCP-NEXT: vpslld $16, %xmm0, %xmm2
-; AVX512-FCP-NEXT: vmovdqa 64(%rdi), %xmm1
-; AVX512-FCP-NEXT: vpsrldq {{.*#+}} xmm3 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm4
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[0,1,12,13,u,u,4,5,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm5, %xmm7
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[2,1,0,3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm7[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm8[2],xmm6[3],xmm8[4,5],xmm6[6,7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm6[0,1,2],xmm2[3]
-; AVX512-FCP-NEXT: vpbroadcastw 74(%rdi), %xmm6
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [0,1,12,13,0,1,4,5,8,9,12,13,u,u,u,u]
+; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm1
+; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7]
+; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm5, %xmm3
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm5, %xmm4
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm4[2,1,0,3]
+; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm6, %xmm0
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0,1],xmm0[2],xmm3[3],xmm0[4,5],xmm3[6,7]
+; AVX512-FCP-NEXT: vmovdqa 80(%rdi), %xmm3
+; AVX512-FCP-NEXT: vpslld $16, %xmm3, %xmm7
+; AVX512-FCP-NEXT: vmovdqa 64(%rdi), %xmm4
+; AVX512-FCP-NEXT: vpsrldq {{.*#+}} xmm8 = xmm4[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm7[3]
+; AVX512-FCP-NEXT: vpbroadcastw 74(%rdi), %xmm7
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[6,7,2,3,u,u,14,15,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm7[1,1,1,1,4,5,6,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm7[2],xmm5[3,4],xmm7[5],xmm5[6],xmm7[7]
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm6 = xmm6[1,1,1,1,4,5,6,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm6[2],xmm5[3,4],xmm6[5],xmm5[6],xmm6[7]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[2,3,6,7,4,5,0,1,10,11,14,15,u,u,u,u]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],xmm7[3]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm7 = xmm6[2,1,2,3]
; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm8 = xmm7[2,1,2,0,4,5,6,7]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm6, %xmm6
; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,3,2,1]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm6[u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0],xmm9[1,2],xmm8[3],xmm9[4,5,6,7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} xmm9 = xmm1[0,1],xmm0[2],xmm1[3]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} xmm9 = xmm4[0,1],xmm3[2],xmm4[3]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm9[u,u,u,u,u,u,u,u,u,u,0,1,12,13,8,9]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3,4],xmm10[5,6,7]
; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm7[3,1,2,1,4,5,6,7]
@@ -1295,25 +1303,26 @@ define void @load_i16_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0],xmm6[1,2],xmm7[3],xmm6[4,5,6,7]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm9[u,u,u,u,u,u,u,u,u,u,2,3,14,15,10,11]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm7[5,6,7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm3, %xmm4
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm4[2,2,2,2,4,5,6,7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm3[8,9,u,u,0,1,12,13,u,u,u,u,u,u,u,u]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm2[2,2,2,2,4,5,6,7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm1[8,9,u,u,0,1,12,13,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm9[0],xmm7[1],xmm9[2,3],xmm7[4],xmm9[5,6,7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[u,u,u,u,u,u,u,u,u,u,4,5,0,1,12,13]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm7[0,1,2,3,4],xmm1[5,6,7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[10,11,u,u,2,3,14,15,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5,6,7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,6,7,2,3,14,15]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3,4],xmm0[5,6,7]
-; AVX512-FCP-NEXT: vmovdqa %xmm2, (%rsi)
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm3[u,u,u,u,u,u,u,u,u,u,4,5,0,1,12,13]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm7[0,1,2,3,4],xmm4[5,6,7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [10,11,6,7,2,3,14,15,10,11,10,11,12,13,14,15]
+; AVX512-FCP-NEXT: vpshufb %xmm7, %xmm2, %xmm2
+; AVX512-FCP-NEXT: vpshufb %xmm7, %xmm1, %xmm1
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm3[u,u,u,u,u,u,u,u,u,u,6,7,2,3,14,15]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5,6,7]
+; AVX512-FCP-NEXT: vmovdqa %xmm0, (%rsi)
; AVX512-FCP-NEXT: vmovdqa %xmm5, (%rdx)
; AVX512-FCP-NEXT: vmovdqa %xmm8, (%rcx)
; AVX512-FCP-NEXT: vmovdqa %xmm6, (%r8)
-; AVX512-FCP-NEXT: vmovdqa %xmm1, (%r9)
-; AVX512-FCP-NEXT: vmovdqa %xmm0, (%rax)
+; AVX512-FCP-NEXT: vmovdqa %xmm4, (%r9)
+; AVX512-FCP-NEXT: vmovdqa %xmm1, (%rax)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
@@ -1387,35 +1396,36 @@ define void @load_i16_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-LABEL: load_i16_stride6_vf8:
; AVX512DQ-FCP: # %bb.0:
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FCP-NEXT: vmovdqa 80(%rdi), %xmm0
-; AVX512DQ-FCP-NEXT: vpslld $16, %xmm0, %xmm2
-; AVX512DQ-FCP-NEXT: vmovdqa 64(%rdi), %xmm1
-; AVX512DQ-FCP-NEXT: vpsrldq {{.*#+}} xmm3 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm4
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[0,1,12,13,u,u,4,5,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm5, %xmm7
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[2,1,0,3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm7[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm8[2],xmm6[3],xmm8[4,5],xmm6[6,7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm6[0,1,2],xmm2[3]
-; AVX512DQ-FCP-NEXT: vpbroadcastw 74(%rdi), %xmm6
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [0,1,12,13,0,1,4,5,8,9,12,13,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm5, %xmm3
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm5, %xmm4
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm4[2,1,0,3]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm6, %xmm0
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0,1],xmm0[2],xmm3[3],xmm0[4,5],xmm3[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa 80(%rdi), %xmm3
+; AVX512DQ-FCP-NEXT: vpslld $16, %xmm3, %xmm7
+; AVX512DQ-FCP-NEXT: vmovdqa 64(%rdi), %xmm4
+; AVX512DQ-FCP-NEXT: vpsrldq {{.*#+}} xmm8 = xmm4[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm7[3]
+; AVX512DQ-FCP-NEXT: vpbroadcastw 74(%rdi), %xmm7
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[6,7,2,3,u,u,14,15,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm7[1,1,1,1,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm7[2],xmm5[3,4],xmm7[5],xmm5[6],xmm7[7]
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm6 = xmm6[1,1,1,1,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm6[2],xmm5[3,4],xmm6[5],xmm5[6],xmm6[7]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[2,3,6,7,4,5,0,1,10,11,14,15,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],xmm7[3]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm7 = xmm6[2,1,2,3]
; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm8 = xmm7[2,1,2,0,4,5,6,7]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm6, %xmm6
; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,3,2,1]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm6[u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0],xmm9[1,2],xmm8[3],xmm9[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} xmm9 = xmm1[0,1],xmm0[2],xmm1[3]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} xmm9 = xmm4[0,1],xmm3[2],xmm4[3]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm9[u,u,u,u,u,u,u,u,u,u,0,1,12,13,8,9]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3,4],xmm10[5,6,7]
; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm7[3,1,2,1,4,5,6,7]
@@ -1423,25 +1433,26 @@ define void @load_i16_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0],xmm6[1,2],xmm7[3],xmm6[4,5,6,7]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm9[u,u,u,u,u,u,u,u,u,u,2,3,14,15,10,11]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm7[5,6,7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm3, %xmm4
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm4[2,2,2,2,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm3[8,9,u,u,0,1,12,13,u,u,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm2[2,2,2,2,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm1[8,9,u,u,0,1,12,13,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm9[0],xmm7[1],xmm9[2,3],xmm7[4],xmm9[5,6,7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[u,u,u,u,u,u,u,u,u,u,4,5,0,1,12,13]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm7[0,1,2,3,4],xmm1[5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[10,11,u,u,2,3,14,15,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,6,7,2,3,14,15]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3,4],xmm0[5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm2, (%rsi)
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm3[u,u,u,u,u,u,u,u,u,u,4,5,0,1,12,13]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm7[0,1,2,3,4],xmm4[5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [10,11,6,7,2,3,14,15,10,11,10,11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm2, %xmm2
+; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm1, %xmm1
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm3[u,u,u,u,u,u,u,u,u,u,6,7,2,3,14,15]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm0, (%rsi)
; AVX512DQ-FCP-NEXT: vmovdqa %xmm5, (%rdx)
; AVX512DQ-FCP-NEXT: vmovdqa %xmm8, (%rcx)
; AVX512DQ-FCP-NEXT: vmovdqa %xmm6, (%r8)
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm1, (%r9)
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm0, (%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm4, (%r9)
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm1, (%rax)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
@@ -2174,27 +2185,29 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FP-LABEL: load_i16_stride6_vf16:
; AVX2-FP: # %bb.0:
; AVX2-FP-NEXT: vmovdqa (%rdi), %ymm3
-; AVX2-FP-NEXT: vmovdqa 32(%rdi), %ymm4
+; AVX2-FP-NEXT: vmovdqa 32(%rdi), %ymm5
; AVX2-FP-NEXT: vmovdqa 64(%rdi), %ymm0
-; AVX2-FP-NEXT: vmovdqa 96(%rdi), %ymm5
+; AVX2-FP-NEXT: vmovdqa 96(%rdi), %ymm4
; AVX2-FP-NEXT: vmovdqa 160(%rdi), %ymm1
; AVX2-FP-NEXT: vmovdqa 128(%rdi), %ymm2
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm8 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm6 = xmm8[u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,4,5,4,5,4,5,0,1,12,13,8,9,4,5]
+; AVX2-FP-NEXT: vpshufb %xmm6, %xmm8, %xmm7
; AVX2-FP-NEXT: vextracti128 $1, %ymm8, %xmm9
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm7 = xmm9[0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0,1,2],xmm6[3],xmm7[4,5],xmm6[6],xmm7[7]
+; AVX2-FP-NEXT: vpshufb %xmm6, %xmm9, %xmm6
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2],xmm7[3],xmm6[4,5],xmm7[6],xmm6[7]
; AVX2-FP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm10
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm11 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm6 = xmm11[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm11, %xmm7
-; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm12 = xmm7[2,1,0,3]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm7 = xmm12[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm13 = xmm6[0,1],xmm7[2],xmm6[3],xmm7[4,5],xmm6[6,7]
-; AVX2-FP-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm0[2,3],ymm5[2,3]
-; AVX2-FP-NEXT: vperm2i128 {{.*#+}} ymm7 = ymm0[0,1],ymm5[0,1]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm5 = ymm7[0],ymm6[1],ymm7[2,3,4,5],ymm6[6],ymm7[7]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm0 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm11 = ymm3[0],ymm5[1],ymm3[2,3],ymm5[4],ymm3[5,6],ymm5[7]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,12,13,0,1,4,5,8,9,12,13,12,13,14,15]
+; AVX2-FP-NEXT: vpshufb %xmm6, %xmm11, %xmm7
+; AVX2-FP-NEXT: vextracti128 $1, %ymm11, %xmm12
+; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm12 = xmm12[2,1,0,3]
+; AVX2-FP-NEXT: vpshufb %xmm6, %xmm12, %xmm6
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm13 = xmm7[0,1],xmm6[2],xmm7[3],xmm6[4,5],xmm7[6,7]
+; AVX2-FP-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm0[2,3],ymm4[2,3]
+; AVX2-FP-NEXT: vperm2i128 {{.*#+}} ymm7 = ymm0[0,1],ymm4[0,1]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm4 = ymm7[0],ymm6[1],ymm7[2,3,4,5],ymm6[6],ymm7[7]
+; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm13[0,1,2],ymm0[3,4,5,6,7]
; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm10 = ymm0[0,1,2],ymm10[3,4,5,6,7],ymm0[8,9,10],ymm10[11,12,13,14,15]
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm10[4,5,6,7]
@@ -2208,10 +2221,10 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FP-NEXT: vpshuflw {{.*#+}} xmm11 = xmm12[1,1,1,1,4,5,6,7]
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1],xmm11[2],xmm10[3,4],xmm11[5],xmm10[6],xmm11[7]
; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[2,3,6,7,4,5,0,1,10,11,14,15,u,u,u,u]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm5 = ymm10[0,1,2],ymm5[3,4,5,6,7]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm8 = ymm5[0,1,2],ymm8[3,4,5,6,7],ymm5[8,9,10],ymm8[11,12,13,14,15]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm8[4,5,6,7]
+; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm4 = ymm10[0,1,2],ymm4[3,4,5,6,7]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm8 = ymm4[0,1,2],ymm8[3,4,5,6,7],ymm4[8,9,10],ymm8[11,12,13,14,15]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm8 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
; AVX2-FP-NEXT: vextracti128 $1, %ymm8, %xmm10
; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm10 = xmm10[0,1,2,1]
@@ -2220,7 +2233,7 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm8 = xmm12[0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u]
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm11[4],xmm8[5,6],xmm11[7]
; AVX2-FP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm11 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm11 = ymm3[0,1],ymm5[2],ymm3[3,4],ymm5[5],ymm3[6,7]
; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm13 = xmm11[2,1,2,3]
; AVX2-FP-NEXT: vpshuflw {{.*#+}} xmm14 = xmm13[2,1,2,0,4,5,6,7]
; AVX2-FP-NEXT: vextracti128 $1, %ymm11, %xmm11
@@ -2245,39 +2258,42 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[0,1,3,2]
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm9 = xmm10[0,1,2,3,4],xmm9[5,6,7]
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm0[4,5,6,7]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm5[0,1],ymm3[2],ymm5[3,4],ymm3[5],ymm5[6,7]
; AVX2-FP-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX2-FP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm3[2,2,2,2,4,5,6,7]
+; AVX2-FP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm3[2,2,2,2,4,5,6,7]
; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm10 = xmm0[8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm4 = xmm10[0],xmm4[1],xmm10[2,3],xmm4[4],xmm10[5,6,7]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm5 = xmm10[0],xmm5[1],xmm10[2,3],xmm5[4],xmm10[5,6,7]
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3,4,5],ymm7[6],ymm6[7]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX2-FP-NEXT: vpmovsxbw {{.*#+}} xmm10 = [65535,65535,65535,65535,65535,0,0,0]
-; AVX2-FP-NEXT: vpblendvb %ymm10, %ymm4, %ymm7, %ymm4
+; AVX2-FP-NEXT: vpblendvb %ymm10, %ymm5, %ymm7, %ymm5
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm2 = xmm3[u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm2 = [10,11,6,7,2,3,14,15,10,11,10,11,12,13,14,15]
+; AVX2-FP-NEXT: vpshufb %xmm2, %xmm3, %xmm3
+; AVX2-FP-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1],xmm0[2,3],xmm3[4],xmm0[5,6,7]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,0,1,4,5,8,9,12,13,0,1,12,13]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm3 = ymm6[10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX2-FP-NEXT: vpblendvb %ymm10, %ymm0, %ymm3, %ymm0
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-FP-NEXT: vpshufb %xmm2, %xmm3, %xmm6
; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm3 = xmm6[0,1,2,3],xmm3[4],xmm6[5],xmm3[6,7]
-; AVX2-FP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3,4],ymm3[5,6,7]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5],xmm2[6,7]
+; AVX2-FP-NEXT: vpshufb %xmm2, %xmm1, %xmm2
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4],xmm2[5],xmm6[6,7]
+; AVX2-FP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3,4],ymm2[5,6,7]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm5 = [0,1,2,3,2,3,6,7,10,11,14,15,2,3,14,15]
+; AVX2-FP-NEXT: vpshufb %xmm5, %xmm3, %xmm3
+; AVX2-FP-NEXT: vpshufb %xmm5, %xmm1, %xmm1
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4],xmm1[5],xmm3[6,7]
; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm1, (%rsi)
-; AVX2-FP-NEXT: vmovdqa %ymm5, (%rdx)
+; AVX2-FP-NEXT: vmovdqa %ymm4, (%rdx)
; AVX2-FP-NEXT: vmovdqa %ymm8, (%rcx)
; AVX2-FP-NEXT: vmovdqa %ymm9, (%r8)
-; AVX2-FP-NEXT: vmovdqa %ymm3, (%r9)
+; AVX2-FP-NEXT: vmovdqa %ymm2, (%r9)
; AVX2-FP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX2-FP-NEXT: vmovdqa %ymm0, (%rax)
; AVX2-FP-NEXT: vzeroupper
@@ -2286,27 +2302,29 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-LABEL: load_i16_stride6_vf16:
; AVX2-FCP: # %bb.0:
; AVX2-FCP-NEXT: vmovdqa (%rdi), %ymm3
-; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %ymm4
+; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %ymm5
; AVX2-FCP-NEXT: vmovdqa 64(%rdi), %ymm0
-; AVX2-FCP-NEXT: vmovdqa 96(%rdi), %ymm5
+; AVX2-FCP-NEXT: vmovdqa 96(%rdi), %ymm4
; AVX2-FCP-NEXT: vmovdqa 160(%rdi), %ymm1
; AVX2-FCP-NEXT: vmovdqa 128(%rdi), %ymm2
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm8[u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,4,5,4,5,4,5,0,1,12,13,8,9,4,5]
+; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm8, %xmm7
; AVX2-FCP-NEXT: vextracti128 $1, %ymm8, %xmm9
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm9[0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0,1,2],xmm6[3],xmm7[4,5],xmm6[6],xmm7[7]
+; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm9, %xmm6
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2],xmm7[3],xmm6[4,5],xmm7[6],xmm6[7]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm10
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm11[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm11, %xmm7
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm12 = xmm7[2,1,0,3]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm12[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm13 = xmm6[0,1],xmm7[2],xmm6[3],xmm7[4,5],xmm6[6,7]
-; AVX2-FCP-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm0[2,3],ymm5[2,3]
-; AVX2-FCP-NEXT: vperm2i128 {{.*#+}} ymm7 = ymm0[0,1],ymm5[0,1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm7[0],ymm6[1],ymm7[2,3,4,5],ymm6[6],ymm7[7]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm3[0],ymm5[1],ymm3[2,3],ymm5[4],ymm3[5,6],ymm5[7]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,12,13,0,1,4,5,8,9,12,13,12,13,14,15]
+; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm11, %xmm7
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm11, %xmm12
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm12 = xmm12[2,1,0,3]
+; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm12, %xmm6
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm13 = xmm7[0,1],xmm6[2],xmm7[3],xmm6[4,5],xmm7[6,7]
+; AVX2-FCP-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm0[2,3],ymm4[2,3]
+; AVX2-FCP-NEXT: vperm2i128 {{.*#+}} ymm7 = ymm0[0,1],ymm4[0,1]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm7[0],ymm6[1],ymm7[2,3,4,5],ymm6[6],ymm7[7]
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm13[0,1,2],ymm0[3,4,5,6,7]
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm10 = ymm0[0,1,2],ymm10[3,4,5,6,7],ymm0[8,9,10],ymm10[11,12,13,14,15]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm10[4,5,6,7]
@@ -2320,10 +2338,10 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm11 = xmm12[1,1,1,1,4,5,6,7]
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1],xmm11[2],xmm10[3,4],xmm11[5],xmm10[6],xmm11[7]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[2,3,6,7,4,5,0,1,10,11,14,15,u,u,u,u]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm10[0,1,2],ymm5[3,4,5,6,7]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm8 = ymm5[0,1,2],ymm8[3,4,5,6,7],ymm5[8,9,10],ymm8[11,12,13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm8[4,5,6,7]
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm10[0,1,2],ymm4[3,4,5,6,7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm8 = ymm4[0,1,2],ymm8[3,4,5,6,7],ymm4[8,9,10],ymm8[11,12,13,14,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm8, %xmm10
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm10 = xmm10[0,1,2,1]
@@ -2332,7 +2350,7 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm12[0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u]
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm11[4],xmm8[5,6],xmm11[7]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm3[0,1],ymm5[2],ymm3[3,4],ymm5[5],ymm3[6,7]
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm13 = xmm11[2,1,2,3]
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm14 = xmm13[2,1,2,0,4,5,6,7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm11, %xmm11
@@ -2357,39 +2375,42 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[0,1,3,2]
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm10[0,1,2,3,4],xmm9[5,6,7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm0[4,5,6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm5[0,1],ymm3[2],ymm5[3,4],ymm3[5],ymm5[6,7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm3[2,2,2,2,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm3[2,2,2,2,4,5,6,7]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm0[8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm10[0],xmm4[1],xmm10[2,3],xmm4[4],xmm10[5,6,7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm10[0],xmm5[1],xmm10[2,3],xmm5[4],xmm10[5,6,7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3,4,5],ymm7[6],ymm6[7]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX2-FCP-NEXT: vpmovsxbw {{.*#+}} xmm10 = [65535,65535,65535,65535,65535,0,0,0]
-; AVX2-FCP-NEXT: vpblendvb %ymm10, %ymm4, %ymm7, %ymm4
+; AVX2-FCP-NEXT: vpblendvb %ymm10, %ymm5, %ymm7, %ymm5
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm3[u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [10,11,6,7,2,3,14,15,10,11,10,11,12,13,14,15]
+; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm3, %xmm3
+; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1],xmm0[2,3],xmm3[4],xmm0[5,6,7]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,0,1,4,5,8,9,12,13,0,1,12,13]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm6[10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX2-FCP-NEXT: vpblendvb %ymm10, %ymm0, %ymm3, %ymm0
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm3, %xmm6
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm6[0,1,2,3],xmm3[4],xmm6[5],xmm3[6,7]
-; AVX2-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3,4],ymm3[5,6,7]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5],xmm2[6,7]
+; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm1, %xmm2
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4],xmm2[5],xmm6[6,7]
+; AVX2-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3,4],ymm2[5,6,7]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [0,1,2,3,2,3,6,7,10,11,14,15,2,3,14,15]
+; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm3, %xmm3
+; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm1, %xmm1
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4],xmm1[5],xmm3[6,7]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm1, (%rsi)
-; AVX2-FCP-NEXT: vmovdqa %ymm5, (%rdx)
+; AVX2-FCP-NEXT: vmovdqa %ymm4, (%rdx)
; AVX2-FCP-NEXT: vmovdqa %ymm8, (%rcx)
; AVX2-FCP-NEXT: vmovdqa %ymm9, (%r8)
-; AVX2-FCP-NEXT: vmovdqa %ymm3, (%r9)
+; AVX2-FCP-NEXT: vmovdqa %ymm2, (%r9)
; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX2-FCP-NEXT: vmovdqa %ymm0, (%rax)
; AVX2-FCP-NEXT: vzeroupper
@@ -2421,17 +2442,18 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vpblendd {{.*#+}} ymm1 = ymm12[0,1,2],ymm1[3,4,5,6,7]
; AVX512-NEXT: vpblendw {{.*#+}} ymm9 = ymm1[0,1,2],ymm9[3,4,5,6,7],ymm1[8,9,10],ymm9[11,12,13,14,15]
; AVX512-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm9[4,5,6,7]
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm9 = [2,3,14,15,10,11,6,7,2,3,14,15,12,13,14,15]
+; AVX512-NEXT: vpshufb %xmm9, %xmm13, %xmm12
+; AVX512-NEXT: vpshufb %xmm9, %xmm11, %xmm9
+; AVX512-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1],xmm12[2],xmm9[3],xmm12[4,5],xmm9[6,7]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
+; AVX512-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2],ymm10[3,4,5,6,7]
; AVX512-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
; AVX512-NEXT: vpblendw {{.*#+}} xmm5 = xmm8[0,1,2],xmm5[3],xmm8[4,5],xmm5[6],xmm8[7]
; AVX512-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm9 = xmm13[u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm10 = xmm11[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
-; AVX512-NEXT: vpblendw {{.*#+}} xmm9 = xmm10[0,1],xmm9[2],xmm10[3],xmm9[4,5],xmm10[6,7]
-; AVX512-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2],ymm8[3,4,5,6,7]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm5 = ymm8[0,1,2],ymm5[3,4,5,6,7],ymm8[8,9,10],ymm5[11,12,13,14,15]
-; AVX512-NEXT: vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3],ymm5[4,5,6,7]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm5 = ymm9[0,1,2],ymm5[3,4,5,6,7],ymm9[8,9,10],ymm5[11,12,13,14,15]
+; AVX512-NEXT: vpblendd {{.*#+}} ymm5 = ymm9[0,1,2,3],ymm5[4,5,6,7]
; AVX512-NEXT: vpblendd {{.*#+}} ymm8 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7]
; AVX512-NEXT: vextracti128 $1, %ymm8, %xmm9
; AVX512-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[0,1,2,1]
@@ -2509,42 +2531,45 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
;
; AVX512-FCP-LABEL: load_i16_stride6_vf16:
; AVX512-FCP: # %bb.0:
-; AVX512-FCP-NEXT: vmovdqa 160(%rdi), %ymm0
-; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm4
-; AVX512-FCP-NEXT: vmovdqa 64(%rdi), %ymm2
-; AVX512-FCP-NEXT: vmovdqa 128(%rdi), %ymm1
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm5, %xmm8
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm8[0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0,1,2],xmm6[3],xmm7[4,5],xmm6[6],xmm7[7]
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm9
-; AVX512-FCP-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm2[2,3],mem[2,3]
-; AVX512-FCP-NEXT: vinserti128 $1, 96(%rdi), %ymm2, %ymm7
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm7[0],ymm6[1],ymm7[2,3,4,5],ymm6[6],ymm7[7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm11[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm11, %xmm13
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm13 = xmm13[2,1,0,3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm14 = xmm13[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm12[0,1],xmm14[2],xmm12[3],xmm14[4,5],xmm12[6,7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm12[0,1,2],ymm2[3,4,5,6,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm9 = ymm2[0,1,2],ymm9[3,4,5,6,7],ymm2[8,9,10],ymm9[11,12,13,14,15]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm9[4,5,6,7]
-; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm8[0,1,2],xmm5[3],xmm8[4,5],xmm5[6],xmm8[7]
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm11[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm13[u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1],xmm10[2],xmm9[3],xmm10[4,5],xmm9[6,7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2],ymm8[3,4,5,6,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm8[0,1,2],ymm5[3,4,5,6,7],ymm8[8,9,10],ymm5[11,12,13,14,15]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3],ymm5[4,5,6,7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,12,13,0,1,4,5,8,9,12,13,12,13,14,15]
+; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm2
+; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm3
+; AVX512-FCP-NEXT: vmovdqa 64(%rdi), %ymm4
+; AVX512-FCP-NEXT: vmovdqa 128(%rdi), %ymm0
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5,6],ymm3[7]
+; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm8, %xmm5
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm8, %xmm6
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm9 = xmm6[2,1,0,3]
+; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm9, %xmm1
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm5[0,1],xmm1[2],xmm5[3],xmm1[4,5],xmm5[6,7]
+; AVX512-FCP-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm4[2,3],mem[2,3]
+; AVX512-FCP-NEXT: vinserti128 $1, 96(%rdi), %ymm4, %ymm7
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm7[0],ymm6[1],ymm7[2,3,4,5],ymm6[6],ymm7[7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm5[3,4,5,6,7]
+; AVX512-FCP-NEXT: vmovdqa 160(%rdi), %ymm5
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm0[0,1],ymm5[2],ymm0[3,4],ymm5[5],ymm0[6,7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [0,1,4,5,4,5,4,5,0,1,12,13,8,9,4,5]
+; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm10, %xmm12
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm10, %xmm13
+; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm13, %xmm11
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm11[0,1,2],xmm12[3],xmm11[4,5],xmm12[6],xmm11[7]
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm11, %ymm0, %ymm11
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm11 = ymm1[0,1,2],ymm11[3,4,5,6,7],ymm1[8,9,10],ymm11[11,12,13,14,15]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm11[4,5,6,7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [2,3,14,15,2,3,6,7,10,11,14,15,12,13,14,15]
+; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm8, %xmm8
+; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm9, %xmm9
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1],xmm9[2],xmm8[3],xmm9[4,5],xmm8[6,7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm8[0,1,2],ymm4[3,4,5,6,7]
+; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm8 = xmm10[0,1,2,3,5,5,5,5]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm13[6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm9[0,1,2],xmm8[3],xmm9[4,5],xmm8[6],xmm9[7]
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm8 = ymm4[0,1,2],ymm8[3,4,5,6,7],ymm4[8,9,10],ymm8[11,12,13,14,15]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm5[0,1],ymm0[2],ymm5[3,4],ymm0[5],ymm5[6,7]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm8, %xmm9
; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[0,1,2,1]
; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm10 = xmm9[0,1,2,3,6,5,6,4]
@@ -2553,7 +2578,7 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm10[4],xmm8[5,6],xmm10[7]
; AVX512-FCP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm7[0,1],ymm6[2],ymm7[3],ymm6[4],ymm7[5,6],ymm6[7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm13 = xmm12[2,1,2,3]
; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm14 = xmm13[2,1,2,0,4,5,6,7]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm12, %xmm12
@@ -2577,36 +2602,39 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3,4,5],ymm7[6],ymm6[7]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm3, %xmm4
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm10 = xmm4[2,2,2,2,4,5,6,7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm3[8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm10 = xmm3[2,2,2,2,4,5,6,7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm2[8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm11[0],xmm10[1],xmm11[2,3],xmm10[4],xmm11[5,6,7]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
; AVX512-FCP-NEXT: vpternlogq $236, %ymm11, %ymm7, %ymm10
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm1[u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm5[1],ymm0[2,3],ymm5[4],ymm0[5,6],ymm5[7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [0,1,2,3,0,1,4,5,8,9,12,13,0,1,12,13]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm7
+; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm7, %xmm12
; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm0[0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm12[0,1,2,3],xmm7[4],xmm12[5],xmm7[6,7]
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm10[0,1,2,3,4],ymm7[5,6,7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5,6,7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[6,7,2,3,14,15,26,27,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-FCP-NEXT: vpternlogq $236, %ymm11, %ymm4, %ymm3
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5],xmm1[6,7]
+; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm0, %xmm5
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm12[4],xmm5[5],xmm12[6,7]
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm10[0,1,2,3,4],ymm5[5,6,7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [10,11,6,7,2,3,14,15,10,11,10,11,12,13,14,15]
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm3, %xmm3
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm2, %xmm2
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6,7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[6,7,2,3,14,15,26,27,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-FCP-NEXT: vpternlogq $248, %ymm11, %ymm2, %ymm3
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,2,3,6,7,10,11,14,15,2,3,14,15]
+; AVX512-FCP-NEXT: vpshufb %xmm2, %xmm7, %xmm6
+; AVX512-FCP-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm6[4],xmm0[5],xmm6[6,7]
; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm0[5,6,7]
-; AVX512-FCP-NEXT: vmovdqa %ymm2, (%rsi)
-; AVX512-FCP-NEXT: vmovdqa %ymm5, (%rdx)
+; AVX512-FCP-NEXT: vmovdqa %ymm1, (%rsi)
+; AVX512-FCP-NEXT: vmovdqa %ymm4, (%rdx)
; AVX512-FCP-NEXT: vmovdqa %ymm8, (%rcx)
; AVX512-FCP-NEXT: vmovdqa %ymm9, (%r8)
-; AVX512-FCP-NEXT: vmovdqa %ymm7, (%r9)
+; AVX512-FCP-NEXT: vmovdqa %ymm5, (%r9)
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-FCP-NEXT: vmovdqa %ymm0, (%rax)
; AVX512-FCP-NEXT: vzeroupper
@@ -2638,17 +2666,18 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm1 = ymm12[0,1,2],ymm1[3,4,5,6,7]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm9 = ymm1[0,1,2],ymm9[3,4,5,6,7],ymm1[8,9,10],ymm9[11,12,13,14,15]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm9[4,5,6,7]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm9 = [2,3,14,15,10,11,6,7,2,3,14,15,12,13,14,15]
+; AVX512DQ-NEXT: vpshufb %xmm9, %xmm13, %xmm12
+; AVX512DQ-NEXT: vpshufb %xmm9, %xmm11, %xmm9
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1],xmm12[2],xmm9[3],xmm12[4,5],xmm9[6,7]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2],ymm10[3,4,5,6,7]
; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm5 = xmm8[0,1,2],xmm5[3],xmm8[4,5],xmm5[6],xmm8[7]
; AVX512DQ-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm9 = xmm13[u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm10 = xmm11[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm9 = xmm10[0,1],xmm9[2],xmm10[3],xmm9[4,5],xmm10[6,7]
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2],ymm8[3,4,5,6,7]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm5 = ymm8[0,1,2],ymm5[3,4,5,6,7],ymm8[8,9,10],ymm5[11,12,13,14,15]
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3],ymm5[4,5,6,7]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm5 = ymm9[0,1,2],ymm5[3,4,5,6,7],ymm9[8,9,10],ymm5[11,12,13,14,15]
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm5 = ymm9[0,1,2,3],ymm5[4,5,6,7]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm8 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7]
; AVX512DQ-NEXT: vextracti128 $1, %ymm8, %xmm9
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[0,1,2,1]
@@ -2726,42 +2755,45 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
;
; AVX512DQ-FCP-LABEL: load_i16_stride6_vf16:
; AVX512DQ-FCP: # %bb.0:
-; AVX512DQ-FCP-NEXT: vmovdqa 160(%rdi), %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm4
-; AVX512DQ-FCP-NEXT: vmovdqa 64(%rdi), %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqa 128(%rdi), %ymm1
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm5, %xmm8
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm8[0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0,1,2],xmm6[3],xmm7[4,5],xmm6[6],xmm7[7]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm9
-; AVX512DQ-FCP-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm2[2,3],mem[2,3]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, 96(%rdi), %ymm2, %ymm7
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm7[0],ymm6[1],ymm7[2,3,4,5],ymm6[6],ymm7[7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm11[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm11, %xmm13
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm13 = xmm13[2,1,0,3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm14 = xmm13[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm12[0,1],xmm14[2],xmm12[3],xmm14[4,5],xmm12[6,7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm12[0,1,2],ymm2[3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm9 = ymm2[0,1,2],ymm9[3,4,5,6,7],ymm2[8,9,10],ymm9[11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm9[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm8[0,1,2],xmm5[3],xmm8[4,5],xmm5[6],xmm8[7]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm11[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm13[u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1],xmm10[2],xmm9[3],xmm10[4,5],xmm9[6,7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2],ymm8[3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm8[0,1,2],ymm5[3,4,5,6,7],ymm8[8,9,10],ymm5[11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3],ymm5[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,12,13,0,1,4,5,8,9,12,13,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm3
+; AVX512DQ-FCP-NEXT: vmovdqa 64(%rdi), %ymm4
+; AVX512DQ-FCP-NEXT: vmovdqa 128(%rdi), %ymm0
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5,6],ymm3[7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm8, %xmm5
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm8, %xmm6
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm9 = xmm6[2,1,0,3]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm9, %xmm1
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm5[0,1],xmm1[2],xmm5[3],xmm1[4,5],xmm5[6,7]
+; AVX512DQ-FCP-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm4[2,3],mem[2,3]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, 96(%rdi), %ymm4, %ymm7
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm7[0],ymm6[1],ymm7[2,3,4,5],ymm6[6],ymm7[7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm5[3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa 160(%rdi), %ymm5
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm0[0,1],ymm5[2],ymm0[3,4],ymm5[5],ymm0[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [0,1,4,5,4,5,4,5,0,1,12,13,8,9,4,5]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm11, %xmm10, %xmm12
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm10, %xmm13
+; AVX512DQ-FCP-NEXT: vpshufb %xmm11, %xmm13, %xmm11
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm11[0,1,2],xmm12[3],xmm11[4,5],xmm12[6],xmm11[7]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm11, %ymm0, %ymm11
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm11 = ymm1[0,1,2],ymm11[3,4,5,6,7],ymm1[8,9,10],ymm11[11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm11[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [2,3,14,15,2,3,6,7,10,11,14,15,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm11, %xmm8, %xmm8
+; AVX512DQ-FCP-NEXT: vpshufb %xmm11, %xmm9, %xmm9
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1],xmm9[2],xmm8[3],xmm9[4,5],xmm8[6,7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm8[0,1,2],ymm4[3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm8 = xmm10[0,1,2,3,5,5,5,5]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm13[6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm9[0,1,2],xmm8[3],xmm9[4,5],xmm8[6],xmm9[7]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm8 = ymm4[0,1,2],ymm8[3,4,5,6,7],ymm4[8,9,10],ymm8[11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm5[0,1],ymm0[2],ymm5[3,4],ymm0[5],ymm5[6,7]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm8, %xmm9
; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[0,1,2,1]
; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm10 = xmm9[0,1,2,3,6,5,6,4]
@@ -2770,7 +2802,7 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm10[4],xmm8[5,6],xmm10[7]
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm7[0,1],ymm6[2],ymm7[3],ymm6[4],ymm7[5,6],ymm6[7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm13 = xmm12[2,1,2,3]
; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm14 = xmm13[2,1,2,0,4,5,6,7]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm12, %xmm12
@@ -2794,36 +2826,39 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3,4,5],ymm7[6],ymm6[7]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm3, %xmm4
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm10 = xmm4[2,2,2,2,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm3[8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm10 = xmm3[2,2,2,2,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm2[8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm11[0],xmm10[1],xmm11[2,3],xmm10[4],xmm11[5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
; AVX512DQ-FCP-NEXT: vpternlogq $236, %ymm11, %ymm7, %ymm10
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm1[u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm5[1],ymm0[2,3],ymm5[4],ymm0[5,6],ymm5[7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [0,1,2,3,0,1,4,5,8,9,12,13,0,1,12,13]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm7
+; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm7, %xmm12
; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm0[0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm12[0,1,2,3],xmm7[4],xmm12[5],xmm7[6,7]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm10[0,1,2,3,4],ymm7[5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[6,7,2,3,14,15,26,27,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-FCP-NEXT: vpternlogq $236, %ymm11, %ymm4, %ymm3
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5],xmm1[6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm0, %xmm5
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm12[4],xmm5[5],xmm12[6,7]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm10[0,1,2,3,4],ymm5[5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [10,11,6,7,2,3,14,15,10,11,10,11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm3, %xmm3
+; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm2, %xmm2
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[6,7,2,3,14,15,26,27,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm11, %ymm2, %ymm3
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,2,3,6,7,10,11,14,15,2,3,14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm2, %xmm7, %xmm6
+; AVX512DQ-FCP-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm6[4],xmm0[5],xmm6[6,7]
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm0[5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm2, (%rsi)
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm5, (%rdx)
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm1, (%rsi)
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm4, (%rdx)
; AVX512DQ-FCP-NEXT: vmovdqa %ymm8, (%rcx)
; AVX512DQ-FCP-NEXT: vmovdqa %ymm9, (%r8)
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm7, (%r9)
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm5, (%r9)
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, (%rax)
; AVX512DQ-FCP-NEXT: vzeroupper
@@ -4410,9 +4445,9 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
;
; AVX2-FP-LABEL: load_i16_stride6_vf32:
; AVX2-FP: # %bb.0:
-; AVX2-FP-NEXT: subq $488, %rsp # imm = 0x1E8
+; AVX2-FP-NEXT: subq $456, %rsp # imm = 0x1C8
; AVX2-FP-NEXT: vmovdqa (%rdi), %ymm5
-; AVX2-FP-NEXT: vmovdqu %ymm5, (%rsp) # 32-byte Spill
+; AVX2-FP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovdqa 32(%rdi), %ymm7
; AVX2-FP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovdqa 64(%rdi), %ymm0
@@ -4421,12 +4456,12 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FP-NEXT: vmovdqa 192(%rdi), %ymm10
; AVX2-FP-NEXT: vmovdqa 288(%rdi), %ymm2
; AVX2-FP-NEXT: vmovdqa 256(%rdi), %ymm3
-; AVX2-FP-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm3[2,3],ymm2[2,3]
-; AVX2-FP-NEXT: vperm2i128 {{.*#+}} ymm12 = ymm3[0,1],ymm2[0,1]
+; AVX2-FP-NEXT: vperm2i128 {{.*#+}} ymm12 = ymm3[2,3],ymm2[2,3]
+; AVX2-FP-NEXT: vperm2i128 {{.*#+}} ymm14 = ymm3[0,1],ymm2[0,1]
; AVX2-FP-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
-; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm4 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm0[1],ymm2[2,3,4,5],ymm0[6],ymm2[7]
; AVX2-FP-NEXT: vpshufb %ymm4, %ymm1, %ymm6
@@ -4440,121 +4475,116 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FP-NEXT: vpblendvb %ymm0, %ymm8, %ymm6, %ymm5
; AVX2-FP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vmovdqa %ymm10, %ymm5
; AVX2-FP-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm6 = ymm9[0,1],ymm10[2],ymm9[3,4],ymm10[5],ymm9[6,7]
-; AVX2-FP-NEXT: vpshufb %xmm7, %xmm6, %xmm8
-; AVX2-FP-NEXT: vextracti128 $1, %ymm6, %xmm7
-; AVX2-FP-NEXT: vpshuflw {{.*#+}} xmm10 = xmm7[2,2,2,2,4,5,6,7]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0],xmm10[1],xmm8[2,3],xmm10[4],xmm8[5,6,7]
-; AVX2-FP-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpshufb %xmm7, %xmm6, %xmm7
+; AVX2-FP-NEXT: vextracti128 $1, %ymm6, %xmm8
+; AVX2-FP-NEXT: vpshuflw {{.*#+}} xmm11 = xmm8[2,2,2,2,4,5,6,7]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0],xmm11[1],xmm7[2,3],xmm11[4],xmm7[5,6,7]
; AVX2-FP-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm10 = ymm13[0],ymm12[1],ymm13[2,3,4,5],ymm12[6],ymm13[7]
-; AVX2-FP-NEXT: vpshufb %ymm4, %ymm10, %ymm4
-; AVX2-FP-NEXT: vpblendvb %ymm0, %ymm8, %ymm4, %ymm4
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm11 = ymm12[0],ymm14[1],ymm12[2,3,4,5],ymm14[6],ymm12[7]
+; AVX2-FP-NEXT: vpshufb %ymm4, %ymm11, %ymm4
+; AVX2-FP-NEXT: vpblendvb %ymm0, %ymm7, %ymm4, %ymm4
; AVX2-FP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpbroadcastd {{.*#+}} xmm4 = [10,11,6,7,10,11,6,7,10,11,6,7,10,11,6,7]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm4 = [10,11,6,7,2,3,14,15,10,11,10,11,12,13,14,15]
; AVX2-FP-NEXT: vpshufb %xmm4, %xmm3, %xmm3
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm8 = [10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15]
-; AVX2-FP-NEXT: vpshufb %xmm8, %xmm2, %xmm2
+; AVX2-FP-NEXT: vpshufb %xmm4, %xmm2, %xmm2
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6,7]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm3 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
; AVX2-FP-NEXT: vpshufb %ymm3, %ymm1, %ymm1
; AVX2-FP-NEXT: vpblendvb %ymm0, %ymm2, %ymm1, %ymm1
; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpshufb %ymm3, %ymm10, %ymm1
-; AVX2-FP-NEXT: vpshufb %xmm4, %xmm7, %xmm3
-; AVX2-FP-NEXT: vpshufb %xmm8, %xmm6, %xmm4
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0],ymm9[1],ymm5[2,3],ymm9[4],ymm5[5,6],ymm9[7]
+; AVX2-FP-NEXT: vpshufb %ymm3, %ymm11, %ymm1
+; AVX2-FP-NEXT: vpshufb %xmm4, %xmm8, %xmm3
+; AVX2-FP-NEXT: vpshufb %xmm4, %xmm6, %xmm4
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm5 = ymm10[0],ymm9[1],ymm10[2,3],ymm9[4],ymm10[5,6],ymm9[7]
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1],xmm4[2,3],xmm3[4],xmm4[5,6,7]
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm15 = [0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm15 = [0,1,12,13,0,1,4,5,8,9,12,13,12,13,14,15]
; AVX2-FP-NEXT: vpblendvb %ymm0, %ymm3, %ymm1, %ymm0
; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpshufb %xmm15, %xmm2, %xmm0
-; AVX2-FP-NEXT: vextracti128 $1, %ymm2, %xmm1
+; AVX2-FP-NEXT: vpshufb %xmm15, %xmm5, %xmm0
+; AVX2-FP-NEXT: vextracti128 $1, %ymm5, %xmm1
; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm11 = xmm1[2,1,0,3]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm1 = xmm11[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
+; AVX2-FP-NEXT: vpshufb %xmm15, %xmm11, %xmm1
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm14 = [u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm8 = ymm12[0],ymm13[1],ymm12[2,3,4,5],ymm13[6],ymm12[7]
-; AVX2-FP-NEXT: vpshufb %ymm14, %ymm8, %ymm1
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm13 = [u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm10 = ymm14[0],ymm12[1],ymm14[2,3,4,5],ymm12[6],ymm14[7]
+; AVX2-FP-NEXT: vpshufb %ymm13, %ymm10, %ymm1
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
; AVX2-FP-NEXT: vmovdqa 352(%rdi), %ymm1
; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vmovdqa 320(%rdi), %ymm3
-; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm7 = ymm3[0,1],ymm1[2],ymm3[3,4],ymm1[5],ymm3[6,7]
-; AVX2-FP-NEXT: vpbroadcastd {{.*#+}} xmm5 = [8,9,4,5,8,9,4,5,8,9,4,5,8,9,4,5]
-; AVX2-FP-NEXT: vpshufb %xmm5, %xmm7, %xmm6
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm10 = [0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm7, %xmm4
-; AVX2-FP-NEXT: vpshufb %xmm10, %xmm4, %xmm9
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm6 = xmm9[0,1,2],xmm6[3],xmm9[4,5],xmm6[6],xmm9[7]
+; AVX2-FP-NEXT: vmovdqa 320(%rdi), %ymm2
+; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm7 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,4,5,4,5,0,1,12,13,8,9,4,5]
+; AVX2-FP-NEXT: vpshufb %xmm2, %xmm7, %xmm6
+; AVX2-FP-NEXT: vextracti128 $1, %ymm7, %xmm9
+; AVX2-FP-NEXT: vpshufb %xmm2, %xmm9, %xmm8
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm6 = xmm8[0,1,2],xmm6[3],xmm8[4,5],xmm6[6],xmm8[7]
; AVX2-FP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm6 = ymm0[0,1,2],ymm6[3,4,5,6,7],ymm0[8,9,10],ymm6[11,12,13,14,15]
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7]
; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovdqa 160(%rdi), %ymm0
; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vmovdqa 128(%rdi), %ymm9
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm9[0,1],ymm0[2],ymm9[3,4],ymm0[5],ymm9[6,7]
-; AVX2-FP-NEXT: vpshufb %xmm5, %xmm0, %xmm5
-; AVX2-FP-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX2-FP-NEXT: vpshufb %xmm10, %xmm3, %xmm10
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1,2],xmm5[3],xmm10[4,5],xmm5[6],xmm10[7]
-; AVX2-FP-NEXT: vmovdqu (%rsp), %ymm1 # 32-byte Reload
-; AVX2-FP-NEXT: vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm5 # 32-byte Folded Reload
-; AVX2-FP-NEXT: # ymm5 = ymm1[0],mem[1],ymm1[2,3],mem[4],ymm1[5,6],mem[7]
-; AVX2-FP-NEXT: vpshufb %xmm15, %xmm5, %xmm15
-; AVX2-FP-NEXT: vextracti128 $1, %ymm5, %xmm13
-; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm13 = xmm13[2,1,0,3]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm12 = xmm13[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm12 = xmm15[0,1],xmm12[2],xmm15[3],xmm12[4,5],xmm15[6,7]
-; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FP-NEXT: vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX2-FP-NEXT: # ymm1 = ymm1[0],mem[1],ymm1[2,3,4,5],mem[6],ymm1[7]
-; AVX2-FP-NEXT: vpshufb %ymm14, %ymm1, %ymm14
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1,2],ymm14[3,4,5,6,7]
-; AVX2-FP-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm10 = ymm12[0,1,2],ymm10[3,4,5,6,7],ymm12[8,9,10],ymm10[11,12,13,14,15]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm6 = ymm12[0,1,2,3],ymm10[4,5,6,7]
-; AVX2-FP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpbroadcastq {{.*#+}} xmm12 = [6,7,2,3,12,13,14,15,6,7,2,3,12,13,14,15]
-; AVX2-FP-NEXT: vpshufb %xmm12, %xmm2, %xmm2
-; AVX2-FP-NEXT: vpshuflw {{.*#+}} xmm10 = xmm11[1,1,1,1,4,5,6,7]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm10[2],xmm2[3,4],xmm10[5],xmm2[6],xmm10[7]
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm11 = [u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
-; AVX2-FP-NEXT: vpshufb %ymm11, %ymm8, %ymm8
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm14 = [2,3,6,7,4,5,0,1,10,11,14,15,12,13,14,15]
-; AVX2-FP-NEXT: vpshufb %xmm14, %xmm2, %xmm2
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm8[3,4,5,6,7]
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm8 = [6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
-; AVX2-FP-NEXT: vpshufb %xmm8, %xmm4, %xmm4
+; AVX2-FP-NEXT: vmovdqa 128(%rdi), %ymm8
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0,1],ymm0[2],ymm8[3,4],ymm0[5],ymm8[6,7]
+; AVX2-FP-NEXT: vpshufb %xmm2, %xmm1, %xmm0
+; AVX2-FP-NEXT: vextracti128 $1, %ymm1, %xmm4
+; AVX2-FP-NEXT: vpshufb %xmm2, %xmm4, %xmm2
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm3 = xmm2[0,1,2],xmm0[3],xmm2[4,5],xmm0[6],xmm2[7]
+; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FP-NEXT: vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
+; AVX2-FP-NEXT: # ymm2 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm2, %xmm12
+; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm12 = xmm12[2,1,0,3]
+; AVX2-FP-NEXT: vpshufb %xmm15, %xmm2, %xmm0
+; AVX2-FP-NEXT: vpshufb %xmm15, %xmm12, %xmm15
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm15[2],xmm0[3],xmm15[4,5],xmm0[6,7]
+; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX2-FP-NEXT: vpblendd $189, (%rsp), %ymm6, %ymm15 # 32-byte Folded Reload
+; AVX2-FP-NEXT: # ymm15 = mem[0],ymm6[1],mem[2,3,4,5],ymm6[6],mem[7]
+; AVX2-FP-NEXT: vpshufb %ymm13, %ymm15, %ymm13
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm13[3,4,5,6,7]
+; AVX2-FP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm3 = ymm0[0,1,2],ymm3[3,4,5,6,7],ymm0[8,9,10],ymm3[11,12,13,14,15]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
+; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpbroadcastq {{.*#+}} xmm6 = [6,7,2,3,12,13,14,15,6,7,2,3,12,13,14,15]
+; AVX2-FP-NEXT: vpshufb %xmm6, %xmm5, %xmm3
+; AVX2-FP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm11[1,1,1,1,4,5,6,7]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm5[2],xmm3[3,4],xmm5[5],xmm3[6],xmm5[7]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm5 = [u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
+; AVX2-FP-NEXT: vpshufb %ymm5, %ymm10, %ymm10
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm0 = [2,3,6,7,4,5,0,1,10,11,14,15,12,13,14,15]
+; AVX2-FP-NEXT: vpshufb %xmm0, %xmm3, %xmm3
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm10[3,4,5,6,7]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm10 = [6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
+; AVX2-FP-NEXT: vpshufb %xmm10, %xmm9, %xmm9
; AVX2-FP-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,5,5,5]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2],xmm7[3],xmm4[4,5],xmm7[6],xmm4[7]
-; AVX2-FP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm4 = ymm2[0,1,2],ymm4[3,4,5,6,7],ymm2[8,9,10],ymm4[11,12,13,14,15]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpshufb %xmm8, %xmm3, %xmm2
-; AVX2-FP-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[3],xmm2[4,5],xmm0[6],xmm2[7]
-; AVX2-FP-NEXT: vpshufb %ymm11, %ymm1, %ymm1
-; AVX2-FP-NEXT: vpshufb %xmm12, %xmm5, %xmm2
-; AVX2-FP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm13[1,1,1,1,4,5,6,7]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3,4],xmm3[5],xmm2[6],xmm3[7]
-; AVX2-FP-NEXT: vpshufb %xmm14, %xmm2, %xmm2
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7]
-; AVX2-FP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm7 = xmm9[0,1,2],xmm7[3],xmm9[4,5],xmm7[6],xmm9[7]
+; AVX2-FP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm7 = ymm3[0,1,2],ymm7[3,4,5,6,7],ymm3[8,9,10],ymm7[11,12,13,14,15]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm7[4,5,6,7]
+; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpshufb %xmm10, %xmm4, %xmm3
+; AVX2-FP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3],xmm3[4,5],xmm1[6],xmm3[7]
+; AVX2-FP-NEXT: vpshufb %ymm5, %ymm15, %ymm3
+; AVX2-FP-NEXT: vpshufb %xmm6, %xmm2, %xmm2
+; AVX2-FP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm12[1,1,1,1,4,5,6,7]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2],xmm2[3,4],xmm4[5],xmm2[6],xmm4[7]
+; AVX2-FP-NEXT: vpshufb %xmm0, %xmm2, %xmm0
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3,4,5,6,7]
+; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
+; AVX2-FP-NEXT: # ymm14 = ymm14[0,1],mem[2],ymm14[3],mem[4],ymm14[5,6],mem[7]
; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FP-NEXT: vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
-; AVX2-FP-NEXT: # ymm6 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
-; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FP-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX2-FP-NEXT: # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX2-FP-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX2-FP-NEXT: # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[2,1,0,3]
; AVX2-FP-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[0,1,2,1]
@@ -4565,106 +4595,104 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FP-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm1, %xmm8
-; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm11 = xmm8[0,3,2,1]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm1, %xmm6
+; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm10 = xmm6[0,3,2,1]
; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,1,2,3]
-; AVX2-FP-NEXT: vpbroadcastq {{.*#+}} xmm8 = [12,13,0,1,4,5,0,0,12,13,0,1,4,5,0,0]
-; AVX2-FP-NEXT: vpshufb %xmm8, %xmm11, %xmm12
-; AVX2-FP-NEXT: vpshuflw {{.*#+}} xmm13 = xmm2[2,1,2,0,4,5,6,7]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm12 = xmm13[0],xmm12[1,2],xmm13[3],xmm12[4,5,6,7]
+; AVX2-FP-NEXT: vpbroadcastq {{.*#+}} xmm6 = [12,13,0,1,4,5,0,0,12,13,0,1,4,5,0,0]
+; AVX2-FP-NEXT: vpshufb %xmm6, %xmm10, %xmm9
+; AVX2-FP-NEXT: vpshuflw {{.*#+}} xmm12 = xmm2[2,1,2,0,4,5,6,7]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm9 = xmm12[0],xmm9[1,2],xmm12[3],xmm9[4,5,6,7]
; AVX2-FP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm13 = [4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
-; AVX2-FP-NEXT: vpshufb %ymm13, %ymm6, %ymm14
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm3 = ymm14[0,1,2],ymm3[3,4,5,6,7],ymm14[8,9,10],ymm3[11,12,13,14,15]
-; AVX2-FP-NEXT: vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,6,5,4]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm12 = xmm12[0,1,2,3,4],xmm14[5,6,7]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm12[0,1,2,3],ymm3[4,5,6,7]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm15 = [4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
+; AVX2-FP-NEXT: vpshufb %ymm15, %ymm14, %ymm12
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm3 = ymm12[0,1,2],ymm3[3,4,5,6,7],ymm12[8,9,10],ymm3[11,12,13,14,15]
+; AVX2-FP-NEXT: vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,6,5,4]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4],xmm12[5,6,7]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm9[0,1,2,3],ymm3[4,5,6,7]
; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm14 = ymm3[0,1],ymm9[2],ymm3[3,4],ymm9[5],ymm3[6,7]
-; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm12 = xmm14[2,1,0,3]
+; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm9 = ymm7[0,1],ymm8[2],ymm7[3,4],ymm8[5],ymm7[6,7]
+; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm12 = xmm9[2,1,0,3]
; AVX2-FP-NEXT: vpshufb %xmm0, %xmm12, %xmm0
-; AVX2-FP-NEXT: vextracti128 $1, %ymm14, %xmm14
-; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm14 = xmm14[0,1,2,1]
-; AVX2-FP-NEXT: vpshufhw {{.*#+}} xmm15 = xmm14[0,1,2,3,6,5,6,4]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm15[4],xmm0[5,6],xmm15[7]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm9, %xmm9
+; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[0,1,2,1]
+; AVX2-FP-NEXT: vpshufhw {{.*#+}} xmm13 = xmm9[0,1,2,3,6,5,6,4]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm13[4],xmm0[5,6],xmm13[7]
+; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FP-NEXT: vpblendd $107, (%rsp), %ymm0, %ymm13 # 32-byte Folded Reload
+; AVX2-FP-NEXT: # ymm13 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FP-NEXT: vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
-; AVX2-FP-NEXT: # ymm15 = ymm0[0,1],mem[2],ymm0[3],mem[4],ymm0[5,6],mem[7]
-; AVX2-FP-NEXT: vmovdqu (%rsp), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm0, %xmm10
-; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm10 = xmm10[0,3,2,1]
-; AVX2-FP-NEXT: vpshufb %xmm8, %xmm10, %xmm8
+; AVX2-FP-NEXT: vextracti128 $1, %ymm0, %xmm11
+; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm11 = xmm11[0,3,2,1]
+; AVX2-FP-NEXT: vpshufb %xmm6, %xmm11, %xmm6
; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
-; AVX2-FP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm0[2,1,2,0,4,5,6,7]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0],xmm8[1,2],xmm7[3],xmm8[4,5,6,7]
+; AVX2-FP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm0[2,1,2,0,4,5,6,7]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm6[1,2],xmm3[3],xmm6[4,5,6,7]
; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-FP-NEXT: vpshufb %ymm13, %ymm15, %ymm8
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm1 = ymm8[0,1,2],ymm1[3,4,5,6,7],ymm8[8,9,10],ymm1[11,12,13,14,15]
-; AVX2-FP-NEXT: vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,6,5,4]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3,4],xmm8[5,6,7]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm8 = ymm7[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-FP-NEXT: vpshufb %ymm15, %ymm13, %ymm6
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm1 = ymm6[0,1,2],ymm1[3,4,5,6,7],ymm6[8,9,10],ymm1[11,12,13,14,15]
+; AVX2-FP-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,6,5,4]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm6[5,6,7]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm6 = ymm3[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm1 = [2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u]
-; AVX2-FP-NEXT: vpshufb %xmm1, %xmm5, %xmm5
+; AVX2-FP-NEXT: vpshufb %xmm1, %xmm5, %xmm3
; AVX2-FP-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,5,6,5]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4],xmm5[5,6],xmm4[7]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5,6],xmm4[7]
; AVX2-FP-NEXT: vpbroadcastq {{.*#+}} xmm5 = [14,15,2,3,6,7,0,0,14,15,2,3,6,7,0,0]
-; AVX2-FP-NEXT: vpshufb %xmm5, %xmm11, %xmm7
+; AVX2-FP-NEXT: vpshufb %xmm5, %xmm10, %xmm4
; AVX2-FP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[3,1,2,1,4,5,6,7]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm7[1,2],xmm2[3],xmm7[4,5,6,7]
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm7 = [6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19]
-; AVX2-FP-NEXT: vpshufb %ymm7, %ymm6, %ymm6
-; AVX2-FP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm4 = ymm6[0,1,2],ymm4[3,4,5,6,7],ymm6[8,9,10],ymm4[11,12,13,14,15]
-; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,1,3,2]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm6[5,6,7]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm4 = ymm2[0,1,2,3],ymm4[4,5,6,7]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1,2],xmm2[3],xmm4[4,5,6,7]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm10 = [6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19]
+; AVX2-FP-NEXT: vpshufb %ymm10, %ymm14, %ymm4
+; AVX2-FP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3,4,5,6,7],ymm4[8,9,10],ymm3[11,12,13,14,15]
+; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[0,1,3,2]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm4[5,6,7]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm4 = ymm2[0,1,2,3],ymm3[4,5,6,7]
; AVX2-FP-NEXT: vpshufb %xmm1, %xmm12, %xmm1
-; AVX2-FP-NEXT: vpshufhw {{.*#+}} xmm2 = xmm14[0,1,2,3,7,5,6,5]
+; AVX2-FP-NEXT: vpshufhw {{.*#+}} xmm2 = xmm9[0,1,2,3,7,5,6,5]
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
-; AVX2-FP-NEXT: vpshufb %ymm7, %ymm15, %ymm2
-; AVX2-FP-NEXT: vpshufb %xmm5, %xmm10, %xmm5
+; AVX2-FP-NEXT: vpshufb %ymm10, %ymm13, %ymm2
+; AVX2-FP-NEXT: vpshufb %xmm5, %xmm11, %xmm3
; AVX2-FP-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm5[1,2],xmm0[3],xmm5[4,5,6,7]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1,2],xmm0[3],xmm3[4,5,6,7]
; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,2]
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm2[5,6,7]
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm9[0],ymm3[1],ymm9[2,3],ymm3[4],ymm9[5,6],ymm3[7]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0],ymm7[1],ymm8[2,3],ymm7[4],ymm8[5,6],ymm7[7]
; AVX2-FP-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
-; AVX2-FP-NEXT: vpbroadcastq {{.*#+}} xmm5 = [8,9,0,0,0,1,12,13,8,9,0,0,0,1,12,13]
-; AVX2-FP-NEXT: vpshufb %xmm5, %xmm2, %xmm6
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm7 = [0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u]
-; AVX2-FP-NEXT: vpshufb %xmm7, %xmm1, %xmm9
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm6 = xmm9[0,1,2,3],xmm6[4],xmm9[5],xmm6[6,7]
-; AVX2-FP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX2-FP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
-; AVX2-FP-NEXT: # ymm6 = mem[0,1,2,3,4],ymm6[5,6,7]
-; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX2-FP-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
-; AVX2-FP-NEXT: # ymm9 = mem[0],ymm9[1],mem[2,3],ymm9[4],mem[5,6],ymm9[7]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm9, %xmm10
-; AVX2-FP-NEXT: vpshufb %xmm5, %xmm10, %xmm5
-; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[0,3,2,1]
-; AVX2-FP-NEXT: vpshufb %xmm7, %xmm9, %xmm7
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3,0,1,4,5,8,9,12,13,0,1,12,13]
+; AVX2-FP-NEXT: vpshufb %xmm3, %xmm2, %xmm5
+; AVX2-FP-NEXT: vpshufb %xmm3, %xmm1, %xmm7
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm5 = xmm7[0,1,2,3],xmm5[4],xmm7[5],xmm5[6,7]
; AVX2-FP-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
; AVX2-FP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm5 = mem[0,1,2,3,4],ymm5[5,6,7]
-; AVX2-FP-NEXT: vpbroadcastq {{.*#+}} xmm7 = [10,11,0,0,2,3,14,15,10,11,0,0,2,3,14,15]
-; AVX2-FP-NEXT: vpshufb %xmm7, %xmm2, %xmm2
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm11 = [0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u]
-; AVX2-FP-NEXT: vpshufb %xmm11, %xmm1, %xmm1
+; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FP-NEXT: vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
+; AVX2-FP-NEXT: # ymm7 = ymm7[0],mem[1],ymm7[2,3],mem[4],ymm7[5,6],mem[7]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm7, %xmm8
+; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[0,3,2,1]
+; AVX2-FP-NEXT: vpshufb %xmm3, %xmm8, %xmm9
+; AVX2-FP-NEXT: vpshufb %xmm3, %xmm7, %xmm3
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm9[4],xmm3[5],xmm9[6,7]
+; AVX2-FP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX2-FP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
+; AVX2-FP-NEXT: # ymm3 = mem[0,1,2,3,4],ymm3[5,6,7]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm9 = [0,1,2,3,2,3,6,7,10,11,14,15,2,3,14,15]
+; AVX2-FP-NEXT: vpshufb %xmm9, %xmm2, %xmm2
+; AVX2-FP-NEXT: vpshufb %xmm9, %xmm1, %xmm1
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5],xmm2[6,7]
; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm1 = mem[0,1,2,3,4],ymm1[5,6,7]
-; AVX2-FP-NEXT: vpshufb %xmm7, %xmm10, %xmm2
-; AVX2-FP-NEXT: vpshufb %xmm11, %xmm9, %xmm7
+; AVX2-FP-NEXT: vpshufb %xmm9, %xmm8, %xmm2
+; AVX2-FP-NEXT: vpshufb %xmm9, %xmm7, %xmm7
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm2 = xmm7[0,1,2,3],xmm2[4],xmm7[5],xmm2[6,7]
; AVX2-FP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX2-FP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
@@ -4675,27 +4703,27 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FP-NEXT: vmovaps %ymm7, (%rsi)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm7, 32(%rdx)
-; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-FP-NEXT: vmovaps %ymm3, (%rdx)
-; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-FP-NEXT: vmovaps %ymm3, 32(%rcx)
-; AVX2-FP-NEXT: vmovdqa %ymm8, (%rcx)
+; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FP-NEXT: vmovaps %ymm7, (%rdx)
+; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FP-NEXT: vmovaps %ymm7, 32(%rcx)
+; AVX2-FP-NEXT: vmovdqa %ymm6, (%rcx)
; AVX2-FP-NEXT: vmovdqa %ymm4, 32(%r8)
; AVX2-FP-NEXT: vmovdqa %ymm0, (%r8)
-; AVX2-FP-NEXT: vmovdqa %ymm5, 32(%r9)
-; AVX2-FP-NEXT: vmovdqa %ymm6, (%r9)
+; AVX2-FP-NEXT: vmovdqa %ymm3, 32(%r9)
+; AVX2-FP-NEXT: vmovdqa %ymm5, (%r9)
; AVX2-FP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX2-FP-NEXT: vmovdqa %ymm2, 32(%rax)
; AVX2-FP-NEXT: vmovdqa %ymm1, (%rax)
-; AVX2-FP-NEXT: addq $488, %rsp # imm = 0x1E8
+; AVX2-FP-NEXT: addq $456, %rsp # imm = 0x1C8
; AVX2-FP-NEXT: vzeroupper
; AVX2-FP-NEXT: retq
;
; AVX2-FCP-LABEL: load_i16_stride6_vf32:
; AVX2-FCP: # %bb.0:
-; AVX2-FCP-NEXT: subq $488, %rsp # imm = 0x1E8
+; AVX2-FCP-NEXT: subq $456, %rsp # imm = 0x1C8
; AVX2-FCP-NEXT: vmovdqa (%rdi), %ymm5
-; AVX2-FCP-NEXT: vmovdqu %ymm5, (%rsp) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %ymm7
; AVX2-FCP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 64(%rdi), %ymm0
@@ -4704,12 +4732,12 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vmovdqa 192(%rdi), %ymm10
; AVX2-FCP-NEXT: vmovdqa 288(%rdi), %ymm2
; AVX2-FCP-NEXT: vmovdqa 256(%rdi), %ymm3
-; AVX2-FCP-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm3[2,3],ymm2[2,3]
-; AVX2-FCP-NEXT: vperm2i128 {{.*#+}} ymm12 = ymm3[0,1],ymm2[0,1]
+; AVX2-FCP-NEXT: vperm2i128 {{.*#+}} ymm12 = ymm3[2,3],ymm2[2,3]
+; AVX2-FCP-NEXT: vperm2i128 {{.*#+}} ymm14 = ymm3[0,1],ymm2[0,1]
; AVX2-FCP-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
-; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm0[1],ymm2[2,3,4,5],ymm0[6],ymm2[7]
; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm1, %ymm6
@@ -4723,121 +4751,116 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm8, %ymm6, %ymm5
; AVX2-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa %ymm10, %ymm5
; AVX2-FCP-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm9[0,1],ymm10[2],ymm9[3,4],ymm10[5],ymm9[6,7]
-; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm6, %xmm8
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm6, %xmm7
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm10 = xmm7[2,2,2,2,4,5,6,7]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0],xmm10[1],xmm8[2,3],xmm10[4],xmm8[5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm6, %xmm7
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm6, %xmm8
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm11 = xmm8[2,2,2,2,4,5,6,7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0],xmm11[1],xmm7[2,3],xmm11[4],xmm7[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm13[0],ymm12[1],ymm13[2,3,4,5],ymm12[6],ymm13[7]
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm10, %ymm4
-; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm8, %ymm4, %ymm4
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm12[0],ymm14[1],ymm12[2,3,4,5],ymm14[6],ymm12[7]
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm11, %ymm4
+; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm7, %ymm4, %ymm4
; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} xmm4 = [10,11,6,7,10,11,6,7,10,11,6,7,10,11,6,7]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [10,11,6,7,2,3,14,15,10,11,10,11,12,13,14,15]
; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm3, %xmm3
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpshufb %xmm8, %xmm2, %xmm2
+; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm2, %xmm2
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6,7]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm3 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm1, %ymm1
; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm2, %ymm1, %ymm1
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm10, %ymm1
-; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm7, %xmm3
-; AVX2-FCP-NEXT: vpshufb %xmm8, %xmm6, %xmm4
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0],ymm9[1],ymm5[2,3],ymm9[4],ymm5[5,6],ymm9[7]
+; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm11, %ymm1
+; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm8, %xmm3
+; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm6, %xmm4
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm10[0],ymm9[1],ymm10[2,3],ymm9[4],ymm10[5,6],ymm9[7]
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1],xmm4[2,3],xmm3[4],xmm4[5,6,7]
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [0,1,12,13,0,1,4,5,8,9,12,13,12,13,14,15]
; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm3, %ymm1, %ymm0
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpshufb %xmm15, %xmm2, %xmm0
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm2, %xmm1
+; AVX2-FCP-NEXT: vpshufb %xmm15, %xmm5, %xmm0
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm5, %xmm1
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm11 = xmm1[2,1,0,3]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm11[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
+; AVX2-FCP-NEXT: vpshufb %xmm15, %xmm11, %xmm1
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm14 = [u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm12[0],ymm13[1],ymm12[2,3,4,5],ymm13[6],ymm12[7]
-; AVX2-FCP-NEXT: vpshufb %ymm14, %ymm8, %ymm1
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm13 = [u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm14[0],ymm12[1],ymm14[2,3,4,5],ymm12[6],ymm14[7]
+; AVX2-FCP-NEXT: vpshufb %ymm13, %ymm10, %ymm1
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
; AVX2-FCP-NEXT: vmovdqa 352(%rdi), %ymm1
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 320(%rdi), %ymm3
-; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm3[0,1],ymm1[2],ymm3[3,4],ymm1[5],ymm3[6,7]
-; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} xmm5 = [8,9,4,5,8,9,4,5,8,9,4,5,8,9,4,5]
-; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm7, %xmm6
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm7, %xmm4
-; AVX2-FCP-NEXT: vpshufb %xmm10, %xmm4, %xmm9
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm9[0,1,2],xmm6[3],xmm9[4,5],xmm6[6],xmm9[7]
+; AVX2-FCP-NEXT: vmovdqa 320(%rdi), %ymm2
+; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,4,5,4,5,0,1,12,13,8,9,4,5]
+; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm7, %xmm6
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm7, %xmm9
+; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm9, %xmm8
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm8[0,1,2],xmm6[3],xmm8[4,5],xmm6[6],xmm8[7]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm0[0,1,2],ymm6[3,4,5,6,7],ymm0[8,9,10],ymm6[11,12,13,14,15]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 160(%rdi), %ymm0
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 128(%rdi), %ymm9
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm9[0,1],ymm0[2],ymm9[3,4],ymm0[5],ymm9[6,7]
-; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm0, %xmm5
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX2-FCP-NEXT: vpshufb %xmm10, %xmm3, %xmm10
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1,2],xmm5[3],xmm10[4,5],xmm5[6],xmm10[7]
-; AVX2-FCP-NEXT: vmovdqu (%rsp), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm5 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm5 = ymm1[0],mem[1],ymm1[2,3],mem[4],ymm1[5,6],mem[7]
-; AVX2-FCP-NEXT: vpshufb %xmm15, %xmm5, %xmm15
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm5, %xmm13
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm13 = xmm13[2,1,0,3]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm13[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm15[0,1],xmm12[2],xmm15[3],xmm12[4,5],xmm15[6,7]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm1 = ymm1[0],mem[1],ymm1[2,3,4,5],mem[6],ymm1[7]
-; AVX2-FCP-NEXT: vpshufb %ymm14, %ymm1, %ymm14
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1,2],ymm14[3,4,5,6,7]
-; AVX2-FCP-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm10 = ymm12[0,1,2],ymm10[3,4,5,6,7],ymm12[8,9,10],ymm10[11,12,13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm12[0,1,2,3],ymm10[4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} xmm12 = [6,7,2,3,12,13,14,15,6,7,2,3,12,13,14,15]
-; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm2, %xmm2
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm10 = xmm11[1,1,1,1,4,5,6,7]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm10[2],xmm2[3,4],xmm10[5],xmm2[6],xmm10[7]
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
-; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm8, %ymm8
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [2,3,6,7,4,5,0,1,10,11,14,15,12,13,14,15]
-; AVX2-FCP-NEXT: vpshufb %xmm14, %xmm2, %xmm2
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm8[3,4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
-; AVX2-FCP-NEXT: vpshufb %xmm8, %xmm4, %xmm4
+; AVX2-FCP-NEXT: vmovdqa 128(%rdi), %ymm8
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0,1],ymm0[2],ymm8[3,4],ymm0[5],ymm8[6,7]
+; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm1, %xmm0
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm4
+; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm4, %xmm2
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm2[0,1,2],xmm0[3],xmm2[4,5],xmm0[6],xmm2[7]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm2 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm2, %xmm12
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm12 = xmm12[2,1,0,3]
+; AVX2-FCP-NEXT: vpshufb %xmm15, %xmm2, %xmm0
+; AVX2-FCP-NEXT: vpshufb %xmm15, %xmm12, %xmm15
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm15[2],xmm0[3],xmm15[4,5],xmm0[6,7]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd $189, (%rsp), %ymm6, %ymm15 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm15 = mem[0],ymm6[1],mem[2,3,4,5],ymm6[6],mem[7]
+; AVX2-FCP-NEXT: vpshufb %ymm13, %ymm15, %ymm13
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm13[3,4,5,6,7]
+; AVX2-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm0[0,1,2],ymm3[3,4,5,6,7],ymm0[8,9,10],ymm3[11,12,13,14,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} xmm6 = [6,7,2,3,12,13,14,15,6,7,2,3,12,13,14,15]
+; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm5, %xmm3
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm11[1,1,1,1,4,5,6,7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm5[2],xmm3[3,4],xmm5[5],xmm3[6],xmm5[7]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
+; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm10, %ymm10
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [2,3,6,7,4,5,0,1,10,11,14,15,12,13,14,15]
+; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm3, %xmm3
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm10[3,4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
+; AVX2-FCP-NEXT: vpshufb %xmm10, %xmm9, %xmm9
; AVX2-FCP-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,5,5,5]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2],xmm7[3],xmm4[4,5],xmm7[6],xmm4[7]
-; AVX2-FCP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm2[0,1,2],ymm4[3,4,5,6,7],ymm2[8,9,10],ymm4[11,12,13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpshufb %xmm8, %xmm3, %xmm2
-; AVX2-FCP-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[3],xmm2[4,5],xmm0[6],xmm2[7]
-; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm1, %ymm1
-; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm5, %xmm2
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm13[1,1,1,1,4,5,6,7]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3,4],xmm3[5],xmm2[6],xmm3[7]
-; AVX2-FCP-NEXT: vpshufb %xmm14, %xmm2, %xmm2
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7]
-; AVX2-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm9[0,1,2],xmm7[3],xmm9[4,5],xmm7[6],xmm9[7]
+; AVX2-FCP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm3[0,1,2],ymm7[3,4,5,6,7],ymm3[8,9,10],ymm7[11,12,13,14,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm7[4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpshufb %xmm10, %xmm4, %xmm3
+; AVX2-FCP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3],xmm3[4,5],xmm1[6],xmm3[7]
+; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm15, %ymm3
+; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm2, %xmm2
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm12[1,1,1,1,4,5,6,7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2],xmm2[3,4],xmm4[5],xmm2[6],xmm4[7]
+; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm2, %xmm0
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3,4,5,6,7]
+; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm14 = ymm14[0,1],mem[2],ymm14[3],mem[4],ymm14[5,6],mem[7]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm6 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX2-FCP-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[2,1,0,3]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[0,1,2,1]
@@ -4848,106 +4871,104 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FCP-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm8
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm11 = xmm8[0,3,2,1]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm6
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm10 = xmm6[0,3,2,1]
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,1,2,3]
-; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} xmm8 = [12,13,0,1,4,5,0,0,12,13,0,1,4,5,0,0]
-; AVX2-FCP-NEXT: vpshufb %xmm8, %xmm11, %xmm12
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm13 = xmm2[2,1,2,0,4,5,6,7]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm13[0],xmm12[1,2],xmm13[3],xmm12[4,5,6,7]
+; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} xmm6 = [12,13,0,1,4,5,0,0,12,13,0,1,4,5,0,0]
+; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm10, %xmm9
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm12 = xmm2[2,1,2,0,4,5,6,7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm12[0],xmm9[1,2],xmm12[3],xmm9[4,5,6,7]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm13 = [4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
-; AVX2-FCP-NEXT: vpshufb %ymm13, %ymm6, %ymm14
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm14[0,1,2],ymm3[3,4,5,6,7],ymm14[8,9,10],ymm3[11,12,13,14,15]
-; AVX2-FCP-NEXT: vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,6,5,4]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm12[0,1,2,3,4],xmm14[5,6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm12[0,1,2,3],ymm3[4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm15 = [4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
+; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm14, %ymm12
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm12[0,1,2],ymm3[3,4,5,6,7],ymm12[8,9,10],ymm3[11,12,13,14,15]
+; AVX2-FCP-NEXT: vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,6,5,4]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4],xmm12[5,6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm9[0,1,2,3],ymm3[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm3[0,1],ymm9[2],ymm3[3,4],ymm9[5],ymm3[6,7]
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm12 = xmm14[2,1,0,3]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm7[0,1],ymm8[2],ymm7[3,4],ymm8[5],ymm7[6,7]
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm12 = xmm9[2,1,0,3]
; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm12, %xmm0
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm14, %xmm14
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm14 = xmm14[0,1,2,1]
-; AVX2-FCP-NEXT: vpshufhw {{.*#+}} xmm15 = xmm14[0,1,2,3,6,5,6,4]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm15[4],xmm0[5,6],xmm15[7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm9, %xmm9
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[0,1,2,1]
+; AVX2-FCP-NEXT: vpshufhw {{.*#+}} xmm13 = xmm9[0,1,2,3,6,5,6,4]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm13[4],xmm0[5,6],xmm13[7]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd $107, (%rsp), %ymm0, %ymm13 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm13 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm15 = ymm0[0,1],mem[2],ymm0[3],mem[4],ymm0[5,6],mem[7]
-; AVX2-FCP-NEXT: vmovdqu (%rsp), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm10
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm10 = xmm10[0,3,2,1]
-; AVX2-FCP-NEXT: vpshufb %xmm8, %xmm10, %xmm8
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm11
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm11 = xmm11[0,3,2,1]
+; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm11, %xmm6
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm0[2,1,2,0,4,5,6,7]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0],xmm8[1,2],xmm7[3],xmm8[4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm0[2,1,2,0,4,5,6,7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm6[1,2],xmm3[3],xmm6[4,5,6,7]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-FCP-NEXT: vpshufb %ymm13, %ymm15, %ymm8
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm8[0,1,2],ymm1[3,4,5,6,7],ymm8[8,9,10],ymm1[11,12,13,14,15]
-; AVX2-FCP-NEXT: vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,6,5,4]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3,4],xmm8[5,6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm7[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm13, %ymm6
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm6[0,1,2],ymm1[3,4,5,6,7],ymm6[8,9,10],ymm1[11,12,13,14,15]
+; AVX2-FCP-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,6,5,4]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm6[5,6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm3[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u]
-; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm5, %xmm5
+; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm5, %xmm3
; AVX2-FCP-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,5,6,5]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4],xmm5[5,6],xmm4[7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5,6],xmm4[7]
; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} xmm5 = [14,15,2,3,6,7,0,0,14,15,2,3,6,7,0,0]
-; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm11, %xmm7
+; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm10, %xmm4
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[3,1,2,1,4,5,6,7]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm7[1,2],xmm2[3],xmm7[4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19]
-; AVX2-FCP-NEXT: vpshufb %ymm7, %ymm6, %ymm6
-; AVX2-FCP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm6[0,1,2],ymm4[3,4,5,6,7],ymm6[8,9,10],ymm4[11,12,13,14,15]
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,1,3,2]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm6[5,6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm2[0,1,2,3],ymm4[4,5,6,7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1,2],xmm2[3],xmm4[4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19]
+; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm14, %ymm4
+; AVX2-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3,4,5,6,7],ymm4[8,9,10],ymm3[11,12,13,14,15]
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[0,1,3,2]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm4[5,6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm2[0,1,2,3],ymm3[4,5,6,7]
; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm12, %xmm1
-; AVX2-FCP-NEXT: vpshufhw {{.*#+}} xmm2 = xmm14[0,1,2,3,7,5,6,5]
+; AVX2-FCP-NEXT: vpshufhw {{.*#+}} xmm2 = xmm9[0,1,2,3,7,5,6,5]
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
-; AVX2-FCP-NEXT: vpshufb %ymm7, %ymm15, %ymm2
-; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm10, %xmm5
+; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm13, %ymm2
+; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm11, %xmm3
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm5[1,2],xmm0[3],xmm5[4,5,6,7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1,2],xmm0[3],xmm3[4,5,6,7]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,2]
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm2[5,6,7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm9[0],ymm3[1],ymm9[2,3],ymm3[4],ymm9[5,6],ymm3[7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0],ymm7[1],ymm8[2,3],ymm7[4],ymm8[5,6],ymm7[7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
-; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} xmm5 = [8,9,0,0,0,1,12,13,8,9,0,0,0,1,12,13]
-; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm2, %xmm6
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u]
-; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm1, %xmm9
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm9[0,1,2,3],xmm6[4],xmm9[5],xmm6[6,7]
-; AVX2-FCP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX2-FCP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm6 = mem[0,1,2,3,4],ymm6[5,6,7]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm9 = mem[0],ymm9[1],mem[2,3],ymm9[4],mem[5,6],ymm9[7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm9, %xmm10
-; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm10, %xmm5
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[0,3,2,1]
-; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm9, %xmm7
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3,0,1,4,5,8,9,12,13,0,1,12,13]
+; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm2, %xmm5
+; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm1, %xmm7
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm7[0,1,2,3],xmm5[4],xmm7[5],xmm5[6,7]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
; AVX2-FCP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm5 = mem[0,1,2,3,4],ymm5[5,6,7]
-; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} xmm7 = [10,11,0,0,2,3,14,15,10,11,0,0,2,3,14,15]
-; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm2, %xmm2
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u]
-; AVX2-FCP-NEXT: vpshufb %xmm11, %xmm1, %xmm1
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm7 = ymm7[0],mem[1],ymm7[2,3],mem[4],ymm7[5,6],mem[7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm7, %xmm8
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[0,3,2,1]
+; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm8, %xmm9
+; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm7, %xmm3
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm9[4],xmm3[5],xmm9[6,7]
+; AVX2-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX2-FCP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm3 = mem[0,1,2,3,4],ymm3[5,6,7]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [0,1,2,3,2,3,6,7,10,11,14,15,2,3,14,15]
+; AVX2-FCP-NEXT: vpshufb %xmm9, %xmm2, %xmm2
+; AVX2-FCP-NEXT: vpshufb %xmm9, %xmm1, %xmm1
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5],xmm2[6,7]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FCP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm1 = mem[0,1,2,3,4],ymm1[5,6,7]
-; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm10, %xmm2
-; AVX2-FCP-NEXT: vpshufb %xmm11, %xmm9, %xmm7
+; AVX2-FCP-NEXT: vpshufb %xmm9, %xmm8, %xmm2
+; AVX2-FCP-NEXT: vpshufb %xmm9, %xmm7, %xmm7
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm7[0,1,2,3],xmm2[4],xmm7[5],xmm2[6,7]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX2-FCP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
@@ -4958,19 +4979,19 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vmovaps %ymm7, (%rsi)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm7, 32(%rdx)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm3, (%rdx)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm3, 32(%rcx)
-; AVX2-FCP-NEXT: vmovdqa %ymm8, (%rcx)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm7, (%rdx)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm7, 32(%rcx)
+; AVX2-FCP-NEXT: vmovdqa %ymm6, (%rcx)
; AVX2-FCP-NEXT: vmovdqa %ymm4, 32(%r8)
; AVX2-FCP-NEXT: vmovdqa %ymm0, (%r8)
-; AVX2-FCP-NEXT: vmovdqa %ymm5, 32(%r9)
-; AVX2-FCP-NEXT: vmovdqa %ymm6, (%r9)
+; AVX2-FCP-NEXT: vmovdqa %ymm3, 32(%r9)
+; AVX2-FCP-NEXT: vmovdqa %ymm5, (%r9)
; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX2-FCP-NEXT: vmovdqa %ymm2, 32(%rax)
; AVX2-FCP-NEXT: vmovdqa %ymm1, (%rax)
-; AVX2-FCP-NEXT: addq $488, %rsp # imm = 0x1E8
+; AVX2-FCP-NEXT: addq $456, %rsp # imm = 0x1C8
; AVX2-FCP-NEXT: vzeroupper
; AVX2-FCP-NEXT: retq
;
@@ -5040,10 +5061,9 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,5,4,6]
; AVX512-NEXT: vpblendd {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
; AVX512-NEXT: vmovdqa64 %ymm10, %ymm30
-; AVX512-NEXT: vpbroadcastq {{.*#+}} xmm10 = [2,3,14,15,10,11,0,0,2,3,14,15,10,11,0,0]
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm10 = [2,3,14,15,10,11,6,7,2,3,14,15,12,13,14,15]
; AVX512-NEXT: vpshufb %xmm10, %xmm0, %xmm0
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm12 = [2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
-; AVX512-NEXT: vpshufb %xmm12, %xmm2, %xmm2
+; AVX512-NEXT: vpshufb %xmm10, %xmm2, %xmm2
; AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3],xmm0[4,5],xmm2[6,7]
; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
; AVX512-NEXT: vpshufb %xmm2, %xmm4, %xmm4
@@ -5053,7 +5073,7 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: vpshufb %xmm10, %xmm5, %xmm0
-; AVX512-NEXT: vpshufb %xmm12, %xmm3, %xmm1
+; AVX512-NEXT: vpshufb %xmm10, %xmm3, %xmm1
; AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
; AVX512-NEXT: vpblendd {{.*#+}} ymm15 = ymm0[0,1,2],ymm1[3,4,5,6,7]
@@ -5262,109 +5282,105 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-LABEL: load_i16_stride6_vf32:
; AVX512-FCP: # %bb.0:
; AVX512-FCP-NEXT: subq $136, %rsp
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
-; AVX512-FCP-NEXT: vmovdqa 224(%rdi), %ymm15
-; AVX512-FCP-NEXT: vmovdqa 192(%rdi), %ymm1
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm1[0],ymm15[1],ymm1[2,3],ymm15[4],ymm1[5,6],ymm15[7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm1, %ymm17
-; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm3, %xmm0
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} xmm6 = [8,9,12,13,0,1,0,0,8,9,12,13,0,1,0,0]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm3, %xmm1
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm11 = xmm1[2,1,0,3]
-; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm11, %xmm1
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
-; AVX512-FCP-NEXT: vmovdqa 160(%rdi), %ymm4
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,12,13,0,1,4,5,8,9,12,13,12,13,14,15]
+; AVX512-FCP-NEXT: vmovdqa 224(%rdi), %ymm13
+; AVX512-FCP-NEXT: vmovdqa 192(%rdi), %ymm15
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm15[0],ymm13[1],ymm15[2,3],ymm13[4],ymm15[5,6],ymm13[7]
+; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm7, %xmm2
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm7, %xmm3
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm11 = xmm3[2,1,0,3]
+; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm11, %xmm3
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm2[0,1],xmm3[2],xmm2[3],xmm3[4,5],xmm2[6,7]
+; AVX512-FCP-NEXT: vmovdqa 160(%rdi), %ymm3
; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512-FCP-NEXT: vmovdqa 128(%rdi), %ymm13
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm13[0,1],ymm4[2],ymm13[3,4],ymm4[5],ymm13[6,7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm4, %ymm24
-; AVX512-FCP-NEXT: vpbroadcastd {{.*#+}} xmm7 = [8,9,4,5,8,9,4,5,8,9,4,5,8,9,4,5]
-; AVX512-FCP-NEXT: vpshufb %xmm7, %xmm1, %xmm8
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm4
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
-; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm4, %xmm9
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm9[0,1,2],xmm8[3],xmm9[4,5],xmm8[6],xmm9[7]
-; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm12
-; AVX512-FCP-NEXT: vmovdqa 64(%rdi), %ymm9
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm2, %zmm8, %zmm2
-; AVX512-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm0[0],ymm12[1],ymm0[2,3],ymm12[4],ymm0[5,6],ymm12[7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm12, %ymm16
+; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm10
+; AVX512-FCP-NEXT: vmovdqa 64(%rdi), %ymm6
+; AVX512-FCP-NEXT: vmovdqa 128(%rdi), %ymm12
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm12[0,1],ymm3[2],ymm12[3,4],ymm3[5],ymm12[6,7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm3, %ymm26
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [0,1,4,5,4,5,4,5,0,1,12,13,8,9,4,5]
+; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm2, %xmm5
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
+; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm3, %xmm8
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm8[0,1,2],xmm5[3],xmm8[4,5],xmm5[6],xmm8[7]
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm4, %zmm5, %zmm4
+; AVX512-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm0[0],ymm10[1],ymm0[2,3],ymm10[4],ymm0[5,6],ymm10[7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm10, %ymm16
; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm22
-; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm2, %xmm8
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm2, %xmm5
+; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm4, %xmm8
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm4, %xmm5
; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[2,1,0,3]
-; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm5, %xmm6
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1],xmm6[2],xmm8[3],xmm6[4,5],xmm8[6,7]
-; AVX512-FCP-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm9[2,3],mem[2,3]
-; AVX512-FCP-NEXT: vinserti128 $1, 96(%rdi), %ymm9, %ymm14
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm14[0],ymm0[1],ymm14[2,3,4,5],ymm0[6],ymm14[7]
+; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm5, %xmm1
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm8[0,1],xmm1[2],xmm8[3],xmm1[4,5],xmm8[6,7]
+; AVX512-FCP-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm6[2,3],mem[2,3]
+; AVX512-FCP-NEXT: vinserti128 $1, 96(%rdi), %ymm6, %ymm8
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm8[0],ymm0[1],ymm8[2,3,4,5],ymm0[6],ymm8[7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm8, %ymm27
; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm28
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1,2],ymm9[3,4,5,6,7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm8[3,4,5,6,7]
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT: vmovdqa 352(%rdi), %ymm0
; AVX512-FCP-NEXT: vmovdqa 320(%rdi), %ymm8
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm8[0,1],ymm0[2],ymm8[3,4],ymm0[5],ymm8[6,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0,1],ymm0[2],ymm8[3,4],ymm0[5],ymm8[6,7]
; AVX512-FCP-NEXT: vmovdqa64 %ymm8, %ymm19
; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm20
-; AVX512-FCP-NEXT: vpshufb %xmm7, %xmm9, %xmm8
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm9, %xmm7
-; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm7, %xmm10
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm10[0,1,2],xmm8[3],xmm10[4,5],xmm8[6],xmm10[7]
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm10
-; AVX512-FCP-NEXT: vmovdqa 256(%rdi), %ymm8
-; AVX512-FCP-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm8[2,3],mem[2,3]
-; AVX512-FCP-NEXT: vinserti128 $1, 288(%rdi), %ymm8, %ymm12
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm12[0],ymm0[1],ymm12[2,3,4,5],ymm0[6],ymm12[7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm12, %ymm25
-; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm26
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm8[0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u]
+; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm1, %xmm10
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm8
+; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm8, %xmm9
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2],xmm10[3],xmm9[4,5],xmm10[6],xmm9[7]
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm10
+; AVX512-FCP-NEXT: vmovdqa 256(%rdi), %ymm9
+; AVX512-FCP-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm9[2,3],mem[2,3]
+; AVX512-FCP-NEXT: vinserti128 $1, 288(%rdi), %ymm9, %ymm14
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm14[0],ymm0[1],ymm14[2,3,4,5],ymm0[6],ymm14[7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm14, %ymm24
+; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm25
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm9[0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm10 = ymm0[0,1,2],ymm10[3,4,5,6,7],ymm0[8,9,10],ymm10[11,12,13,14,15]
; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,6]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm10[4,5,6,7]
; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm29
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
-; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm3, %xmm3
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} xmm10 = [10,11,14,15,2,3,0,0,10,11,14,15,2,3,0,0]
-; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm11, %xmm11
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm11[2],xmm3[3],xmm11[4,5],xmm3[6,7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [2,3,14,15,2,3,6,7,10,11,14,15,12,13,14,15]
+; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm7, %xmm7
+; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm11, %xmm10
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1],xmm10[2],xmm7[3],xmm10[4,5],xmm7[6,7]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
-; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm4, %xmm4
-; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm4[0,1,2],xmm1[3],xmm4[4,5],xmm1[6],xmm4[7]
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm3, %zmm1, %zmm1
-; AVX512-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm2, %xmm0
-; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm5, %xmm1
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
+; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm3, %xmm3
+; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3],xmm3[4,5],xmm2[6],xmm3[7]
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm7, %zmm2, %zmm2
+; AVX512-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm4, %xmm2
+; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm5, %xmm0
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3],xmm0[4,5],xmm2[6,7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3,4,5,6,7]
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm7, %xmm0
-; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm9[0,1,2,3,5,5,5,5]
+; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm8, %xmm0
+; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm8[2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm9[2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,7]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm18
-; AVX512-FCP-NEXT: vmovdqa64 %ymm17, %ymm0
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm15[2],ymm0[3,4],ymm15[5],ymm0[6,7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm15, %ymm30
-; AVX512-FCP-NEXT: vmovdqa64 %ymm17, %ymm31
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm15[0,1],ymm13[2],ymm15[3,4],ymm13[5],ymm15[6,7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm13, %ymm30
+; AVX512-FCP-NEXT: vmovdqa64 %ymm15, %ymm31
; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm10 = xmm0[2,1,2,3]
; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm0 = xmm10[2,1,2,0,4,5,6,7]
; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm9 = xmm1[0,3,2,1]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm9[u,u,0,1,4,5,u,u,12,13,12,13,12,13,12,13]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2],xmm0[3],xmm1[4,5,6,7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm24, %ymm0
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm13[2],ymm0[3,4],ymm13[5],ymm0[6,7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm13, %ymm21
+; AVX512-FCP-NEXT: vmovdqa64 %ymm26, %ymm0
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm12[2],ymm0[3,4],ymm12[5],ymm0[6,7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm12, %ymm21
; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm8 = xmm0[2,1,0,3]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u]
@@ -5384,6 +5400,7 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[0,3,2,1]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm4[u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2],xmm1[3],xmm2[4,5,6,7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm27, %ymm14
; AVX512-FCP-NEXT: vmovdqa64 %ymm28, %ymm12
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm14[0,1],ymm12[2],ymm14[3],ymm12[4],ymm14[5,6],ymm12[7]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm6[4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
@@ -5400,8 +5417,8 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm13 = xmm2[0,1,2,3,6,5,6,4]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm13[4],xmm0[5,6],xmm13[7]
; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm13
-; AVX512-FCP-NEXT: vmovdqa64 %ymm25, %ymm0
-; AVX512-FCP-NEXT: vmovdqa64 %ymm26, %ymm1
+; AVX512-FCP-NEXT: vmovdqa64 %ymm24, %ymm0
+; AVX512-FCP-NEXT: vmovdqa64 %ymm25, %ymm1
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1],ymm1[2],ymm0[3],ymm1[4],ymm0[5,6],ymm1[7]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm13 = ymm0[0,1,2],ymm13[3,4,5,6,7],ymm0[8,9,10],ymm13[11,12,13,14,15]
@@ -5440,96 +5457,93 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vpternlogq $226, %zmm7, %zmm0, %zmm4
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm20
; AVX512-FCP-NEXT: vpternlogq $184, %zmm4, %zmm17, %zmm20
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15]
; AVX512-FCP-NEXT: vmovdqa64 %ymm30, %ymm0
; AVX512-FCP-NEXT: vmovdqa64 %ymm31, %ymm1
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm1, %xmm0
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm5
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm5[2,2,2,2,4,5,6,7]
+; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm1, %xmm0
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm4
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm4[2,2,2,2,4,5,6,7]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm24, %ymm2
+; AVX512-FCP-NEXT: vmovdqa64 %ymm26, %ymm2
; AVX512-FCP-NEXT: vmovdqa64 %ymm21, %ymm3
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6],ymm2[7]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm2, %xmm6
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} xmm8 = [8,9,0,0,0,1,12,13,8,9,0,0,0,1,12,13]
-; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm6, %xmm3
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u]
-; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm2, %xmm7
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm7[0,1,2,3],xmm3[4],xmm7[5],xmm3[6,7]
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm3, %zmm3
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[0,3,2,1]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [0,1,2,3,0,1,4,5,8,9,12,13,0,1,12,13]
+; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm6, %xmm2
+; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm3, %xmm7
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm7[0,1,2,3],xmm2[4],xmm7[5],xmm2[6,7]
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm2
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm12[0],ymm14[1],ymm12[2,3,4,5],ymm14[6],ymm12[7]
; AVX512-FCP-NEXT: vmovdqa64 %ymm23, %ymm7
-; AVX512-FCP-NEXT: vmovdqa64 %ymm22, %ymm10
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm10[2],ymm7[3,4],ymm10[5],ymm7[6,7]
-; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm7, %xmm4
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm7, %xmm10
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm11 = xmm10[2,2,2,2,4,5,6,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm11[1],xmm4[2,3],xmm11[4],xmm4[5,6,7]
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} ymm23 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm13 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-FCP-NEXT: vpternlogq $236, %ymm23, %ymm13, %ymm4
+; AVX512-FCP-NEXT: vmovdqa64 %ymm22, %ymm9
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm9[2],ymm7[3,4],ymm9[5],ymm7[6,7]
+; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm7, %xmm5
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm7, %xmm9
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm10 = xmm9[2,2,2,2,4,5,6,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0],xmm10[1],xmm5[2,3],xmm10[4],xmm5[5,6,7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
+; AVX512-FCP-NEXT: vpternlogq $236, %ymm11, %ymm10, %ymm5
; AVX512-FCP-NEXT: movw $31, %ax
; AVX512-FCP-NEXT: kmovw %eax, %k1
-; AVX512-FCP-NEXT: vmovdqa32 %zmm4, %zmm3 {%k1}
-; AVX512-FCP-NEXT: vmovdqa64 %ymm27, %ymm4
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm4[0],ymm15[1],ymm4[2,3],ymm15[4],ymm4[5,6],ymm15[7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm13, %xmm4
-; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm4, %xmm15
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm8 = xmm13[0,3,2,1]
-; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm8, %xmm9
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3],xmm15[4],xmm9[5],xmm15[6,7]
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm13
-; AVX512-FCP-NEXT: vmovdqa64 %ymm25, %ymm9
-; AVX512-FCP-NEXT: vmovdqa64 %ymm26, %ymm11
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm11[0],ymm9[1],ymm11[2,3,4,5],ymm9[6],ymm11[7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm15 = ymm9[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm15[0,1,2,3,4],ymm13[5,6,7]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm0, %zmm13
-; AVX512-FCP-NEXT: vpbroadcastd {{.*#+}} xmm15 = [10,11,6,7,10,11,6,7,10,11,6,7,10,11,6,7]
-; AVX512-FCP-NEXT: vpshufb %xmm15, %xmm10, %xmm10
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15]
-; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm7, %xmm7
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0],xmm10[1],xmm7[2,3],xmm10[4],xmm7[5,6,7]
+; AVX512-FCP-NEXT: vmovdqa32 %zmm5, %zmm2 {%k1}
+; AVX512-FCP-NEXT: vmovdqa64 %ymm27, %ymm5
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm5[0],ymm15[1],ymm5[2,3],ymm15[4],ymm5[5,6],ymm15[7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm10, %xmm5
+; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm5, %xmm13
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm10 = xmm10[0,3,2,1]
+; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm10, %xmm8
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm13[4],xmm8[5],xmm13[6,7]
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512-FCP-NEXT: vmovdqa64 %ymm24, %ymm12
+; AVX512-FCP-NEXT: vmovdqa64 %ymm25, %ymm13
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0],ymm12[1],ymm13[2,3,4,5],ymm12[6],ymm13[7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm15 = ymm13[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm15[0,1,2,3,4],ymm8[5,6,7]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm8
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [10,11,6,7,2,3,14,15,10,11,10,11,12,13,14,15]
+; AVX512-FCP-NEXT: vpshufb %xmm15, %xmm9, %xmm9
+; AVX512-FCP-NEXT: vpshufb %xmm15, %xmm7, %xmm7
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0],xmm9[1],xmm7[2,3],xmm9[4],xmm7[5,6,7]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[6,7,2,3,14,15,26,27,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-FCP-NEXT: vpternlogq $248, %ymm23, %ymm7, %ymm0
-; AVX512-FCP-NEXT: vpshufb %xmm15, %xmm5, %xmm5
-; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm1, %xmm1
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm5[1],xmm1[2,3],xmm5[4],xmm1[5,6,7]
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} xmm5 = [10,11,0,0,2,3,14,15,10,11,0,0,2,3,14,15]
-; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm6, %xmm6
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u]
-; AVX512-FCP-NEXT: vpshufb %xmm7, %xmm2, %xmm2
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4],xmm2[5],xmm6[6,7]
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm2, %zmm1
+; AVX512-FCP-NEXT: vpternlogq $248, %ymm11, %ymm7, %ymm0
+; AVX512-FCP-NEXT: vpshufb %xmm15, %xmm4, %xmm4
+; AVX512-FCP-NEXT: vpshufb %xmm15, %xmm1, %xmm1
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2,3],xmm4[4],xmm1[5,6,7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,2,3,2,3,6,7,10,11,14,15,2,3,14,15]
+; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm6, %xmm6
+; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm3, %xmm3
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm6[4],xmm3[5],xmm6[6,7]
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm3, %zmm1
; AVX512-FCP-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
-; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm4, %xmm0
-; AVX512-FCP-NEXT: vpshufb %xmm7, %xmm8, %xmm2
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4],xmm2[5],xmm0[6,7]
+; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm5, %xmm0
+; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm10, %xmm3
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4],xmm3[5],xmm0[6,7]
; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm9[10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm0[5,6,7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm13[10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm0[5,6,7]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm3 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm4 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm4 # 64-byte Folded Reload
; AVX512-FCP-NEXT: movw $-2048, %ax # imm = 0xF800
; AVX512-FCP-NEXT: kmovw %eax, %k1
; AVX512-FCP-NEXT: vmovdqa32 %zmm19, %zmm4 {%k1}
; AVX512-FCP-NEXT: vmovdqa64 %zmm4, (%rsi)
; AVX512-FCP-NEXT: vmovdqu64 (%rsp), %zmm4 # 64-byte Reload
-; AVX512-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm4 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm4 # 64-byte Folded Reload
; AVX512-FCP-NEXT: vmovdqa32 %zmm28, %zmm4 {%k1}
; AVX512-FCP-NEXT: vmovdqa64 %zmm4, (%rdx)
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm3, %zmm17, %zmm13
+; AVX512-FCP-NEXT: vpternlogq $184, %zmm2, %zmm17, %zmm8
; AVX512-FCP-NEXT: vpternlogq $184, %zmm1, %zmm17, %zmm0
; AVX512-FCP-NEXT: vmovdqa64 %zmm16, (%rcx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm20, (%r8)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm13, (%r9)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm8, (%r9)
; AVX512-FCP-NEXT: vmovdqa64 %zmm0, (%rax)
; AVX512-FCP-NEXT: addq $136, %rsp
; AVX512-FCP-NEXT: vzeroupper
@@ -5539,10 +5553,10 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: pushq %rax
; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm0 = [0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
-; AVX512DQ-NEXT: vmovdqa 224(%rdi), %ymm1
-; AVX512DQ-NEXT: vmovdqa 192(%rdi), %ymm13
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm3 = ymm13[0],ymm1[1],ymm13[2,3],ymm1[4],ymm13[5,6],ymm1[7]
-; AVX512DQ-NEXT: vmovdqa64 %ymm1, %ymm18
+; AVX512DQ-NEXT: vmovdqa 224(%rdi), %ymm13
+; AVX512DQ-NEXT: vmovdqa 192(%rdi), %ymm2
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm3 = ymm2[0],ymm13[1],ymm2[2,3],ymm13[4],ymm2[5,6],ymm13[7]
+; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm25
; AVX512DQ-NEXT: vpshufb %xmm0, %xmm3, %xmm1
; AVX512DQ-NEXT: vextracti128 $1, %ymm3, %xmm9
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm2 = xmm9[0,2,0,3]
@@ -5554,8 +5568,8 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vmovdqa 64(%rdi), %ymm4
; AVX512DQ-NEXT: vmovdqa 128(%rdi), %ymm7
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm14 = ymm7[0,1],ymm2[2],ymm7[3,4],ymm2[5],ymm7[6,7]
-; AVX512DQ-NEXT: vmovdqa64 %ymm7, %ymm22
-; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm24
+; AVX512DQ-NEXT: vmovdqa64 %ymm7, %ymm20
+; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm22
; AVX512DQ-NEXT: vextracti128 $1, %ymm14, %xmm15
; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm2 = xmm14[2,2,2,2,4,5,6,7]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm2 = xmm15[0,1,2],xmm2[3,4],xmm15[5,6,7]
@@ -5565,8 +5579,8 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vinserti32x4 $2, %xmm1, %zmm2, %zmm16
; AVX512DQ-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm4[2,3],mem[2,3]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0],ymm6[1],ymm5[2,3],ymm6[4],ymm5[5,6],ymm6[7]
-; AVX512DQ-NEXT: vmovdqa64 %ymm6, %ymm19
-; AVX512DQ-NEXT: vmovdqa64 %ymm5, %ymm21
+; AVX512DQ-NEXT: vmovdqa64 %ymm6, %ymm18
+; AVX512DQ-NEXT: vmovdqa64 %ymm5, %ymm19
; AVX512DQ-NEXT: vpshufb %xmm0, %xmm2, %xmm0
; AVX512DQ-NEXT: vextracti128 $1, %ymm2, %xmm6
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm5 = xmm6[0,2,0,3]
@@ -5574,14 +5588,14 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2],xmm0[3],xmm5[4,5],xmm0[6,7]
; AVX512DQ-NEXT: vinserti128 $1, 96(%rdi), %ymm4, %ymm12
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm4 = ymm12[0],ymm1[1],ymm12[2,3,4,5],ymm1[6],ymm12[7]
-; AVX512DQ-NEXT: vmovdqa64 %ymm1, %ymm29
+; AVX512DQ-NEXT: vmovdqa64 %ymm1, %ymm28
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm5 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm10 = ymm0[0,1,2],ymm5[3,4,5,6,7]
; AVX512DQ-NEXT: vmovdqa 352(%rdi), %ymm0
; AVX512DQ-NEXT: vmovdqa 320(%rdi), %ymm5
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0,1],ymm0[2],ymm5[3,4],ymm0[5],ymm5[6,7]
-; AVX512DQ-NEXT: vmovdqa64 %ymm5, %ymm23
-; AVX512DQ-NEXT: vmovdqa64 %ymm0, %ymm25
+; AVX512DQ-NEXT: vmovdqa64 %ymm5, %ymm21
+; AVX512DQ-NEXT: vmovdqa64 %ymm0, %ymm23
; AVX512DQ-NEXT: vextracti128 $1, %ymm1, %xmm5
; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm11 = xmm1[2,2,2,2,4,5,6,7]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm11 = xmm5[0,1,2],xmm11[3,4],xmm5[5,6,7]
@@ -5591,8 +5605,8 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm7[2,3],mem[2,3]
; AVX512DQ-NEXT: vinserti128 $1, 288(%rdi), %ymm7, %ymm11
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm7 = ymm11[0],ymm0[1],ymm11[2,3,4,5],ymm0[6],ymm11[7]
-; AVX512DQ-NEXT: vmovdqa64 %ymm11, %ymm26
-; AVX512DQ-NEXT: vmovdqa64 %ymm0, %ymm27
+; AVX512DQ-NEXT: vmovdqa64 %ymm11, %ymm24
+; AVX512DQ-NEXT: vmovdqa64 %ymm0, %ymm26
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm7[0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm8 = ymm0[0,1,2],ymm8[3,4,5,6,7],ymm0[8,9,10],ymm8[11,12,13,14,15]
; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,6]
@@ -5603,23 +5617,22 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: kmovw %eax, %k1
; AVX512DQ-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm10 {%k1}
; AVX512DQ-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vpbroadcastq {{.*#+}} xmm0 = [2,3,14,15,10,11,0,0,2,3,14,15,10,11,0,0]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm0 = [2,3,14,15,10,11,6,7,2,3,14,15,12,13,14,15]
; AVX512DQ-NEXT: vpshufb %xmm0, %xmm9, %xmm8
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm9 = [2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
-; AVX512DQ-NEXT: vpshufb %xmm9, %xmm3, %xmm3
+; AVX512DQ-NEXT: vpshufb %xmm0, %xmm3, %xmm3
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm8[2],xmm3[3],xmm8[4,5],xmm3[6,7]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm8 = [6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
-; AVX512DQ-NEXT: vpshufb %xmm8, %xmm15, %xmm10
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm9 = [6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
+; AVX512DQ-NEXT: vpshufb %xmm9, %xmm15, %xmm8
; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,5,5,5,5]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1,2],xmm14[3],xmm10[4,5],xmm14[6],xmm10[7]
-; AVX512DQ-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQ-NEXT: vinserti32x4 $2, %xmm3, %zmm10, %zmm3
-; AVX512DQ-NEXT: vpshufb %xmm0, %xmm6, %xmm0
-; AVX512DQ-NEXT: vpshufb %xmm9, %xmm2, %xmm2
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3],xmm0[4,5],xmm2[6,7]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2],xmm14[3],xmm8[4,5],xmm14[6],xmm8[7]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512DQ-NEXT: vinserti32x4 $2, %xmm3, %zmm8, %zmm3
+; AVX512DQ-NEXT: vpshufb %xmm0, %xmm6, %xmm6
+; AVX512DQ-NEXT: vpshufb %xmm0, %xmm2, %xmm0
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm6[2],xmm0[3],xmm6[4,5],xmm0[6,7]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm2 = ymm0[0,1,2],ymm2[3,4,5,6,7]
-; AVX512DQ-NEXT: vpshufb %xmm8, %xmm5, %xmm0
+; AVX512DQ-NEXT: vpshufb %xmm9, %xmm5, %xmm0
; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
; AVX512DQ-NEXT: vpternlogq $226, %zmm3, %zmm17, %zmm2
@@ -5630,19 +5643,19 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX512DQ-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm2 {%k1}
; AVX512DQ-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vmovdqa64 %ymm18, %ymm20
-; AVX512DQ-NEXT: vmovdqa64 %ymm18, %ymm0
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm13[0,1],ymm0[2],ymm13[3,4],ymm0[5],ymm13[6,7]
-; AVX512DQ-NEXT: vmovdqa64 %ymm13, %ymm30
+; AVX512DQ-NEXT: vmovdqa64 %ymm25, %ymm0
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm13[2],ymm0[3,4],ymm13[5],ymm0[6,7]
+; AVX512DQ-NEXT: vmovdqa64 %ymm13, %ymm29
+; AVX512DQ-NEXT: vmovdqa64 %ymm25, %ymm30
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm15 = xmm0[2,1,2,3]
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm10 = xmm0[0,3,2,1]
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm0 = xmm10[0,0,2,3,4,5,6,7]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm8 = xmm0[0,3,2,1]
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm0 = xmm8[0,0,2,3,4,5,6,7]
; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,6,6,6]
; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm1 = xmm15[2,1,2,0,4,5,6,7]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
-; AVX512DQ-NEXT: vmovdqa64 %ymm22, %ymm1
-; AVX512DQ-NEXT: vmovdqa64 %ymm24, %ymm2
+; AVX512DQ-NEXT: vmovdqa64 %ymm20, %ymm1
+; AVX512DQ-NEXT: vmovdqa64 %ymm22, %ymm2
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
; AVX512DQ-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm14 = xmm1[2,1,0,3]
@@ -5653,8 +5666,9 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX512DQ-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm16
-; AVX512DQ-NEXT: vmovdqa64 %ymm21, %ymm13
-; AVX512DQ-NEXT: vmovdqa64 %ymm19, %ymm0
+; AVX512DQ-NEXT: vmovdqa64 %ymm18, %ymm25
+; AVX512DQ-NEXT: vmovdqa64 %ymm19, %ymm13
+; AVX512DQ-NEXT: vmovdqa64 %ymm18, %ymm0
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm13[0,1],ymm0[2],ymm13[3,4],ymm0[5],ymm13[6,7]
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm6 = xmm0[2,1,2,3]
@@ -5663,38 +5677,38 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,3]
; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm1 = xmm6[2,1,2,0,4,5,6,7]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
-; AVX512DQ-NEXT: vmovdqa64 %ymm29, %ymm11
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm4 = ymm12[0,1],ymm11[2],ymm12[3],ymm11[4],ymm12[5,6],ymm11[7]
+; AVX512DQ-NEXT: vmovdqa64 %ymm28, %ymm10
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm4 = ymm12[0,1],ymm10[2],ymm12[3],ymm10[4],ymm12[5,6],ymm10[7]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm4[4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm9 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-NEXT: vmovdqa64 %ymm23, %ymm0
-; AVX512DQ-NEXT: vmovdqa64 %ymm25, %ymm1
+; AVX512DQ-NEXT: vmovdqa64 %ymm21, %ymm0
+; AVX512DQ-NEXT: vmovdqa64 %ymm23, %ymm1
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,1,0,3]
; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm0 = xmm3[0,0,0,0,4,5,6,7]
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm8 = xmm0[0,1,2,3,4,4,6,7]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[0,1,2,1]
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm1 = xmm2[0,1,2,3,6,5,6,4]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = xmm8[0,1,2,3],xmm1[4],xmm8[5,6],xmm1[7]
-; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm8
-; AVX512DQ-NEXT: vmovdqa64 %ymm27, %ymm0
-; AVX512DQ-NEXT: vmovdqa64 %ymm26, %ymm1
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm0[2],ymm1[3],ymm0[4],ymm1[5,6],ymm0[7]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm8 = ymm0[0,1,2],ymm8[3,4,5,6,7],ymm0[8,9,10],ymm8[11,12,13,14,15]
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,6,7]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,1]
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm11 = xmm1[0,1,2,3,6,5,6,4]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm11[4],xmm0[5,6],xmm11[7]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm11
+; AVX512DQ-NEXT: vmovdqa64 %ymm26, %ymm0
+; AVX512DQ-NEXT: vmovdqa64 %ymm24, %ymm2
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm0[2],ymm2[3],ymm0[4],ymm2[5,6],ymm0[7]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm2[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm11 = ymm0[0,1,2],ymm11[3,4,5,6,7],ymm0[8,9,10],ymm11[11,12,13,14,15]
; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm8[4,5,6,7]
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm11[4,5,6,7]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm18
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
; AVX512DQ-NEXT: vpternlogq $226, %zmm16, %zmm0, %zmm9
; AVX512DQ-NEXT: vpmovsxdq {{.*#+}} zmm17 = [18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,65535,0,0]
; AVX512DQ-NEXT: vpternlogq $184, %zmm9, %zmm17, %zmm18
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm8 = xmm15[3,1,2,1,4,5,6,7]
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm9 = xmm10[0,1,3,3,4,5,6,7]
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,7,7,7]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0],xmm9[1,2],xmm8[3],xmm9[4,5,6,7]
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm9 = xmm15[3,1,2,1,4,5,6,7]
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm8 = xmm8[0,1,3,3,4,5,6,7]
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,7,7,7,7]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm8 = xmm9[0],xmm8[1,2],xmm9[3],xmm8[4,5,6,7]
; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,5,6,5]
; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm9 = xmm14[1,1,1,1,4,5,6,7]
; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5,7,7]
@@ -5708,28 +5722,28 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm4[5,6,7]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7]
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,5]
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,5]
; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[1,1,1,1,4,5,6,7]
; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,7,7]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5,6],xmm2[7]
-; AVX512DQ-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,4,5]
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4],xmm3[5,6],xmm1[7]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,4,5]
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX512DQ-NEXT: vpternlogq $226, %zmm7, %zmm0, %zmm4
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm16
; AVX512DQ-NEXT: vpternlogq $184, %zmm4, %zmm17, %zmm16
; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm6 = [8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15]
-; AVX512DQ-NEXT: vmovdqa64 %ymm20, %ymm0
+; AVX512DQ-NEXT: vmovdqa64 %ymm29, %ymm0
; AVX512DQ-NEXT: vmovdqa64 %ymm30, %ymm1
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
; AVX512DQ-NEXT: vpshufb %xmm6, %xmm0, %xmm2
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm3 = xmm1[2,2,2,2,4,5,6,7]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm3 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6,7]
-; AVX512DQ-NEXT: vmovdqa64 %ymm22, %ymm2
-; AVX512DQ-NEXT: vmovdqa64 %ymm24, %ymm4
+; AVX512DQ-NEXT: vmovdqa64 %ymm20, %ymm2
+; AVX512DQ-NEXT: vmovdqa64 %ymm22, %ymm4
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm4 = ymm2[0],ymm4[1],ymm2[2,3],ymm4[4],ymm2[5,6],ymm4[7]
; AVX512DQ-NEXT: vextracti128 $1, %ymm4, %xmm2
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm9 = xmm4[0,3,2,1]
@@ -5740,37 +5754,37 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm5[4],xmm4[5],xmm5[6,7]
; AVX512DQ-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
; AVX512DQ-NEXT: vinserti32x4 $2, %xmm3, %zmm4, %zmm4
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm3 = ymm11[0],ymm12[1],ymm11[2,3,4,5],ymm12[6],ymm11[7]
-; AVX512DQ-NEXT: vmovdqa64 %ymm19, %ymm5
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm3 = ymm10[0],ymm12[1],ymm10[2,3,4,5],ymm12[6],ymm10[7]
+; AVX512DQ-NEXT: vmovdqa64 %ymm25, %ymm5
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1],ymm13[2],ymm5[3,4],ymm13[5],ymm5[6,7]
; AVX512DQ-NEXT: vpshufb %xmm6, %xmm5, %xmm6
; AVX512DQ-NEXT: vextracti128 $1, %ymm5, %xmm13
; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm8 = xmm13[2,2,2,2,4,5,6,7]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0],xmm8[1],xmm6[2,3],xmm8[4],xmm6[5,6,7]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm10 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-NEXT: vpternlogq $236, %ymm10, %ymm8, %ymm6
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm14 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
+; AVX512DQ-NEXT: vpternlogq $236, %ymm14, %ymm8, %ymm6
; AVX512DQ-NEXT: movw $31, %ax
; AVX512DQ-NEXT: kmovw %eax, %k1
; AVX512DQ-NEXT: vinserti32x8 $0, %ymm6, %zmm0, %zmm4 {%k1}
-; AVX512DQ-NEXT: vmovdqa64 %ymm23, %ymm6
-; AVX512DQ-NEXT: vmovdqa64 %ymm25, %ymm8
+; AVX512DQ-NEXT: vmovdqa64 %ymm21, %ymm6
+; AVX512DQ-NEXT: vmovdqa64 %ymm23, %ymm8
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm8 = ymm6[0],ymm8[1],ymm6[2,3],ymm8[4],ymm6[5,6],ymm8[7]
; AVX512DQ-NEXT: vextracti128 $1, %ymm8, %xmm6
-; AVX512DQ-NEXT: vpshufb %xmm7, %xmm6, %xmm14
+; AVX512DQ-NEXT: vpshufb %xmm7, %xmm6, %xmm11
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm7 = xmm8[0,3,2,1]
; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm8 = xmm7[0,1,0,2,4,5,6,7]
; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,6,6,6,6]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm14[4],xmm8[5],xmm14[6,7]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm11[4],xmm8[5],xmm11[6,7]
; AVX512DQ-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512DQ-NEXT: vmovdqa64 %ymm27, %ymm14
; AVX512DQ-NEXT: vmovdqa64 %ymm26, %ymm11
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm11 = ymm14[0],ymm11[1],ymm14[2,3,4,5],ymm11[6],ymm14[7]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm14 = ymm11[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm8 = ymm14[0,1,2,3,4],ymm8[5,6,7]
+; AVX512DQ-NEXT: vmovdqa64 %ymm24, %ymm10
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0],ymm10[1],ymm11[2,3,4,5],ymm10[6],ymm11[7]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm15 = ymm11[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm8 = ymm15[0,1,2,3,4],ymm8[5,6,7]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm8
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm14 = [10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15]
-; AVX512DQ-NEXT: vpshufb %xmm14, %xmm0, %xmm0
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm15 = [10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15]
+; AVX512DQ-NEXT: vpshufb %xmm15, %xmm0, %xmm0
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6,7]
@@ -5781,12 +5795,12 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm2 = xmm9[0,1,2,3],xmm2[4],xmm9[5],xmm2[6,7]
; AVX512DQ-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX512DQ-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm0
-; AVX512DQ-NEXT: vpshufb %xmm14, %xmm5, %xmm2
+; AVX512DQ-NEXT: vpshufb %xmm15, %xmm5, %xmm2
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm5 = xmm13[1,1,2,3]
; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm5[1],xmm2[2,3],xmm5[4],xmm2[5,6,7]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[6,7,2,3,14,15,26,27,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-NEXT: vpternlogq $236, %ymm10, %ymm3, %ymm2
+; AVX512DQ-NEXT: vpternlogq $236, %ymm14, %ymm3, %ymm2
; AVX512DQ-NEXT: vinserti32x8 $0, %ymm2, %zmm0, %zmm0 {%k1}
; AVX512DQ-NEXT: vpshufb %xmm1, %xmm6, %xmm1
; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm2 = xmm7[0,1,1,3,4,5,6,7]
@@ -5813,155 +5827,152 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
;
; AVX512DQ-FCP-LABEL: load_i16_stride6_vf32:
; AVX512DQ-FCP: # %bb.0:
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vmovdqa 224(%rdi), %ymm12
+; AVX512DQ-FCP-NEXT: pushq %rax
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [0,1,12,13,0,1,4,5,8,9,12,13,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vmovdqa 224(%rdi), %ymm13
; AVX512DQ-FCP-NEXT: vmovdqa 192(%rdi), %ymm2
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm2[0],ymm12[1],ymm2[2,3],ymm12[4],ymm2[5,6],ymm12[7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm2, %ymm25
-; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm7, %xmm1
-; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} xmm3 = [8,9,12,13,0,1,0,0,8,9,12,13,0,1,0,0]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm7, %xmm2
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm14 = xmm2[2,1,0,3]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm14, %xmm2
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm2[0],ymm13[1],ymm2[2,3],ymm13[4],ymm2[5,6],ymm13[7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm2, %ymm24
+; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm5, %xmm1
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm5, %xmm2
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm9 = xmm2[2,1,0,3]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm9, %xmm2
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
; AVX512DQ-FCP-NEXT: vmovdqa 160(%rdi), %ymm4
; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqa 128(%rdi), %ymm5
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm5, %ymm20
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm4, %ymm22
-; AVX512DQ-FCP-NEXT: vpbroadcastd {{.*#+}} xmm8 = [8,9,4,5,8,9,4,5,8,9,4,5,8,9,4,5]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm6, %xmm4
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm6, %xmm15
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm15, %xmm5
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[3],xmm5[4,5],xmm4[6],xmm5[7]
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm9
-; AVX512DQ-FCP-NEXT: vmovdqa 64(%rdi), %ymm5
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm10
+; AVX512DQ-FCP-NEXT: vmovdqa 64(%rdi), %ymm3
+; AVX512DQ-FCP-NEXT: vmovdqa 128(%rdi), %ymm6
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm6[0,1],ymm4[2],ymm6[3,4],ymm4[5],ymm6[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm6, %ymm22
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm4, %ymm25
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,4,5,4,5,4,5,0,1,12,13,8,9,4,5]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm14, %xmm4
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm14, %xmm7
+; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm7, %xmm8
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm8[0,1,2],xmm4[3],xmm8[4,5],xmm4[6],xmm8[7]
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm4, %zmm16
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm9[1],ymm2[2,3],ymm9[4],ymm2[5,6],ymm9[7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm9, %ymm18
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm15 = ymm2[0],ymm10[1],ymm2[2,3],ymm10[4],ymm2[5,6],ymm10[7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm10, %ymm18
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm2, %ymm19
-; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm1, %xmm0
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm4
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm9 = xmm4[2,1,0,3]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm9, %xmm3
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2],xmm0[3],xmm3[4,5],xmm0[6,7]
-; AVX512DQ-FCP-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm5[2,3],mem[2,3]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, 96(%rdi), %ymm5, %ymm3
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm3[0],ymm2[1],ymm3[2,3,4,5],ymm2[6],ymm3[7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm3, %ymm27
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm2, %ymm28
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm0[0,1,2],ymm3[3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm15, %xmm4
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm15, %xmm1
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,1,0,3]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm2, %xmm0
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm4[0,1],xmm0[2],xmm4[3],xmm0[4,5],xmm4[6,7]
+; AVX512DQ-FCP-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm3[2,3],mem[2,3]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, 96(%rdi), %ymm3, %ymm12
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm12[0],ymm1[1],ymm12[2,3,4,5],ymm1[6],ymm12[7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm1, %ymm28
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm0[0,1,2],ymm4[3,4,5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa 352(%rdi), %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqa 320(%rdi), %ymm2
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm2, %ymm21
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm23
-; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm4, %xmm0
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm4, %xmm3
-; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm3, %xmm8
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm8[0,1,2],xmm0[3],xmm8[4,5],xmm0[6],xmm8[7]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm8
-; AVX512DQ-FCP-NEXT: vmovdqa 256(%rdi), %ymm0
-; AVX512DQ-FCP-NEXT: vperm2i128 {{.*#+}} ymm10 = ymm0[2,3],mem[2,3]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, 288(%rdi), %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm0[0],ymm10[1],ymm0[2,3,4,5],ymm10[6],ymm0[7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm24
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm10, %ymm26
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm2[0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u]
+; AVX512DQ-FCP-NEXT: vmovdqa 320(%rdi), %ymm1
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm1, %ymm20
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm21
+; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm4, %xmm11
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm4, %xmm1
+; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm1, %xmm6
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2],xmm11[3],xmm6[4,5],xmm11[6],xmm6[7]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm8
+; AVX512DQ-FCP-NEXT: vmovdqa 256(%rdi), %ymm6
+; AVX512DQ-FCP-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm6[2,3],mem[2,3]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, 288(%rdi), %ymm6, %ymm11
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm11[0],ymm0[1],ymm11[2,3,4,5],ymm0[6],ymm11[7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm11, %ymm23
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm26
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm6[0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm8 = ymm0[0,1,2],ymm8[3,4,5,6,7],ymm0[8,9,10],ymm8[11,12,13,14,15]
; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,6]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm8[4,5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm16, %zmm17, %zmm11
+; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm16, %zmm17, %zmm10
; AVX512DQ-FCP-NEXT: movw $-2048, %ax # imm = 0xF800
; AVX512DQ-FCP-NEXT: kmovw %eax, %k1
-; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm11 {%k1}
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm7, %xmm7
-; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} xmm8 = [10,11,14,15,2,3,0,0,10,11,14,15,2,3,0,0]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm14, %xmm14
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1],xmm14[2],xmm7[3],xmm14[4,5],xmm7[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm14, %xmm15, %xmm15
-; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,5,5,5]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm15[0,1,2],xmm6[3],xmm15[4,5],xmm6[6],xmm15[7]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm7, %zmm6, %zmm6
-; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm1, %xmm0
-; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm9, %xmm1
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm0[0,1,2],ymm1[3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm14, %xmm3, %xmm0
+; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm10 {%k1}
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [2,3,14,15,2,3,6,7,10,11,14,15,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm5, %xmm5
+; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm9, %xmm8
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm8[2],xmm5[3],xmm8[4,5],xmm5[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm7, %xmm7
+; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm8 = xmm14[0,1,2,3,5,5,5,5]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2],xmm8[3],xmm7[4,5],xmm8[6],xmm7[7]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm5, %zmm7, %zmm5
+; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm15, %xmm7
+; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm2, %xmm0
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm7[0,1],xmm0[2],xmm7[3],xmm0[4,5],xmm7[6,7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm0[0,1,2],ymm2[3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm1, %xmm0
; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm4[0,1,2,3,5,5,5,5]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm6, %zmm17, %zmm13
+; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm5, %zmm17, %zmm2
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm6[2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,7]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm13 {%k1}
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm25, %ymm0
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm12[2],ymm0[3,4],ymm12[5],ymm0[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm12, %ymm29
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm25, %ymm30
+; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm2 {%k1}
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm24, %ymm0
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm13[2],ymm0[3,4],ymm13[5],ymm0[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm13, %ymm29
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm24, %ymm30
; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm15 = xmm0[2,1,2,3]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm1 = xmm15[2,1,2,0,4,5,6,7]
; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm8 = xmm0[0,3,2,1]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm8[u,u,0,1,4,5,u,u,12,13,12,13,12,13,12,13]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm20, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm22, %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm22, %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm25, %ymm2
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm14 = xmm0[2,1,0,3]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u]
; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm14, %xmm3
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm2[0,1,2,1]
-; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm2 = xmm6[0,1,2,3,6,5,6,4]
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm7 = xmm2[0,1,2,1]
+; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm2 = xmm7[0,1,2,3,6,5,6,4]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5,6],xmm2[7]
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm2, %zmm16
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm18, %ymm25
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm18, %ymm24
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm19, %ymm13
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm18, %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm19, %ymm2
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm13[0,1],ymm1[2],ymm13[3,4],ymm1[5],ymm13[6,7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[2,1,2,3]
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm1 = xmm5[2,1,2,0,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[0,3,2,1]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm4[u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2],xmm1[3],xmm2[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm28, %ymm10
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm12[0,1],ymm10[2],ymm12[3],ymm10[4],ymm12[5,6],ymm10[7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm3[4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm6[5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm1[0,1,2,3],ymm6[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm20, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm21, %ymm2
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,1,2,3]
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm1 = xmm4[2,1,2,0,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,3,2,1]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm3[u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm1[0],xmm5[1,2],xmm1[3],xmm5[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm27, %ymm12
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm28, %ymm11
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm12[0,1],ymm11[2],ymm12[3],ymm11[4],ymm12[5,6],ymm11[7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm2[4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm7[5,6,7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm5[0,1,2,3],ymm7[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm21, %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm23, %ymm5
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1],ymm1[2],ymm5[3,4],ymm1[5],ymm5[6,7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm5, %xmm7
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[2,1,0,3]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm5, %xmm0
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[0,1,2,1]
-; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm10 = xmm7[0,1,2,3,6,5,6,4]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm10[4],xmm0[5,6],xmm10[7]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm10
+; AVX512DQ-FCP-NEXT: vextracti32x4 $1, %ymm1, %xmm17
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm1[2,1,0,3]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm6, %xmm0
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm2 = xmm17[0,1,2,1]
+; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm11 = xmm2[0,1,2,3,6,5,6,4]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm11[4],xmm0[5,6],xmm11[7]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm11
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm26, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm24, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm23, %ymm1
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm0[2],ymm1[3],ymm0[4],ymm1[5,6],ymm0[7]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm10 = ymm0[0,1,2],ymm10[3,4,5,6,7],ymm0[8,9,10],ymm10[11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm11 = ymm0[0,1,2],ymm11[3,4,5,6,7],ymm0[8,9,10],ymm11[11,12,13,14,15]
; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm10[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm11[4,5,6,7]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm18
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm16, %zmm0, %zmm9
@@ -5971,112 +5982,110 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u,2,3,6,7,u,u,14,15,14,15,14,15,14,15]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm9[0],xmm8[1,2],xmm9[3],xmm8[4,5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm14, %xmm10
-; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,5,6,5]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm10[0,1,2,3],xmm6[4],xmm10[5,6],xmm6[7]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm8, %zmm6, %zmm6
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm4[3,1,2,1,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1,2],xmm4[3],xmm3[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm2[5,6,7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm5, %xmm3
-; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm4 = xmm7[0,1,2,3,7,5,6,5]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5,6],xmm4[7]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm14, %xmm11
+; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,5,6,5]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm11[0,1,2,3],xmm7[4],xmm11[5,6],xmm7[7]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm8, %zmm7, %zmm7
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm5[3,1,2,1,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0],xmm4[1,2],xmm5[3],xmm4[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm3[5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm6, %xmm4
+; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,5]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4],xmm4[5,6],xmm2[7]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm1[0,1,2],ymm3[3,4,5,6,7],ymm1[8,9,10],ymm3[11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,4,5]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm6, %zmm0, %zmm2
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm7, %zmm0, %zmm3
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm16
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm2, %zmm17, %zmm16
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm3, %zmm17, %zmm16
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15]
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm29, %ymm0
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm30, %ymm1
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm0, %xmm1
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm3[2,2,2,2,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm20, %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm22, %ymm4
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2,3],ymm4[4],ymm1[5,6],ymm4[7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm5
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
-; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} xmm8 = [8,9,0,0,0,1,12,13,8,9,0,0,0,1,12,13]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm5, %xmm4
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm1, %xmm7
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm7[0,1,2,3],xmm4[4],xmm7[5],xmm4[6,7]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm2, %zmm4, %zmm2
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm11[0],ymm12[1],ymm11[2,3,4,5],ymm12[6],ymm11[7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm25, %ymm7
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm19, %ymm10
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm10[2],ymm7[3,4],ymm10[5],ymm7[6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm7, %xmm6
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm7, %xmm10
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm12 = xmm10[2,2,2,2,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0],xmm12[1],xmm6[2,3],xmm12[4],xmm6[5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} ymm19 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm12 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-FCP-NEXT: vpternlogq $236, %ymm19, %ymm12, %ymm6
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm3, %xmm0
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm3, %xmm1
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm1[2,2,2,2,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm22, %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm25, %ymm4
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2,3],ymm4[4],ymm2[5,6],ymm4[7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm4
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm2[0,3,2,1]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [0,1,2,3,0,1,4,5,8,9,12,13,0,1,12,13]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm4, %xmm2
+; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm6, %xmm8
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm8[0,1,2,3],xmm2[4],xmm8[5],xmm2[6,7]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm0
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm10[0],ymm12[1],ymm10[2,3,4,5],ymm12[6],ymm10[7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm24, %ymm8
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm13[2],ymm8[3,4],ymm13[5],ymm8[6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm8, %xmm5
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm8, %xmm9
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm11 = xmm9[2,2,2,2,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0],xmm11[1],xmm5[2,3],xmm11[4],xmm5[5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm13 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
+; AVX512DQ-FCP-NEXT: vpternlogq $236, %ymm13, %ymm11, %ymm5
; AVX512DQ-FCP-NEXT: movw $31, %ax
; AVX512DQ-FCP-NEXT: kmovw %eax, %k1
-; AVX512DQ-FCP-NEXT: vinserti32x8 $0, %ymm6, %zmm0, %zmm2 {%k1}
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm21, %ymm6
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm23, %ymm12
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm6[0],ymm12[1],ymm6[2,3],ymm12[4],ymm6[5,6],ymm12[7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm12, %xmm6
-; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm6, %xmm8
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm12 = xmm12[0,3,2,1]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm12, %xmm9
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm9[0,1,2,3],xmm8[4],xmm9[5],xmm8[6,7]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm26, %ymm9
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm24, %ymm11
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0],ymm11[1],ymm9[2,3,4,5],ymm11[6],ymm9[7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm15 = ymm9[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm15[0,1,2,3,4],ymm8[5,6,7]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm8
-; AVX512DQ-FCP-NEXT: vpbroadcastd {{.*#+}} xmm15 = [10,11,6,7,10,11,6,7,10,11,6,7,10,11,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm15, %xmm10, %xmm10
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm14, %xmm7, %xmm7
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0],xmm10[1],xmm7[2,3],xmm10[4],xmm7[5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[6,7,2,3,14,15,26,27,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm19, %ymm7, %ymm4
+; AVX512DQ-FCP-NEXT: vinserti32x8 $0, %ymm5, %zmm0, %zmm0 {%k1}
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm20, %ymm5
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm21, %ymm11
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm5[0],ymm11[1],ymm5[2,3],ymm11[4],ymm5[5,6],ymm11[7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm11, %xmm5
+; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm5, %xmm14
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm11 = xmm11[0,3,2,1]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm11, %xmm7
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm14[4],xmm7[5],xmm14[6,7]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm26, %ymm14
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm23, %ymm10
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0],ymm10[1],ymm14[2,3,4,5],ymm10[6],ymm14[7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm15 = ymm14[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm15[0,1,2,3,4],ymm7[5,6,7]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm0, %zmm7
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [10,11,6,7,2,3,14,15,10,11,10,11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm15, %xmm9, %xmm9
+; AVX512DQ-FCP-NEXT: vpshufb %xmm15, %xmm8, %xmm8
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0],xmm9[1],xmm8[2,3],xmm9[4],xmm8[5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[6,7,2,3,14,15,26,27,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm13, %ymm8, %ymm2
+; AVX512DQ-FCP-NEXT: vpshufb %xmm15, %xmm1, %xmm1
; AVX512DQ-FCP-NEXT: vpshufb %xmm15, %xmm3, %xmm3
-; AVX512DQ-FCP-NEXT: vpshufb %xmm14, %xmm0, %xmm0
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1],xmm0[2,3],xmm3[4],xmm0[5,6,7]
-; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} xmm3 = [10,11,0,0,2,3,14,15,10,11,0,0,2,3,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm5, %xmm5
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm1, %xmm1
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm5[4],xmm1[5],xmm5[6,7]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
-; AVX512DQ-FCP-NEXT: vinserti32x8 $0, %ymm4, %zmm0, %zmm0 {%k1}
-; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm6, %xmm1
-; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm12, %xmm3
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4],xmm3[5],xmm1[6,7]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm9[10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4],ymm1[5,6,7]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm1
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0],xmm1[1],xmm3[2,3],xmm1[4],xmm3[5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3,2,3,6,7,10,11,14,15,2,3,14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm4, %xmm4
+; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm6, %xmm6
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3],xmm4[4],xmm6[5],xmm4[6,7]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm4, %zmm1
+; AVX512DQ-FCP-NEXT: vinserti32x8 $0, %ymm2, %zmm0, %zmm1 {%k1}
+; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm5, %xmm2
+; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm11, %xmm3
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5],xmm2[6,7]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm14[10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6,7]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm2
; AVX512DQ-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512DQ-FCP-NEXT: vmovaps %zmm3, (%rsi)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm13, (%rdx)
+; AVX512DQ-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vmovaps %zmm3, (%rdx)
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm2, %zmm17, %zmm8
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm0, %zmm17, %zmm1
+; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm0, %zmm17, %zmm7
+; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm1, %zmm17, %zmm2
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm18, (%rcx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm16, (%r8)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm8, (%r9)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm1, (%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm7, (%r9)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm2, (%rax)
+; AVX512DQ-FCP-NEXT: popq %rax
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
@@ -9314,213 +9323,209 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FP-NEXT: vpshufb %ymm2, %ymm13, %ymm2
; AVX2-FP-NEXT: vpblendvb %ymm0, %ymm14, %ymm2, %ymm2
; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpbroadcastd {{.*#+}} xmm2 = [10,11,6,7,10,11,6,7,10,11,6,7,10,11,6,7]
-; AVX2-FP-NEXT: vpshufb %xmm2, %xmm11, %xmm14
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm11 = [10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15]
-; AVX2-FP-NEXT: vpshufb %xmm11, %xmm7, %xmm7
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm14 = xmm7[0],xmm14[1],xmm7[2,3],xmm14[4],xmm7[5,6,7]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm2 = [10,11,6,7,2,3,14,15,10,11,10,11,12,13,14,15]
+; AVX2-FP-NEXT: vpshufb %xmm2, %xmm11, %xmm11
+; AVX2-FP-NEXT: vpshufb %xmm2, %xmm7, %xmm7
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm11 = xmm7[0],xmm11[1],xmm7[2,3],xmm11[4],xmm7[5,6,7]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm7 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
; AVX2-FP-NEXT: vpshufb %ymm7, %ymm10, %ymm10
-; AVX2-FP-NEXT: vpblendvb %ymm0, %ymm14, %ymm10, %ymm10
+; AVX2-FP-NEXT: vpblendvb %ymm0, %ymm11, %ymm10, %ymm10
; AVX2-FP-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vpshufb %xmm2, %xmm5, %xmm5
-; AVX2-FP-NEXT: vpshufb %xmm11, %xmm1, %xmm1
+; AVX2-FP-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm5[1],xmm1[2,3],xmm5[4],xmm1[5,6,7]
; AVX2-FP-NEXT: vpshufb %ymm7, %ymm4, %ymm4
; AVX2-FP-NEXT: vpblendvb %ymm0, %ymm1, %ymm4, %ymm1
; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vpshufb %xmm2, %xmm9, %xmm1
-; AVX2-FP-NEXT: vpshufb %xmm11, %xmm6, %xmm4
+; AVX2-FP-NEXT: vpshufb %xmm2, %xmm6, %xmm4
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm1 = xmm4[0],xmm1[1],xmm4[2,3],xmm1[4],xmm4[5,6,7]
; AVX2-FP-NEXT: vpshufb %ymm7, %ymm8, %ymm4
; AVX2-FP-NEXT: vpblendvb %ymm0, %ymm1, %ymm4, %ymm1
; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vpshufb %ymm7, %ymm13, %ymm1
-; AVX2-FP-NEXT: vpshufb %xmm2, %xmm3, %xmm2
-; AVX2-FP-NEXT: vpshufb %xmm11, %xmm12, %xmm3
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2,3],xmm2[4],xmm3[5,6,7]
+; AVX2-FP-NEXT: vpshufb %xmm2, %xmm3, %xmm3
+; AVX2-FP-NEXT: vpshufb %xmm2, %xmm12, %xmm2
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6,7]
; AVX2-FP-NEXT: vpblendvb %ymm0, %ymm2, %ymm1, %ymm0
; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FP-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm10 # 32-byte Folded Reload
-; AVX2-FP-NEXT: # ymm10 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm10, %xmm0
-; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,1,0,3]
-; AVX2-FP-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
-; AVX2-FP-NEXT: vpshufb %xmm2, %xmm10, %xmm0
-; AVX2-FP-NEXT: vmovdqa %xmm2, %xmm6
-; AVX2-FP-NEXT: vpbroadcastq {{.*#+}} xmm12 = [8,9,12,13,0,1,0,0,8,9,12,13,0,1,0,0]
-; AVX2-FP-NEXT: vpshufb %xmm12, %xmm1, %xmm1
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm2 = [u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
-; AVX2-FP-NEXT: vmovdqu (%rsp), %ymm1 # 32-byte Reload
-; AVX2-FP-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX2-FP-NEXT: # ymm1 = mem[0],ymm1[1],mem[2,3,4,5],ymm1[6],mem[7]
-; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpshufb %ymm2, %ymm1, %ymm1
-; AVX2-FP-NEXT: vmovdqa %ymm2, %ymm5
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7]
+; AVX2-FP-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 32-byte Folded Reload
+; AVX2-FP-NEXT: # ymm7 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm7, %xmm0
+; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,1,0,3]
+; AVX2-FP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,12,13,0,1,4,5,8,9,12,13,12,13,14,15]
+; AVX2-FP-NEXT: vpshufb %xmm1, %xmm7, %xmm0
+; AVX2-FP-NEXT: vpshufb %xmm1, %xmm2, %xmm2
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2],xmm0[3],xmm2[4,5],xmm0[6,7]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm3 = [u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
+; AVX2-FP-NEXT: vmovdqu (%rsp), %ymm2 # 32-byte Reload
+; AVX2-FP-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX2-FP-NEXT: # ymm2 = mem[0],ymm2[1],mem[2,3,4,5],ymm2[6],mem[7]
+; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-FP-NEXT: vmovdqa %ymm3, %ymm5
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm0[0,1,2],ymm2[3,4,5,6,7]
; AVX2-FP-NEXT: vmovdqa 544(%rdi), %ymm0
; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vmovdqa 512(%rdi), %ymm2
+; AVX2-FP-NEXT: vmovdqa 512(%rdi), %ymm3
+; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm11 = ymm3[0,1],ymm0[2],ymm3[3,4],ymm0[5],ymm3[6,7]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm0 = [0,1,4,5,4,5,4,5,0,1,12,13,8,9,4,5]
+; AVX2-FP-NEXT: vpshufb %xmm0, %xmm11, %xmm3
+; AVX2-FP-NEXT: vextracti128 $1, %ymm11, %xmm4
+; AVX2-FP-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FP-NEXT: vpshufb %xmm0, %xmm4, %xmm4
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3],xmm4[4,5],xmm3[6],xmm4[7]
+; AVX2-FP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm3 = ymm2[0,1,2],ymm3[3,4,5,6,7],ymm2[8,9,10],ymm3[11,12,13,14,15]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm13 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7]
-; AVX2-FP-NEXT: vpbroadcastd {{.*#+}} xmm0 = [8,9,4,5,8,9,4,5,8,9,4,5,8,9,4,5]
-; AVX2-FP-NEXT: vpshufb %xmm0, %xmm13, %xmm2
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm13, %xmm3
+; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX2-FP-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm6 # 32-byte Folded Reload
+; AVX2-FP-NEXT: # ymm6 = mem[0],ymm2[1],mem[2,3],ymm2[4],mem[5,6],ymm2[7]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm6, %xmm2
+; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[2,1,0,3]
; AVX2-FP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FP-NEXT: vpshufb %xmm4, %xmm3, %xmm3
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3],xmm3[4,5],xmm2[6],xmm3[7]
-; AVX2-FP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FP-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm11 # 32-byte Folded Reload
-; AVX2-FP-NEXT: # ymm11 = mem[0],ymm1[1],mem[2,3],ymm1[4],mem[5,6],ymm1[7]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm11, %xmm1
-; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,1,0,3]
-; AVX2-FP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FP-NEXT: vpshufb %xmm6, %xmm11, %xmm1
-; AVX2-FP-NEXT: vpshufb %xmm12, %xmm2, %xmm2
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
-; AVX2-FP-NEXT: vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm2 # 32-byte Folded Reload
-; AVX2-FP-NEXT: # ymm2 = ymm15[0],mem[1],ymm15[2,3,4,5],mem[6],ymm15[7]
-; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpshufb %ymm5, %ymm2, %ymm2
+; AVX2-FP-NEXT: vpshufb %xmm1, %xmm6, %xmm2
+; AVX2-FP-NEXT: vpshufb %xmm1, %xmm3, %xmm3
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3],xmm3[4,5],xmm2[6,7]
+; AVX2-FP-NEXT: vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm3 # 32-byte Folded Reload
+; AVX2-FP-NEXT: # ymm3 = ymm15[0],mem[1],ymm15[2,3,4,5],mem[6],ymm15[7]
+; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpshufb %ymm5, %ymm3, %ymm3
; AVX2-FP-NEXT: vmovdqa %ymm5, %ymm15
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
-; AVX2-FP-NEXT: vmovdqa 352(%rdi), %ymm2
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5,6,7]
+; AVX2-FP-NEXT: vmovdqa 352(%rdi), %ymm3
+; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vmovdqa 320(%rdi), %ymm4
+; AVX2-FP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm8 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
+; AVX2-FP-NEXT: vpshufb %xmm0, %xmm8, %xmm3
+; AVX2-FP-NEXT: vextracti128 $1, %ymm8, %xmm4
+; AVX2-FP-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FP-NEXT: vpshufb %xmm0, %xmm4, %xmm4
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3],xmm4[4,5],xmm3[6],xmm4[7]
+; AVX2-FP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm3 = ymm2[0,1,2],ymm3[3,4,5,6,7],ymm2[8,9,10],ymm3[11,12,13,14,15]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vmovdqa 320(%rdi), %ymm3
+; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX2-FP-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload
+; AVX2-FP-NEXT: # ymm5 = mem[0],ymm2[1],mem[2,3],ymm2[4],mem[5,6],ymm2[7]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm5, %xmm2
+; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[2,1,0,3]
+; AVX2-FP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FP-NEXT: vpshufb %xmm1, %xmm5, %xmm2
+; AVX2-FP-NEXT: vpshufb %xmm1, %xmm3, %xmm3
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3],xmm3[4,5],xmm2[6,7]
+; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-FP-NEXT: vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
+; AVX2-FP-NEXT: # ymm3 = ymm3[0],mem[1],ymm3[2,3,4,5],mem[6],ymm3[7]
+; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpshufb %ymm15, %ymm3, %ymm3
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5,6,7]
+; AVX2-FP-NEXT: vmovdqa 736(%rdi), %ymm3
; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm8 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
-; AVX2-FP-NEXT: vpshufb %xmm0, %xmm8, %xmm2
-; AVX2-FP-NEXT: vextracti128 $1, %ymm8, %xmm3
+; AVX2-FP-NEXT: vmovdqa 704(%rdi), %ymm4
+; AVX2-FP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
+; AVX2-FP-NEXT: vpshufb %xmm0, %xmm4, %xmm9
+; AVX2-FP-NEXT: vextracti128 $1, %ymm4, %xmm3
; AVX2-FP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FP-NEXT: vpshufb %xmm4, %xmm3, %xmm3
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3],xmm3[4,5],xmm2[6],xmm3[7]
-; AVX2-FP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FP-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm7 # 32-byte Folded Reload
-; AVX2-FP-NEXT: # ymm7 = mem[0],ymm1[1],mem[2,3],ymm1[4],mem[5,6],ymm1[7]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm7, %xmm1
-; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,1,0,3]
-; AVX2-FP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FP-NEXT: vpshufb %xmm6, %xmm7, %xmm1
-; AVX2-FP-NEXT: vpshufb %xmm12, %xmm2, %xmm2
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
-; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FP-NEXT: vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX2-FP-NEXT: # ymm2 = ymm2[0],mem[1],ymm2[2,3,4,5],mem[6],ymm2[7]
+; AVX2-FP-NEXT: vpshufb %xmm0, %xmm3, %xmm10
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm9 = xmm10[0,1,2],xmm9[3],xmm10[4,5],xmm9[6],xmm10[7]
+; AVX2-FP-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm9 = ymm2[0,1,2],ymm9[3,4,5,6,7],ymm2[8,9,10],ymm9[11,12,13,14,15]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm9[4,5,6,7]
; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpshufb %ymm5, %ymm2, %ymm2
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
-; AVX2-FP-NEXT: vmovdqa 736(%rdi), %ymm2
+; AVX2-FP-NEXT: vmovdqa 160(%rdi), %ymm2
; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vmovdqa 704(%rdi), %ymm3
+; AVX2-FP-NEXT: vmovdqa 128(%rdi), %ymm3
; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm5 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
-; AVX2-FP-NEXT: vpshufb %xmm0, %xmm5, %xmm6
-; AVX2-FP-NEXT: vextracti128 $1, %ymm5, %xmm2
-; AVX2-FP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FP-NEXT: vpshufb %xmm4, %xmm2, %xmm9
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm6 = xmm9[0,1,2],xmm6[3],xmm9[4,5],xmm6[6],xmm9[7]
-; AVX2-FP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm6 = ymm1[0,1,2],ymm6[3,4,5,6,7],ymm1[8,9,10],ymm6[11,12,13,14,15]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm6[4,5,6,7]
-; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vmovdqa 160(%rdi), %ymm1
-; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vmovdqa 128(%rdi), %ymm2
-; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm3 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX2-FP-NEXT: vpshufb %xmm0, %xmm3, %xmm0
-; AVX2-FP-NEXT: vextracti128 $1, %ymm3, %xmm14
-; AVX2-FP-NEXT: vpshufb %xmm4, %xmm14, %xmm4
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm6 = xmm4[0,1,2],xmm0[3],xmm4[4,5],xmm0[6],xmm4[7]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
+; AVX2-FP-NEXT: vpshufb %xmm0, %xmm3, %xmm9
+; AVX2-FP-NEXT: vextracti128 $1, %ymm3, %xmm13
+; AVX2-FP-NEXT: vpshufb %xmm0, %xmm13, %xmm0
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm9 = xmm0[0,1,2],xmm9[3],xmm0[4,5],xmm9[6],xmm0[7]
; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm2 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm2, %xmm9
-; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[2,1,0,3]
-; AVX2-FP-NEXT: vpshufb %xmm12, %xmm9, %xmm12
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm12 = xmm4[0,1],xmm12[2],xmm4[3],xmm12[4,5],xmm4[6,7]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm2, %xmm10
+; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm14 = xmm10[2,1,0,3]
+; AVX2-FP-NEXT: vpshufb %xmm1, %xmm2, %xmm12
+; AVX2-FP-NEXT: vpshufb %xmm1, %xmm14, %xmm1
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm12 = xmm12[0,1],xmm1[2],xmm12[3],xmm1[4,5],xmm12[6,7]
; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FP-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
-; AVX2-FP-NEXT: # ymm4 = mem[0],ymm0[1],mem[2,3,4,5],ymm0[6],mem[7]
-; AVX2-FP-NEXT: vpshufb %ymm15, %ymm4, %ymm15
+; AVX2-FP-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm10 # 32-byte Folded Reload
+; AVX2-FP-NEXT: # ymm10 = mem[0],ymm0[1],mem[2,3,4,5],ymm0[6],mem[7]
+; AVX2-FP-NEXT: vpshufb %ymm15, %ymm10, %ymm15
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1,2],ymm15[3,4,5,6,7]
-; AVX2-FP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm6 = ymm12[0,1,2],ymm6[3,4,5,6,7],ymm12[8,9,10],ymm6[11,12,13,14,15]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm12[0,1,2,3],ymm6[4,5,6,7]
+; AVX2-FP-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm9 = ymm12[0,1,2],ymm9[3,4,5,6,7],ymm12[8,9,10],ymm9[11,12,13,14,15]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm12[0,1,2,3],ymm9[4,5,6,7]
; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vpbroadcastq {{.*#+}} xmm12 = [6,7,2,3,12,13,14,15,6,7,2,3,12,13,14,15]
-; AVX2-FP-NEXT: vpshufb %xmm12, %xmm10, %xmm6
-; AVX2-FP-NEXT: vpshuflw $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
-; AVX2-FP-NEXT: # xmm10 = mem[1,1,1,1,4,5,6,7]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm15 = xmm6[0,1],xmm10[2],xmm6[3,4],xmm10[5],xmm6[6],xmm10[7]
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm6 = [u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
+; AVX2-FP-NEXT: vpshufb %xmm12, %xmm7, %xmm7
+; AVX2-FP-NEXT: vpshuflw $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
+; AVX2-FP-NEXT: # xmm9 = mem[1,1,1,1,4,5,6,7]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm15 = xmm7[0,1],xmm9[2],xmm7[3,4],xmm9[5],xmm7[6],xmm9[7]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm7 = [u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FP-NEXT: vpshufb %ymm6, %ymm0, %ymm0
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm10 = [2,3,6,7,4,5,0,1,10,11,14,15,12,13,14,15]
-; AVX2-FP-NEXT: vpshufb %xmm10, %xmm15, %xmm15
+; AVX2-FP-NEXT: vpshufb %ymm7, %ymm0, %ymm0
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm9 = [2,3,6,7,4,5,0,1,10,11,14,15,12,13,14,15]
+; AVX2-FP-NEXT: vpshufb %xmm9, %xmm15, %xmm15
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm15[0,1,2],ymm0[3,4,5,6,7]
; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm15 = [6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
; AVX2-FP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-FP-NEXT: vpshufb %xmm15, %xmm1, %xmm1
-; AVX2-FP-NEXT: vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,5,5,5,5]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm13[3],xmm1[4,5],xmm13[6],xmm1[7]
+; AVX2-FP-NEXT: vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,5,5,5,5]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm11[3],xmm1[4,5],xmm11[6],xmm1[7]
; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpshufb %xmm12, %xmm11, %xmm0
+; AVX2-FP-NEXT: vpshufb %xmm12, %xmm6, %xmm0
; AVX2-FP-NEXT: vpshuflw $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; AVX2-FP-NEXT: # xmm1 = mem[1,1,1,1,4,5,6,7]
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4],xmm1[5],xmm0[6],xmm1[7]
; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FP-NEXT: vpshufb %ymm6, %ymm1, %ymm1
-; AVX2-FP-NEXT: vpshufb %xmm10, %xmm0, %xmm0
+; AVX2-FP-NEXT: vpshufb %ymm7, %ymm1, %ymm1
+; AVX2-FP-NEXT: vpshufb %xmm9, %xmm0, %xmm0
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
; AVX2-FP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-FP-NEXT: vpshufb %xmm15, %xmm1, %xmm1
-; AVX2-FP-NEXT: vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,5,5,5]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm8[3],xmm1[4,5],xmm8[6],xmm1[7]
+; AVX2-FP-NEXT: vpshufhw {{.*#+}} xmm6 = xmm8[0,1,2,3,5,5,5,5]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm6[3],xmm1[4,5],xmm6[6],xmm1[7]
; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpshufb %xmm12, %xmm7, %xmm0
+; AVX2-FP-NEXT: vpshufb %xmm12, %xmm5, %xmm0
; AVX2-FP-NEXT: vpshuflw $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; AVX2-FP-NEXT: # xmm1 = mem[1,1,1,1,4,5,6,7]
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4],xmm1[5],xmm0[6],xmm1[7]
; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FP-NEXT: vpshufb %ymm6, %ymm1, %ymm1
-; AVX2-FP-NEXT: vpshufb %xmm10, %xmm0, %xmm0
+; AVX2-FP-NEXT: vpshufb %ymm7, %ymm1, %ymm1
+; AVX2-FP-NEXT: vpshufb %xmm9, %xmm0, %xmm0
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
; AVX2-FP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-FP-NEXT: vpshufb %xmm15, %xmm1, %xmm1
-; AVX2-FP-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm5[3],xmm1[4,5],xmm5[6],xmm1[7]
+; AVX2-FP-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[3],xmm1[4,5],xmm4[6],xmm1[7]
; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpshufb %xmm15, %xmm14, %xmm0
+; AVX2-FP-NEXT: vpshufb %xmm15, %xmm13, %xmm0
; AVX2-FP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm3[0,1,2,3,5,5,5,5]
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
-; AVX2-FP-NEXT: vpshufb %ymm6, %ymm4, %ymm1
+; AVX2-FP-NEXT: vpshufb %ymm7, %ymm10, %ymm1
; AVX2-FP-NEXT: vpshufb %xmm12, %xmm2, %xmm2
-; AVX2-FP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm9[1,1,1,1,4,5,6,7]
+; AVX2-FP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm14[1,1,1,1,4,5,6,7]
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3,4],xmm3[5],xmm2[6],xmm3[7]
-; AVX2-FP-NEXT: vpshufb %xmm10, %xmm2, %xmm2
+; AVX2-FP-NEXT: vpshufb %xmm9, %xmm2, %xmm2
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7]
; AVX2-FP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
@@ -9679,8 +9684,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm0 = ymm4[0,1,2],ymm0[3,4,5,6,7],ymm4[8,9,10],ymm0[11,12,13,14,15]
; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[0,1,3,2]
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm4[5,6,7]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm9 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-FP-NEXT: vpshufb %xmm2, %xmm0, %xmm1
; AVX2-FP-NEXT: vpshufhw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
@@ -9697,7 +9701,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm1 = ymm6[0,1,2],ymm1[3,4,5,6,7],ymm6[8,9,10],ymm1[11,12,13,14,15]
; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,1,3,2]
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm6[5,6,7]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm11 = ymm4[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm8 = ymm4[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT: vpshufb %xmm2, %xmm14, %xmm1
; AVX2-FP-NEXT: vpshufhw {{.*#+}} xmm2 = xmm12[0,1,2,3,7,5,6,5]
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
@@ -9709,16 +9713,15 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,2]
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3,4],xmm2[5,6,7]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm9 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm3 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm1 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm1, %xmm7
-; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm8 = xmm1[0,3,2,1]
-; AVX2-FP-NEXT: vpbroadcastq {{.*#+}} xmm2 = [8,9,0,0,0,1,12,13,8,9,0,0,0,1,12,13]
-; AVX2-FP-NEXT: vpshufb %xmm2, %xmm7, %xmm1
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u]
-; AVX2-FP-NEXT: vpshufb %xmm6, %xmm8, %xmm4
+; AVX2-FP-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm6 = xmm1[0,3,2,1]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm7 = [0,1,2,3,0,1,4,5,8,9,12,13,0,1,12,13]
+; AVX2-FP-NEXT: vpshufb %xmm7, %xmm2, %xmm1
+; AVX2-FP-NEXT: vpshufb %xmm7, %xmm6, %xmm4
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm1 = xmm4[0,1,2,3],xmm1[4],xmm4[5],xmm1[6,7]
; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload
@@ -9726,10 +9729,10 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm1 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm1, %xmm10
+; AVX2-FP-NEXT: vextracti128 $1, %ymm1, %xmm11
; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm12 = xmm1[0,3,2,1]
-; AVX2-FP-NEXT: vpshufb %xmm2, %xmm10, %xmm1
-; AVX2-FP-NEXT: vpshufb %xmm6, %xmm12, %xmm5
+; AVX2-FP-NEXT: vpshufb %xmm7, %xmm11, %xmm1
+; AVX2-FP-NEXT: vpshufb %xmm7, %xmm12, %xmm5
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm1 = xmm5[0,1,2,3],xmm1[4],xmm5[5],xmm1[6,7]
; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm5 # 32-byte Folded Reload
@@ -9739,88 +9742,86 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FP-NEXT: # ymm1 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
; AVX2-FP-NEXT: vextracti128 $1, %ymm1, %xmm13
; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm14 = xmm1[0,3,2,1]
-; AVX2-FP-NEXT: vpshufb %xmm2, %xmm13, %xmm1
-; AVX2-FP-NEXT: vpshufb %xmm6, %xmm14, %xmm15
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm1 = xmm15[0,1,2,3],xmm1[4],xmm15[5],xmm1[6,7]
+; AVX2-FP-NEXT: vpshufb %xmm7, %xmm13, %xmm1
+; AVX2-FP-NEXT: vpshufb %xmm7, %xmm14, %xmm10
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm1 = xmm10[0,1,2,3],xmm1[4],xmm10[5],xmm1[6,7]
; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-FP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm3 # 32-byte Folded Reload
-; AVX2-FP-NEXT: # ymm3 = mem[0,1,2,3,4],ymm1[5,6,7]
+; AVX2-FP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX2-FP-NEXT: # ymm1 = mem[0,1,2,3,4],ymm1[5,6,7]
; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FP-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
-; AVX2-FP-NEXT: # ymm15 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm15, %xmm0
-; AVX2-FP-NEXT: vpshufb %xmm2, %xmm0, %xmm2
-; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm15 = xmm15[0,3,2,1]
-; AVX2-FP-NEXT: vpshufb %xmm6, %xmm15, %xmm6
+; AVX2-FP-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm10 # 32-byte Folded Reload
+; AVX2-FP-NEXT: # ymm10 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm10, %xmm15
+; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm0 = xmm10[0,3,2,1]
+; AVX2-FP-NEXT: vpshufb %xmm7, %xmm15, %xmm10
+; AVX2-FP-NEXT: vpshufb %xmm7, %xmm0, %xmm7
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm10[4],xmm7[5],xmm10[6,7]
+; AVX2-FP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX2-FP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm10 # 32-byte Folded Reload
+; AVX2-FP-NEXT: # ymm10 = mem[0,1,2,3,4],ymm7[5,6,7]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm7 = [0,1,2,3,2,3,6,7,10,11,14,15,2,3,14,15]
+; AVX2-FP-NEXT: vpshufb %xmm7, %xmm11, %xmm11
+; AVX2-FP-NEXT: vpshufb %xmm7, %xmm12, %xmm12
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm11 = xmm12[0,1,2,3],xmm11[4],xmm12[5],xmm11[6,7]
+; AVX2-FP-NEXT: vinserti128 $1, %xmm11, %ymm0, %ymm11
+; AVX2-FP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
+; AVX2-FP-NEXT: # ymm11 = mem[0,1,2,3,4],ymm11[5,6,7]
+; AVX2-FP-NEXT: vpshufb %xmm7, %xmm2, %xmm2
+; AVX2-FP-NEXT: vpshufb %xmm7, %xmm6, %xmm6
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm2 = xmm6[0,1,2,3],xmm2[4],xmm6[5],xmm2[6,7]
; AVX2-FP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX2-FP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm2 = mem[0,1,2,3,4],ymm2[5,6,7]
-; AVX2-FP-NEXT: vpbroadcastq {{.*#+}} xmm1 = [10,11,0,0,2,3,14,15,10,11,0,0,2,3,14,15]
-; AVX2-FP-NEXT: vpshufb %xmm1, %xmm10, %xmm6
-; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm10 = [0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u]
-; AVX2-FP-NEXT: vpshufb %xmm10, %xmm12, %xmm12
+; AVX2-FP-NEXT: vpshufb %xmm7, %xmm13, %xmm6
+; AVX2-FP-NEXT: vpshufb %xmm7, %xmm14, %xmm12
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm6 = xmm12[0,1,2,3],xmm6[4],xmm12[5],xmm6[6,7]
; AVX2-FP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX2-FP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm12 # 32-byte Folded Reload
-; AVX2-FP-NEXT: # ymm12 = mem[0,1,2,3,4],ymm6[5,6,7]
-; AVX2-FP-NEXT: vpshufb %xmm1, %xmm7, %xmm6
-; AVX2-FP-NEXT: vpshufb %xmm10, %xmm8, %xmm7
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0,1,2,3],xmm6[4],xmm7[5],xmm6[6,7]
-; AVX2-FP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
; AVX2-FP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm6 = mem[0,1,2,3,4],ymm6[5,6,7]
-; AVX2-FP-NEXT: vpshufb %xmm1, %xmm13, %xmm7
-; AVX2-FP-NEXT: vpshufb %xmm10, %xmm14, %xmm8
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm7 = xmm8[0,1,2,3],xmm7[4],xmm8[5],xmm7[6,7]
-; AVX2-FP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX2-FP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
-; AVX2-FP-NEXT: # ymm7 = mem[0,1,2,3,4],ymm7[5,6,7]
-; AVX2-FP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
-; AVX2-FP-NEXT: vpshufb %xmm10, %xmm15, %xmm1
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5],xmm0[6,7]
+; AVX2-FP-NEXT: vpshufb %xmm7, %xmm15, %xmm12
+; AVX2-FP-NEXT: vpshufb %xmm7, %xmm0, %xmm0
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm12[4],xmm0[5],xmm12[6,7]
; AVX2-FP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX2-FP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm0 = mem[0,1,2,3,4],ymm0[5,6,7]
-; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FP-NEXT: vmovaps %ymm1, 96(%rsi)
-; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FP-NEXT: vmovaps %ymm1, 32(%rsi)
-; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FP-NEXT: vmovaps %ymm1, 64(%rsi)
-; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FP-NEXT: vmovaps %ymm1, (%rsi)
-; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FP-NEXT: vmovaps %ymm1, 96(%rdx)
-; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FP-NEXT: vmovaps %ymm1, 32(%rdx)
-; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FP-NEXT: vmovaps %ymm1, 64(%rdx)
-; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FP-NEXT: vmovaps %ymm1, (%rdx)
-; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FP-NEXT: vmovaps %ymm1, 32(%rcx)
-; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FP-NEXT: vmovaps %ymm1, 96(%rcx)
-; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FP-NEXT: vmovaps %ymm1, 64(%rcx)
-; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FP-NEXT: vmovaps %ymm1, (%rcx)
-; AVX2-FP-NEXT: vmovdqa %ymm11, 96(%r8)
-; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FP-NEXT: vmovaps %ymm1, 32(%r8)
-; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FP-NEXT: vmovaps %ymm1, 64(%r8)
-; AVX2-FP-NEXT: vmovdqa %ymm9, (%r8)
-; AVX2-FP-NEXT: vmovdqa %ymm2, 96(%r9)
-; AVX2-FP-NEXT: vmovdqa %ymm3, 32(%r9)
+; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FP-NEXT: vmovaps %ymm7, 96(%rsi)
+; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FP-NEXT: vmovaps %ymm7, 32(%rsi)
+; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FP-NEXT: vmovaps %ymm7, 64(%rsi)
+; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FP-NEXT: vmovaps %ymm7, (%rsi)
+; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FP-NEXT: vmovaps %ymm7, 96(%rdx)
+; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FP-NEXT: vmovaps %ymm7, 32(%rdx)
+; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FP-NEXT: vmovaps %ymm7, 64(%rdx)
+; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FP-NEXT: vmovaps %ymm7, (%rdx)
+; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FP-NEXT: vmovaps %ymm7, 32(%rcx)
+; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FP-NEXT: vmovaps %ymm7, 96(%rcx)
+; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FP-NEXT: vmovaps %ymm7, 64(%rcx)
+; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FP-NEXT: vmovaps %ymm7, (%rcx)
+; AVX2-FP-NEXT: vmovdqa %ymm8, 96(%r8)
+; AVX2-FP-NEXT: vmovdqa %ymm9, 32(%r8)
+; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FP-NEXT: vmovaps %ymm7, 64(%r8)
+; AVX2-FP-NEXT: vmovdqa %ymm3, (%r8)
+; AVX2-FP-NEXT: vmovdqa %ymm10, 96(%r9)
+; AVX2-FP-NEXT: vmovdqa %ymm1, 32(%r9)
; AVX2-FP-NEXT: vmovdqa %ymm5, (%r9)
; AVX2-FP-NEXT: vmovdqa %ymm4, 64(%r9)
; AVX2-FP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX2-FP-NEXT: vmovdqa %ymm0, 96(%rax)
-; AVX2-FP-NEXT: vmovdqa %ymm7, 32(%rax)
-; AVX2-FP-NEXT: vmovdqa %ymm6, 64(%rax)
-; AVX2-FP-NEXT: vmovdqa %ymm12, (%rax)
+; AVX2-FP-NEXT: vmovdqa %ymm6, 32(%rax)
+; AVX2-FP-NEXT: vmovdqa %ymm2, 64(%rax)
+; AVX2-FP-NEXT: vmovdqa %ymm11, (%rax)
; AVX2-FP-NEXT: addq $1304, %rsp # imm = 0x518
; AVX2-FP-NEXT: vzeroupper
; AVX2-FP-NEXT: retq
@@ -9909,213 +9910,209 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpshufb %ymm2, %ymm13, %ymm2
; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm14, %ymm2, %ymm2
; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} xmm2 = [10,11,6,7,10,11,6,7,10,11,6,7,10,11,6,7]
-; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm11, %xmm14
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpshufb %xmm11, %xmm7, %xmm7
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm14 = xmm7[0],xmm14[1],xmm7[2,3],xmm14[4],xmm7[5,6,7]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [10,11,6,7,2,3,14,15,10,11,10,11,12,13,14,15]
+; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm11, %xmm11
+; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm7, %xmm7
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm7[0],xmm11[1],xmm7[2,3],xmm11[4],xmm7[5,6,7]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
; AVX2-FCP-NEXT: vpshufb %ymm7, %ymm10, %ymm10
-; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm14, %ymm10, %ymm10
+; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm11, %ymm10, %ymm10
; AVX2-FCP-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm5, %xmm5
-; AVX2-FCP-NEXT: vpshufb %xmm11, %xmm1, %xmm1
+; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm5[1],xmm1[2,3],xmm5[4],xmm1[5,6,7]
; AVX2-FCP-NEXT: vpshufb %ymm7, %ymm4, %ymm4
; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm1, %ymm4, %ymm1
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm9, %xmm1
-; AVX2-FCP-NEXT: vpshufb %xmm11, %xmm6, %xmm4
+; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm6, %xmm4
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm4[0],xmm1[1],xmm4[2,3],xmm1[4],xmm4[5,6,7]
; AVX2-FCP-NEXT: vpshufb %ymm7, %ymm8, %ymm4
; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm1, %ymm4, %ymm1
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpshufb %ymm7, %ymm13, %ymm1
-; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm3, %xmm2
-; AVX2-FCP-NEXT: vpshufb %xmm11, %xmm12, %xmm3
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2,3],xmm2[4],xmm3[5,6,7]
+; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm3, %xmm3
+; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm12, %xmm2
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6,7]
; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm2, %ymm1, %ymm0
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm10 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm10 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm10, %xmm0
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,1,0,3]
-; AVX2-FCP-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
-; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm10, %xmm0
-; AVX2-FCP-NEXT: vmovdqa %xmm2, %xmm6
-; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} xmm12 = [8,9,12,13,0,1,0,0,8,9,12,13,0,1,0,0]
-; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm1, %xmm1
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
-; AVX2-FCP-NEXT: vmovdqu (%rsp), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm1 = mem[0],ymm1[1],mem[2,3,4,5],ymm1[6],mem[7]
-; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpshufb %ymm2, %ymm1, %ymm1
-; AVX2-FCP-NEXT: vmovdqa %ymm2, %ymm5
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7]
+; AVX2-FCP-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm7 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm7, %xmm0
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,1,0,3]
+; AVX2-FCP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,12,13,0,1,4,5,8,9,12,13,12,13,14,15]
+; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm7, %xmm0
+; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm2, %xmm2
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2],xmm0[3],xmm2[4,5],xmm0[6,7]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm3 = [u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
+; AVX2-FCP-NEXT: vmovdqu (%rsp), %ymm2 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm2 = mem[0],ymm2[1],mem[2,3,4,5],ymm2[6],mem[7]
+; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vmovdqa %ymm3, %ymm5
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm0[0,1,2],ymm2[3,4,5,6,7]
; AVX2-FCP-NEXT: vmovdqa 544(%rdi), %ymm0
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 512(%rdi), %ymm2
+; AVX2-FCP-NEXT: vmovdqa 512(%rdi), %ymm3
+; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm3[0,1],ymm0[2],ymm3[3,4],ymm0[5],ymm3[6,7]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [0,1,4,5,4,5,4,5,0,1,12,13,8,9,4,5]
+; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm11, %xmm3
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm11, %xmm4
+; AVX2-FCP-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm4, %xmm4
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3],xmm4[4,5],xmm3[6],xmm4[7]
+; AVX2-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm2[0,1,2],ymm3[3,4,5,6,7],ymm2[8,9,10],ymm3[11,12,13,14,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7]
-; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} xmm0 = [8,9,4,5,8,9,4,5,8,9,4,5,8,9,4,5]
-; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm13, %xmm2
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm13, %xmm3
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm6 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm6 = mem[0],ymm2[1],mem[2,3],ymm2[4],mem[5,6],ymm2[7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm6, %xmm2
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[2,1,0,3]
; AVX2-FCP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm3, %xmm3
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3],xmm3[4,5],xmm2[6],xmm3[7]
-; AVX2-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm11 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm11 = mem[0],ymm1[1],mem[2,3],ymm1[4],mem[5,6],ymm1[7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm11, %xmm1
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,1,0,3]
-; AVX2-FCP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm11, %xmm1
-; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm2, %xmm2
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
-; AVX2-FCP-NEXT: vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm2 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm2 = ymm15[0],mem[1],ymm15[2,3,4,5],mem[6],ymm15[7]
-; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm6, %xmm2
+; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm3, %xmm3
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3],xmm3[4,5],xmm2[6,7]
+; AVX2-FCP-NEXT: vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm3 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm3 = ymm15[0],mem[1],ymm15[2,3,4,5],mem[6],ymm15[7]
+; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm3, %ymm3
; AVX2-FCP-NEXT: vmovdqa %ymm5, %ymm15
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqa 352(%rdi), %ymm2
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqa 352(%rdi), %ymm3
+; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqa 320(%rdi), %ymm4
+; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
+; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm8, %xmm3
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm8, %xmm4
+; AVX2-FCP-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm4, %xmm4
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3],xmm4[4,5],xmm3[6],xmm4[7]
+; AVX2-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm2[0,1,2],ymm3[3,4,5,6,7],ymm2[8,9,10],ymm3[11,12,13,14,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 320(%rdi), %ymm3
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm5 = mem[0],ymm2[1],mem[2,3],ymm2[4],mem[5,6],ymm2[7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm5, %xmm2
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[2,1,0,3]
+; AVX2-FCP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm5, %xmm2
+; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm3, %xmm3
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3],xmm3[4,5],xmm2[6,7]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm3 = ymm3[0],mem[1],ymm3[2,3,4,5],mem[6],ymm3[7]
+; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm3, %ymm3
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqa 736(%rdi), %ymm3
; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
-; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm8, %xmm2
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm8, %xmm3
+; AVX2-FCP-NEXT: vmovdqa 704(%rdi), %ymm4
+; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
+; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm4, %xmm9
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm4, %xmm3
; AVX2-FCP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm3, %xmm3
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3],xmm3[4,5],xmm2[6],xmm3[7]
-; AVX2-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm7 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm7 = mem[0],ymm1[1],mem[2,3],ymm1[4],mem[5,6],ymm1[7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm7, %xmm1
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,1,0,3]
-; AVX2-FCP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm7, %xmm1
-; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm2, %xmm2
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm2 = ymm2[0],mem[1],ymm2[2,3,4,5],mem[6],ymm2[7]
+; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm3, %xmm10
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm10[0,1,2],xmm9[3],xmm10[4,5],xmm9[6],xmm10[7]
+; AVX2-FCP-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm9 = ymm2[0,1,2],ymm9[3,4,5,6,7],ymm2[8,9,10],ymm9[11,12,13,14,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm9[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm2, %ymm2
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqa 736(%rdi), %ymm2
+; AVX2-FCP-NEXT: vmovdqa 160(%rdi), %ymm2
; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 704(%rdi), %ymm3
+; AVX2-FCP-NEXT: vmovdqa 128(%rdi), %ymm3
; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
-; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm5, %xmm6
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm5, %xmm2
-; AVX2-FCP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm2, %xmm9
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm9[0,1,2],xmm6[3],xmm9[4,5],xmm6[6],xmm9[7]
-; AVX2-FCP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm1[0,1,2],ymm6[3,4,5,6,7],ymm1[8,9,10],ymm6[11,12,13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm6[4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 160(%rdi), %ymm1
-; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 128(%rdi), %ymm2
-; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm3, %xmm0
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm3, %xmm14
-; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm14, %xmm4
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm4[0,1,2],xmm0[3],xmm4[4,5],xmm0[6],xmm4[7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
+; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm3, %xmm9
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm3, %xmm13
+; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm13, %xmm0
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm0[0,1,2],xmm9[3],xmm0[4,5],xmm9[6],xmm0[7]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm2 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm2, %xmm9
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[2,1,0,3]
-; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm9, %xmm12
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm4[0,1],xmm12[2],xmm4[3],xmm12[4,5],xmm4[6,7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm2, %xmm10
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm14 = xmm10[2,1,0,3]
+; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm2, %xmm12
+; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm14, %xmm1
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm12[0,1],xmm1[2],xmm12[3],xmm1[4,5],xmm12[6,7]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm4 = mem[0],ymm0[1],mem[2,3,4,5],ymm0[6],mem[7]
-; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm4, %ymm15
+; AVX2-FCP-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm10 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm10 = mem[0],ymm0[1],mem[2,3,4,5],ymm0[6],mem[7]
+; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm10, %ymm15
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1,2],ymm15[3,4,5,6,7]
-; AVX2-FCP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm12[0,1,2],ymm6[3,4,5,6,7],ymm12[8,9,10],ymm6[11,12,13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm12[0,1,2,3],ymm6[4,5,6,7]
+; AVX2-FCP-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm9 = ymm12[0,1,2],ymm9[3,4,5,6,7],ymm12[8,9,10],ymm9[11,12,13,14,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm12[0,1,2,3],ymm9[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} xmm12 = [6,7,2,3,12,13,14,15,6,7,2,3,12,13,14,15]
-; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm10, %xmm6
-; AVX2-FCP-NEXT: vpshuflw $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
-; AVX2-FCP-NEXT: # xmm10 = mem[1,1,1,1,4,5,6,7]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm15 = xmm6[0,1],xmm10[2],xmm6[3,4],xmm10[5],xmm6[6],xmm10[7]
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
+; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm7, %xmm7
+; AVX2-FCP-NEXT: vpshuflw $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
+; AVX2-FCP-NEXT: # xmm9 = mem[1,1,1,1,4,5,6,7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm15 = xmm7[0,1],xmm9[2],xmm7[3,4],xmm9[5],xmm7[6],xmm9[7]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm0
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [2,3,6,7,4,5,0,1,10,11,14,15,12,13,14,15]
-; AVX2-FCP-NEXT: vpshufb %xmm10, %xmm15, %xmm15
+; AVX2-FCP-NEXT: vpshufb %ymm7, %ymm0, %ymm0
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [2,3,6,7,4,5,0,1,10,11,14,15,12,13,14,15]
+; AVX2-FCP-NEXT: vpshufb %xmm9, %xmm15, %xmm15
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm15[0,1,2],ymm0[3,4,5,6,7]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
; AVX2-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-FCP-NEXT: vpshufb %xmm15, %xmm1, %xmm1
-; AVX2-FCP-NEXT: vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,5,5,5,5]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm13[3],xmm1[4,5],xmm13[6],xmm1[7]
+; AVX2-FCP-NEXT: vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,5,5,5,5]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm11[3],xmm1[4,5],xmm11[6],xmm1[7]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm11, %xmm0
+; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm6, %xmm0
; AVX2-FCP-NEXT: vpshuflw $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; AVX2-FCP-NEXT: # xmm1 = mem[1,1,1,1,4,5,6,7]
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4],xmm1[5],xmm0[6],xmm1[7]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm1, %ymm1
-; AVX2-FCP-NEXT: vpshufb %xmm10, %xmm0, %xmm0
+; AVX2-FCP-NEXT: vpshufb %ymm7, %ymm1, %ymm1
+; AVX2-FCP-NEXT: vpshufb %xmm9, %xmm0, %xmm0
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
; AVX2-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-FCP-NEXT: vpshufb %xmm15, %xmm1, %xmm1
-; AVX2-FCP-NEXT: vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,5,5,5]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm8[3],xmm1[4,5],xmm8[6],xmm1[7]
+; AVX2-FCP-NEXT: vpshufhw {{.*#+}} xmm6 = xmm8[0,1,2,3,5,5,5,5]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm6[3],xmm1[4,5],xmm6[6],xmm1[7]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm7, %xmm0
+; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm5, %xmm0
; AVX2-FCP-NEXT: vpshuflw $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; AVX2-FCP-NEXT: # xmm1 = mem[1,1,1,1,4,5,6,7]
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4],xmm1[5],xmm0[6],xmm1[7]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm1, %ymm1
-; AVX2-FCP-NEXT: vpshufb %xmm10, %xmm0, %xmm0
+; AVX2-FCP-NEXT: vpshufb %ymm7, %ymm1, %ymm1
+; AVX2-FCP-NEXT: vpshufb %xmm9, %xmm0, %xmm0
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
; AVX2-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-FCP-NEXT: vpshufb %xmm15, %xmm1, %xmm1
-; AVX2-FCP-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm5[3],xmm1[4,5],xmm5[6],xmm1[7]
+; AVX2-FCP-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[3],xmm1[4,5],xmm4[6],xmm1[7]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpshufb %xmm15, %xmm14, %xmm0
+; AVX2-FCP-NEXT: vpshufb %xmm15, %xmm13, %xmm0
; AVX2-FCP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm3[0,1,2,3,5,5,5,5]
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
-; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm4, %ymm1
+; AVX2-FCP-NEXT: vpshufb %ymm7, %ymm10, %ymm1
; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm2, %xmm2
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm9[1,1,1,1,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm14[1,1,1,1,4,5,6,7]
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3,4],xmm3[5],xmm2[6],xmm3[7]
-; AVX2-FCP-NEXT: vpshufb %xmm10, %xmm2, %xmm2
+; AVX2-FCP-NEXT: vpshufb %xmm9, %xmm2, %xmm2
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
@@ -10274,8 +10271,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm4[0,1,2],ymm0[3,4,5,6,7],ymm4[8,9,10],ymm0[11,12,13,14,15]
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[0,1,3,2]
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm4[5,6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm0, %xmm1
; AVX2-FCP-NEXT: vpshufhw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
@@ -10292,7 +10288,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm6[0,1,2],ymm1[3,4,5,6,7],ymm6[8,9,10],ymm1[11,12,13,14,15]
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,1,3,2]
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm6[5,6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm4[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm4[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm14, %xmm1
; AVX2-FCP-NEXT: vpshufhw {{.*#+}} xmm2 = xmm12[0,1,2,3,7,5,6,5]
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
@@ -10304,16 +10300,15 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,2]
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3,4],xmm2[5,6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm1 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm7
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm8 = xmm1[0,3,2,1]
-; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} xmm2 = [8,9,0,0,0,1,12,13,8,9,0,0,0,1,12,13]
-; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm7, %xmm1
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u]
-; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm8, %xmm4
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm1[0,3,2,1]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [0,1,2,3,0,1,4,5,8,9,12,13,0,1,12,13]
+; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm2, %xmm1
+; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm6, %xmm4
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm4[0,1,2,3],xmm1[4],xmm4[5],xmm1[6,7]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FCP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload
@@ -10321,10 +10316,10 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm1 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm10
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm11
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm12 = xmm1[0,3,2,1]
-; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm10, %xmm1
-; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm12, %xmm5
+; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm11, %xmm1
+; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm12, %xmm5
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm5[0,1,2,3],xmm1[4],xmm5[5],xmm1[6,7]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FCP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm5 # 32-byte Folded Reload
@@ -10334,88 +10329,86 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: # ymm1 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm13
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm14 = xmm1[0,3,2,1]
-; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm13, %xmm1
-; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm14, %xmm15
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm15[0,1,2,3],xmm1[4],xmm15[5],xmm1[6,7]
+; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm13, %xmm1
+; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm14, %xmm10
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm10[0,1,2,3],xmm1[4],xmm10[5],xmm1[6,7]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-FCP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm3 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm3 = mem[0,1,2,3,4],ymm1[5,6,7]
+; AVX2-FCP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm1 = mem[0,1,2,3,4],ymm1[5,6,7]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm15 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm15, %xmm0
-; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm0, %xmm2
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm15 = xmm15[0,3,2,1]
-; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm15, %xmm6
+; AVX2-FCP-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm10 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm10 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm10, %xmm15
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm0 = xmm10[0,3,2,1]
+; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm15, %xmm10
+; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm0, %xmm7
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm10[4],xmm7[5],xmm10[6,7]
+; AVX2-FCP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX2-FCP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm10 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm10 = mem[0,1,2,3,4],ymm7[5,6,7]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [0,1,2,3,2,3,6,7,10,11,14,15,2,3,14,15]
+; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm11, %xmm11
+; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm12, %xmm12
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm12[0,1,2,3],xmm11[4],xmm12[5],xmm11[6,7]
+; AVX2-FCP-NEXT: vinserti128 $1, %xmm11, %ymm0, %ymm11
+; AVX2-FCP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm11 = mem[0,1,2,3,4],ymm11[5,6,7]
+; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm2, %xmm2
+; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm6, %xmm6
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm6[0,1,2,3],xmm2[4],xmm6[5],xmm2[6,7]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX2-FCP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm2 = mem[0,1,2,3,4],ymm2[5,6,7]
-; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} xmm1 = [10,11,0,0,2,3,14,15,10,11,0,0,2,3,14,15]
-; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm10, %xmm6
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u]
-; AVX2-FCP-NEXT: vpshufb %xmm10, %xmm12, %xmm12
+; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm13, %xmm6
+; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm14, %xmm12
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm12[0,1,2,3],xmm6[4],xmm12[5],xmm6[6,7]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX2-FCP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm12 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm12 = mem[0,1,2,3,4],ymm6[5,6,7]
-; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm7, %xmm6
-; AVX2-FCP-NEXT: vpshufb %xmm10, %xmm8, %xmm7
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0,1,2,3],xmm6[4],xmm7[5],xmm6[6,7]
-; AVX2-FCP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
; AVX2-FCP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm6 = mem[0,1,2,3,4],ymm6[5,6,7]
-; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm13, %xmm7
-; AVX2-FCP-NEXT: vpshufb %xmm10, %xmm14, %xmm8
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm8[0,1,2,3],xmm7[4],xmm8[5],xmm7[6,7]
-; AVX2-FCP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX2-FCP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm7 = mem[0,1,2,3,4],ymm7[5,6,7]
-; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
-; AVX2-FCP-NEXT: vpshufb %xmm10, %xmm15, %xmm1
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5],xmm0[6,7]
+; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm15, %xmm12
+; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm0, %xmm0
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm12[4],xmm0[5],xmm12[6,7]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX2-FCP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm0 = mem[0,1,2,3,4],ymm0[5,6,7]
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm1, 96(%rsi)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm1, 32(%rsi)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm1, 64(%rsi)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm1, (%rsi)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm1, 96(%rdx)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm1, 32(%rdx)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm1, 64(%rdx)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm1, (%rdx)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm1, 32(%rcx)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm1, 96(%rcx)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm1, 64(%rcx)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm1, (%rcx)
-; AVX2-FCP-NEXT: vmovdqa %ymm11, 96(%r8)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm1, 32(%r8)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm1, 64(%r8)
-; AVX2-FCP-NEXT: vmovdqa %ymm9, (%r8)
-; AVX2-FCP-NEXT: vmovdqa %ymm2, 96(%r9)
-; AVX2-FCP-NEXT: vmovdqa %ymm3, 32(%r9)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm7, 96(%rsi)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm7, 32(%rsi)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm7, 64(%rsi)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm7, (%rsi)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm7, 96(%rdx)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm7, 32(%rdx)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm7, 64(%rdx)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm7, (%rdx)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm7, 32(%rcx)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm7, 96(%rcx)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm7, 64(%rcx)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm7, (%rcx)
+; AVX2-FCP-NEXT: vmovdqa %ymm8, 96(%r8)
+; AVX2-FCP-NEXT: vmovdqa %ymm9, 32(%r8)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm7, 64(%r8)
+; AVX2-FCP-NEXT: vmovdqa %ymm3, (%r8)
+; AVX2-FCP-NEXT: vmovdqa %ymm10, 96(%r9)
+; AVX2-FCP-NEXT: vmovdqa %ymm1, 32(%r9)
; AVX2-FCP-NEXT: vmovdqa %ymm5, (%r9)
; AVX2-FCP-NEXT: vmovdqa %ymm4, 64(%r9)
; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX2-FCP-NEXT: vmovdqa %ymm0, 96(%rax)
-; AVX2-FCP-NEXT: vmovdqa %ymm7, 32(%rax)
-; AVX2-FCP-NEXT: vmovdqa %ymm6, 64(%rax)
-; AVX2-FCP-NEXT: vmovdqa %ymm12, (%rax)
+; AVX2-FCP-NEXT: vmovdqa %ymm6, 32(%rax)
+; AVX2-FCP-NEXT: vmovdqa %ymm2, 64(%rax)
+; AVX2-FCP-NEXT: vmovdqa %ymm11, (%rax)
; AVX2-FCP-NEXT: addq $1304, %rsp # imm = 0x518
; AVX2-FCP-NEXT: vzeroupper
; AVX2-FCP-NEXT: retq
@@ -10445,8 +10438,8 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vpshuflw {{.*#+}} xmm1 = xmm13[2,2,2,2,4,5,6,7]
; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3,4],xmm2[5,6,7]
; AVX512-NEXT: vmovdqa64 %xmm2, %xmm21
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm11 = [0,1,4,5,4,5,6,7,0,1,12,13,8,9,4,5]
-; AVX512-NEXT: vpshufb %xmm11, %xmm1, %xmm1
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm10 = [0,1,4,5,4,5,6,7,0,1,12,13,8,9,4,5]
+; AVX512-NEXT: vpshufb %xmm10, %xmm1, %xmm1
; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX512-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -10467,8 +10460,8 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vinserti128 $1, 480(%rdi), %ymm0, %ymm0
; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm0[0],ymm2[1],ymm0[2,3,4,5],ymm2[6],ymm0[7]
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm10 = [0,1,12,13,8,9,12,13,8,9,12,13,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
-; AVX512-NEXT: vpshufb %ymm10, %ymm2, %ymm0
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm5 = [0,1,12,13,8,9,12,13,8,9,12,13,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
+; AVX512-NEXT: vpshufb %ymm5, %ymm2, %ymm0
; AVX512-NEXT: vmovdqa64 %ymm2, %ymm23
; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -10477,12 +10470,12 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-NEXT: vmovdqa 704(%rdi), %ymm2
; AVX512-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpblendd {{.*#+}} ymm8 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX512-NEXT: vextracti128 $1, %ymm8, %xmm2
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm1 = xmm8[2,2,2,2,4,5,6,7]
+; AVX512-NEXT: vpblendd {{.*#+}} ymm11 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
+; AVX512-NEXT: vextracti128 $1, %ymm11, %xmm2
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm1 = xmm11[2,2,2,2,4,5,6,7]
; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3,4],xmm2[5,6,7]
; AVX512-NEXT: vmovdqa64 %xmm2, %xmm28
-; AVX512-NEXT: vpshufb %xmm11, %xmm1, %xmm1
+; AVX512-NEXT: vpshufb %xmm10, %xmm1, %xmm1
; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX512-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],mem[2,3]
; AVX512-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -10512,10 +10505,10 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-NEXT: vpblendd {{.*#+}} ymm4 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
; AVX512-NEXT: vmovdqa64 %ymm1, %ymm30
-; AVX512-NEXT: vextracti128 $1, %ymm4, %xmm6
+; AVX512-NEXT: vextracti128 $1, %ymm4, %xmm7
; AVX512-NEXT: vpshuflw {{.*#+}} xmm1 = xmm4[2,2,2,2,4,5,6,7]
-; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm6[0,1,2],xmm1[3,4],xmm6[5,6,7]
-; AVX512-NEXT: vpshufb %xmm11, %xmm1, %xmm1
+; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm7[0,1,2],xmm1[3,4],xmm7[5,6,7]
+; AVX512-NEXT: vpshufb %xmm10, %xmm1, %xmm1
; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX512-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -10523,97 +10516,96 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpblendd {{.*#+}} ymm5 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
-; AVX512-NEXT: vpshufb %xmm9, %xmm5, %xmm1
-; AVX512-NEXT: vextracti128 $1, %ymm5, %xmm9
-; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm9[0,2,0,3]
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
-; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2],xmm1[3],xmm3[4,5],xmm1[6,7]
-; AVX512-NEXT: vmovdqa 64(%rdi), %ymm3
-; AVX512-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],mem[2,3]
-; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vinserti128 $1, 96(%rdi), %ymm3, %ymm2
+; AVX512-NEXT: vpblendd {{.*#+}} ymm3 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
+; AVX512-NEXT: vpshufb %xmm9, %xmm3, %xmm0
+; AVX512-NEXT: vextracti128 $1, %ymm3, %xmm9
+; AVX512-NEXT: vpshufd {{.*#+}} xmm2 = xmm9[0,2,0,3]
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
+; AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2],xmm0[3],xmm2[4,5],xmm0[6,7]
+; AVX512-NEXT: vmovdqa 64(%rdi), %ymm2
+; AVX512-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],mem[2,3]
+; AVX512-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vinserti128 $1, 96(%rdi), %ymm2, %ymm2
; AVX512-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpblendd {{.*#+}} ymm7 = ymm2[0],ymm0[1],ymm2[2,3,4,5],ymm0[6],ymm2[7]
-; AVX512-NEXT: vpshufb %ymm10, %ymm7, %ymm3
-; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm3[3,4,5,6,7]
+; AVX512-NEXT: vpblendd {{.*#+}} ymm8 = ymm2[0],ymm1[1],ymm2[2,3,4,5],ymm1[6],ymm2[7]
+; AVX512-NEXT: vpshufb %ymm5, %ymm8, %ymm2
+; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3,4,5,6,7]
; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: vmovdqa 352(%rdi), %ymm0
; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-NEXT: vmovdqa 320(%rdi), %ymm1
; AVX512-NEXT: vmovdqu %ymm1, (%rsp) # 32-byte Spill
-; AVX512-NEXT: vpblendd {{.*#+}} ymm10 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX512-NEXT: vextracti128 $1, %ymm10, %xmm3
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm1 = xmm10[2,2,2,2,4,5,6,7]
-; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3,4],xmm3[5,6,7]
-; AVX512-NEXT: vpshufb %xmm11, %xmm1, %xmm11
-; AVX512-NEXT: vmovdqa 256(%rdi), %ymm1
-; AVX512-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm1[2,3],mem[2,3]
-; AVX512-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vinserti128 $1, 288(%rdi), %ymm1, %ymm0
-; AVX512-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0],ymm2[1],ymm0[2,3,4,5],ymm2[6],ymm0[7]
+; AVX512-NEXT: vpblendd {{.*#+}} ymm5 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX512-NEXT: vextracti128 $1, %ymm5, %xmm6
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm2 = xmm5[2,2,2,2,4,5,6,7]
+; AVX512-NEXT: vpblendw {{.*#+}} xmm2 = xmm6[0,1,2],xmm2[3,4],xmm6[5,6,7]
+; AVX512-NEXT: vpshufb %xmm10, %xmm2, %xmm10
+; AVX512-NEXT: vmovdqa 256(%rdi), %ymm2
+; AVX512-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],mem[2,3]
+; AVX512-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vinserti128 $1, 288(%rdi), %ymm2, %ymm0
+; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm0[0],ymm1[1],ymm0[2,3,4,5],ymm1[6],ymm0[7]
; AVX512-NEXT: vmovdqa64 %ymm0, %ymm31
; AVX512-NEXT: vmovdqa64 %ymm17, %ymm0
-; AVX512-NEXT: vpshufb %ymm0, %ymm1, %ymm0
-; AVX512-NEXT: vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX512-NEXT: vpblendw {{.*#+}} ymm11 = ymm0[0,1,2],ymm11[3,4,5,6,7],ymm0[8,9,10],ymm11[11,12,13,14,15]
+; AVX512-NEXT: vpshufb %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512-NEXT: vpblendw {{.*#+}} ymm10 = ymm0[0,1,2],ymm10[3,4,5,6,7],ymm0[8,9,10],ymm10[11,12,13,14,15]
; AVX512-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,6]
-; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm11[4,5,6,7]
+; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm10[4,5,6,7]
; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpbroadcastq {{.*#+}} xmm11 = [2,3,14,15,10,11,0,0,2,3,14,15,10,11,0,0]
-; AVX512-NEXT: vpshufb %xmm11, %xmm15, %xmm0
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm15 = [2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
-; AVX512-NEXT: vpshufb %xmm15, %xmm14, %xmm14
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm10 = [2,3,14,15,10,11,6,7,2,3,14,15,12,13,14,15]
+; AVX512-NEXT: vpshufb %xmm10, %xmm15, %xmm0
+; AVX512-NEXT: vpshufb %xmm10, %xmm14, %xmm14
; AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm14[0,1],xmm0[2],xmm14[3],xmm0[4,5],xmm14[6,7]
; AVX512-NEXT: vmovdqa {{.*#+}} xmm14 = [6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
-; AVX512-NEXT: vpshufb %xmm14, %xmm6, %xmm6
+; AVX512-NEXT: vpshufb %xmm14, %xmm7, %xmm7
; AVX512-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
-; AVX512-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0,1,2],xmm4[3],xmm6[4,5],xmm4[6],xmm6[7]
+; AVX512-NEXT: vpblendw {{.*#+}} xmm4 = xmm7[0,1,2],xmm4[3],xmm7[4,5],xmm4[6],xmm7[7]
; AVX512-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
; AVX512-NEXT: vinserti32x4 $2, %xmm0, %zmm4, %zmm0
; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vpshufb %xmm11, %xmm9, %xmm0
-; AVX512-NEXT: vpshufb %xmm15, %xmm5, %xmm2
-; AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3],xmm0[4,5],xmm2[6,7]
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [2,3,14,15,10,11,10,11,14,15,10,11,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
-; AVX512-NEXT: vpshufb %ymm2, %ymm7, %ymm4
+; AVX512-NEXT: vpshufb %xmm10, %xmm9, %xmm0
+; AVX512-NEXT: vpshufb %xmm10, %xmm3, %xmm3
+; AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0,1],xmm0[2],xmm3[3],xmm0[4,5],xmm3[6,7]
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [2,3,14,15,10,11,10,11,14,15,10,11,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
+; AVX512-NEXT: vpshufb %ymm3, %ymm8, %ymm4
; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm4[3,4,5,6,7]
; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vpshufb %xmm14, %xmm3, %xmm0
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm3 = xmm10[0,1,2,3,5,5,5,5]
-; AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[3],xmm0[4,5],xmm3[6],xmm0[7]
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23]
-; AVX512-NEXT: vpshufb %ymm3, %ymm1, %ymm1
+; AVX512-NEXT: vpshufb %xmm14, %xmm6, %xmm0
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm1 = xmm5[0,1,2,3,5,5,5,5]
+; AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23]
+; AVX512-NEXT: vpshufb %ymm1, %ymm2, %ymm2
; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,7]
-; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7],ymm2[8,9,10],ymm0[11,12,13,14,15]
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,5,7]
+; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX512-NEXT: vmovdqa64 %ymm0, %ymm27
; AVX512-NEXT: vmovdqa64 %xmm20, %xmm0
-; AVX512-NEXT: vpshufb %xmm11, %xmm0, %xmm0
-; AVX512-NEXT: vmovdqa64 %ymm16, %ymm1
-; AVX512-NEXT: vpshufb %xmm15, %xmm1, %xmm1
-; AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
-; AVX512-NEXT: vmovdqa64 %xmm21, %xmm1
-; AVX512-NEXT: vpshufb %xmm14, %xmm1, %xmm1
+; AVX512-NEXT: vpshufb %xmm10, %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa64 %ymm16, %ymm2
+; AVX512-NEXT: vpshufb %xmm10, %xmm2, %xmm2
+; AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3],xmm0[4,5],xmm2[6,7]
+; AVX512-NEXT: vmovdqa64 %xmm21, %xmm2
+; AVX512-NEXT: vpshufb %xmm14, %xmm2, %xmm2
; AVX512-NEXT: vpshufhw {{.*#+}} xmm4 = xmm13[0,1,2,3,5,5,5,5]
-; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[3],xmm1[4,5],xmm4[6],xmm1[7]
-; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm4[3],xmm2[4,5],xmm4[6],xmm2[7]
+; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm0
; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: vmovdqa64 %ymm23, %ymm0
-; AVX512-NEXT: vpshufb %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vmovdqa64 %xmm22, %xmm1
-; AVX512-NEXT: vpshufb %xmm11, %xmm1, %xmm1
-; AVX512-NEXT: vpshufb %xmm15, %xmm12, %xmm2
-; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2],xmm2[3],xmm1[4,5],xmm2[6,7]
-; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
+; AVX512-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX512-NEXT: vmovdqa64 %xmm22, %xmm2
+; AVX512-NEXT: vpshufb %xmm10, %xmm2, %xmm2
+; AVX512-NEXT: vpshufb %xmm10, %xmm12, %xmm3
+; AVX512-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2],xmm3[3],xmm2[4,5],xmm3[6,7]
+; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: vmovdqa64 %ymm29, %ymm0
-; AVX512-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX512-NEXT: vpshufb %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa64 %xmm28, %xmm1
; AVX512-NEXT: vpshufb %xmm14, %xmm1, %xmm1
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm2 = xmm8[0,1,2,3,5,5,5,5]
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm2 = xmm11[0,1,2,3,5,5,5,5]
; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7]
; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX512-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
@@ -11049,79 +11041,77 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
;
; AVX512-FCP-LABEL: load_i16_stride6_vf64:
; AVX512-FCP: # %bb.0:
-; AVX512-FCP-NEXT: subq $1480, %rsp # imm = 0x5C8
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
+; AVX512-FCP-NEXT: subq $1416, %rsp # imm = 0x588
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [0,1,12,13,0,1,4,5,8,9,12,13,12,13,14,15]
; AVX512-FCP-NEXT: vmovdqa 608(%rdi), %ymm0
; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-FCP-NEXT: vmovdqa 576(%rdi), %ymm1
; AVX512-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
-; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm2, %xmm0
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} xmm12 = [8,9,12,13,0,1,0,0,8,9,12,13,0,1,0,0]
+; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm2, %xmm0
; AVX512-FCP-NEXT: vextracti128 $1, %ymm2, %xmm1
; AVX512-FCP-NEXT: vmovdqa64 %ymm2, %ymm20
; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,1,0,3]
-; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm2, %xmm1
+; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm2, %xmm1
; AVX512-FCP-NEXT: vmovdqa64 %xmm2, %xmm21
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
; AVX512-FCP-NEXT: vmovdqa 544(%rdi), %ymm1
; AVX512-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-FCP-NEXT: vmovdqa 512(%rdi), %ymm2
; AVX512-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm15 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX512-FCP-NEXT: vpbroadcastd {{.*#+}} xmm9 = [8,9,4,5,8,9,4,5,8,9,4,5,8,9,4,5]
-; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm15, %xmm1
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm15, %xmm4
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
-; AVX512-FCP-NEXT: vpshufb %xmm2, %xmm4, %xmm3
-; AVX512-FCP-NEXT: vmovdqa64 %xmm4, %xmm22
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3],xmm3[4,5],xmm1[6],xmm3[7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [0,1,4,5,4,5,4,5,0,1,12,13,8,9,4,5]
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm13, %xmm1
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm13, %xmm3
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm3, %xmm2
+; AVX512-FCP-NEXT: vmovdqa64 %xmm3, %xmm22
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa 416(%rdi), %ymm0
-; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vmovdqa 384(%rdi), %ymm1
+; AVX512-FCP-NEXT: vmovdqa 448(%rdi), %ymm0
+; AVX512-FCP-NEXT: vmovdqa 416(%rdi), %ymm1
; AVX512-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
-; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm14, %xmm0
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm14, %xmm1
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[2,1,0,3]
-; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm3, %xmm1
+; AVX512-FCP-NEXT: vmovdqa 384(%rdi), %ymm2
+; AVX512-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
+; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm12, %xmm1
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm12, %xmm2
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[2,1,0,3]
+; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm3, %xmm2
; AVX512-FCP-NEXT: vmovdqa64 %xmm3, %xmm23
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
-; AVX512-FCP-NEXT: vmovdqa 448(%rdi), %ymm1
-; AVX512-FCP-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],mem[2,3]
-; AVX512-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vinserti128 $1, 480(%rdi), %ymm1, %ymm1
-; AVX512-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm1[0],ymm3[1],ymm1[2,3,4,5],ymm3[6],ymm1[7]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [0,1,12,13,8,9,12,13,8,9,12,13,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
-; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm3, %ymm1
-; AVX512-FCP-NEXT: vmovdqa64 %ymm3, %ymm24
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
+; AVX512-FCP-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],mem[2,3]
+; AVX512-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vinserti128 $1, 480(%rdi), %ymm0, %ymm0
+; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm0[0],ymm2[1],ymm0[2,3,4,5],ymm2[6],ymm0[7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,12,13,8,9,12,13,8,9,12,13,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm0
+; AVX512-FCP-NEXT: vmovdqa64 %ymm2, %ymm24
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT: vmovdqa 736(%rdi), %ymm0
; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-FCP-NEXT: vmovdqa 704(%rdi), %ymm1
; AVX512-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm13, %xmm0
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm13, %xmm3
-; AVX512-FCP-NEXT: vpshufb %xmm2, %xmm3, %xmm1
-; AVX512-FCP-NEXT: vmovdqa64 %xmm3, %xmm26
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm9, %xmm0
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm9, %xmm2
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm2, %xmm1
+; AVX512-FCP-NEXT: vmovdqa64 %xmm2, %xmm26
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7]
; AVX512-FCP-NEXT: vmovdqa 640(%rdi), %ymm1
; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],mem[2,3]
-; AVX512-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm1[2,3],mem[2,3]
+; AVX512-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-FCP-NEXT: vinserti128 $1, 672(%rdi), %ymm1, %ymm1
; AVX512-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm1[0],ymm3[1],ymm1[2,3,4,5],ymm3[6],ymm1[7]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u]
-; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm1
-; AVX512-FCP-NEXT: vmovdqa64 %ymm4, %ymm16
-; AVX512-FCP-NEXT: vmovdqa64 %ymm3, %ymm29
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0],ymm2[1],ymm1[2,3,4,5],ymm2[6],ymm1[7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u]
+; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm1
+; AVX512-FCP-NEXT: vmovdqa64 %ymm3, %ymm16
+; AVX512-FCP-NEXT: vmovdqa64 %ymm2, %ymm29
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,6]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
@@ -11130,21 +11120,21 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-FCP-NEXT: vmovdqa 192(%rdi), %ymm1
; AVX512-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm11, %xmm0
-; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm11, %xmm1
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm7 = xmm0[2,1,0,3]
-; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm7, %xmm0
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm14, %xmm0
+; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm14, %xmm1
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm15 = xmm0[2,1,0,3]
+; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm15, %xmm0
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
; AVX512-FCP-NEXT: vmovdqa 160(%rdi), %ymm1
; AVX512-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vmovdqa 128(%rdi), %ymm3
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm3[0,1],ymm1[2],ymm3[3,4],ymm1[5],ymm3[6,7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm3, %ymm30
-; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm5, %xmm1
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm5, %xmm6
-; AVX512-FCP-NEXT: vpshufb %xmm2, %xmm6, %xmm3
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3],xmm3[4,5],xmm1[6],xmm3[7]
+; AVX512-FCP-NEXT: vmovdqa 128(%rdi), %ymm2
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm2, %ymm30
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm5, %xmm1
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm5, %xmm7
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm7, %xmm2
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -11152,30 +11142,30 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
-; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm4, %xmm0
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm4, %xmm3
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm10 = xmm3[2,1,0,3]
-; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm10, %xmm3
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2],xmm0[3],xmm3[4,5],xmm0[6,7]
-; AVX512-FCP-NEXT: vmovdqa 64(%rdi), %ymm3
-; AVX512-FCP-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm3[2,3],mem[2,3]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm3, %xmm0
+; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm3, %xmm2
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm8 = xmm0[2,1,0,3]
+; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm8, %xmm0
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3],xmm0[4,5],xmm2[6,7]
+; AVX512-FCP-NEXT: vmovdqa 64(%rdi), %ymm2
+; AVX512-FCP-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],mem[2,3]
; AVX512-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vinserti128 $1, 96(%rdi), %ymm3, %ymm3
-; AVX512-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm3[0],ymm1[1],ymm3[2,3,4,5],ymm1[6],ymm3[7]
-; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm12, %ymm3
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3,4,5,6,7]
+; AVX512-FCP-NEXT: vinserti128 $1, 96(%rdi), %ymm2, %ymm2
+; AVX512-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm2[0],ymm1[1],ymm2[2,3,4,5],ymm1[6],ymm2[7]
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm11, %ymm2
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3,4,5,6,7]
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT: vmovdqa 352(%rdi), %ymm0
-; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
; AVX512-FCP-NEXT: vmovdqa 320(%rdi), %ymm1
-; AVX512-FCP-NEXT: vmovdqu %ymm1, (%rsp) # 32-byte Spill
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm8, %xmm9
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm8, %xmm3
-; AVX512-FCP-NEXT: vpshufb %xmm2, %xmm3, %xmm2
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm2[0,1,2],xmm9[3],xmm2[4,5],xmm9[6],xmm2[7]
+; AVX512-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm4, %xmm2
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm4, %xmm6
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm6, %xmm10
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1,2],xmm2[3],xmm10[4,5],xmm2[6],xmm10[7]
; AVX512-FCP-NEXT: vmovdqa 256(%rdi), %ymm2
; AVX512-FCP-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],mem[2,3]
; AVX512-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -11184,70 +11174,69 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm31
; AVX512-FCP-NEXT: vmovdqa64 %ymm16, %ymm0
; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm2, %ymm0
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm9 = ymm0[0,1,2],ymm9[3,4,5,6,7],ymm0[8,9,10],ymm9[11,12,13,14,15]
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm10 = ymm0[0,1,2],ymm10[3,4,5,6,7],ymm0[8,9,10],ymm10[11,12,13,14,15]
; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,6]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5,6,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm10[4,5,6,7]
; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
-; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm11, %xmm0
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} xmm11 = [10,11,14,15,2,3,0,0,10,11,14,15,2,3,0,0]
-; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm7, %xmm7
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm0[0,1],xmm7[2],xmm0[3],xmm7[4,5],xmm0[6,7]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
-; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm6, %xmm6
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [2,3,14,15,2,3,6,7,10,11,14,15,12,13,14,15]
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm14, %xmm0
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm15, %xmm14
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm14[2],xmm0[3],xmm14[4,5],xmm0[6,7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
+; AVX512-FCP-NEXT: vpshufb %xmm14, %xmm7, %xmm7
; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1,2],xmm5[3],xmm6[4,5],xmm5[6],xmm6[7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm7[0,1,2],xmm5[3],xmm7[4,5],xmm5[6],xmm7[7]
; AVX512-FCP-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm7, %zmm5, %zmm5
-; AVX512-FCP-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm4, %xmm4
-; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm10, %xmm5
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2],xmm4[3],xmm5[4,5],xmm4[6,7]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [2,3,14,15,10,11,10,11,14,15,10,11,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
-; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm12, %ymm6
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2],ymm6[3,4,5,6,7]
-; AVX512-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm3, %xmm3
-; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm8[0,1,2,3,5,5,5,5]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3],xmm3[4,5],xmm1[6],xmm3[7]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm3 = [2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23]
-; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm2
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
+; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm5, %zmm0
+; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm3, %xmm0
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm8, %xmm3
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2],xmm0[3],xmm3[4,5],xmm0[6,7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm3 = [2,3,14,15,10,11,10,11,14,15,10,11,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
+; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm11, %ymm5
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm5[3,4,5,6,7]
+; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vpshufb %xmm14, %xmm6, %xmm0
+; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm4[0,1,2,3,5,5,5,5]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23]
+; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm2, %ymm2
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7],ymm2[8,9,10],ymm0[11,12,13,14,15]
; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,5,7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm1, %ymm28
-; AVX512-FCP-NEXT: vmovdqa64 %ymm20, %ymm1
-; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm1, %xmm1
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm28
+; AVX512-FCP-NEXT: vmovdqa64 %ymm20, %ymm0
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm0, %xmm0
; AVX512-FCP-NEXT: vmovdqa64 %xmm21, %xmm2
-; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm2, %xmm2
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm2, %xmm2
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2],xmm0[3],xmm2[4,5],xmm0[6,7]
; AVX512-FCP-NEXT: vmovdqa64 %xmm22, %xmm2
-; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm2, %xmm2
-; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm4 = xmm15[0,1,2,3,5,5,5,5]
+; AVX512-FCP-NEXT: vpshufb %xmm14, %xmm2, %xmm2
+; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm4 = xmm13[0,1,2,3,5,5,5,5]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm4[3],xmm2[4,5],xmm4[6],xmm2[7]
; AVX512-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm2, %zmm1
-; AVX512-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa64 %ymm24, %ymm1
-; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm1, %ymm1
-; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm14, %xmm2
-; AVX512-FCP-NEXT: vmovdqa64 %xmm23, %xmm4
-; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm4, %xmm4
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2],xmm2[3],xmm4[4,5],xmm2[6,7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7]
-; AVX512-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa64 %ymm29, %ymm1
-; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm1, %ymm1
-; AVX512-FCP-NEXT: vmovdqa64 %xmm26, %xmm2
-; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm2, %xmm0
-; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm2 = xmm13[0,1,2,3,5,5,5,5]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3],xmm0[4,5],xmm2[6],xmm0[7]
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
-; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm0
+; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vmovdqa64 %ymm24, %ymm0
+; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm12, %xmm2
+; AVX512-FCP-NEXT: vmovdqa64 %xmm23, %xmm3
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm3, %xmm3
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3],xmm3[4,5],xmm2[6,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
+; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vmovdqa64 %ymm29, %ymm0
+; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm0
+; AVX512-FCP-NEXT: vmovdqa64 %xmm26, %xmm1
+; AVX512-FCP-NEXT: vpshufb %xmm14, %xmm1, %xmm1
+; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm2 = xmm9[0,1,2,3,5,5,5,5]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7]
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
+; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm27
; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX512-FCP-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
@@ -11295,8 +11284,8 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vmovdqa64 %ymm6, %ymm17
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm5[5,6,7]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
-; AVX512-FCP-NEXT: vmovdqu (%rsp), %ymm5 # 32-byte Reload
-; AVX512-FCP-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX512-FCP-NEXT: vpblendd $219, (%rsp), %ymm5, %ymm5 # 32-byte Folded Reload
; AVX512-FCP-NEXT: # ymm5 = mem[0,1],ymm5[2],mem[3,4],ymm5[5],mem[6,7]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm5, %xmm6
; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm7 = xmm5[2,1,0,3]
@@ -11311,16 +11300,15 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: # ymm15 = ymm6[0,1],mem[2],ymm6[3],mem[4],ymm6[5,6],mem[7]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm15, %ymm6
-; AVX512-FCP-NEXT: vmovdqa64 %ymm7, %ymm25
+; AVX512-FCP-NEXT: vmovdqa64 %ymm7, %ymm22
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm6[0,1,2],ymm5[3,4,5,6,7],ymm6[8,9,10],ymm5[11,12,13,14,15]
; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,6,5,4]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm0, %zmm5
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm29 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
; AVX512-FCP-NEXT: vpternlogq $226, %zmm3, %zmm29, %zmm4
-; AVX512-FCP-NEXT: vpmovsxdq {{.*#+}} zmm3 = [18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,65535,0,0]
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm4, %zmm3, %zmm5
-; AVX512-FCP-NEXT: vmovdqa64 %zmm3, %zmm22
+; AVX512-FCP-NEXT: vpmovsxdq {{.*#+}} zmm26 = [18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,65535,0,0]
+; AVX512-FCP-NEXT: vpternlogq $184, %zmm4, %zmm26, %zmm5
; AVX512-FCP-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX512-FCP-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
@@ -11369,20 +11357,19 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX512-FCP-NEXT: vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
; AVX512-FCP-NEXT: # ymm2 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm25, %ymm0
+; AVX512-FCP-NEXT: vmovdqa64 %ymm22, %ymm0
; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm2, %ymm0
; AVX512-FCP-NEXT: vinserti128 $1, %xmm13, %ymm0, %ymm13
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm13 = ymm0[0,1,2],ymm13[3,4,5,6,7],ymm0[8,9,10],ymm13[11,12,13,14,15]
; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm13[4,5,6,7]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm25
; AVX512-FCP-NEXT: vpternlogq $226, %zmm11, %zmm29, %zmm1
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm1, %zmm22, %zmm0
-; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vpternlogq $184, %zmm1, %zmm26, %zmm25
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [u,u,2,3,6,7,u,u,14,15,14,15,14,15,14,15]
; AVX512-FCP-NEXT: vmovdqa64 %xmm24, %xmm0
; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
-; AVX512-FCP-NEXT: vmovdqa64 %xmm1, %xmm25
+; AVX512-FCP-NEXT: vmovdqa64 %xmm1, %xmm22
; AVX512-FCP-NEXT: vmovdqa64 %xmm23, %xmm1
; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,1,4,5,6,7]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
@@ -11416,10 +11403,10 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm13 = ymm15[0,1,2],ymm13[3,4,5,6,7],ymm15[8,9,10],ymm13[11,12,13,14,15]
; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,4,7,4,5]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm15[0,1,2,3],ymm13[4,5,6,7]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm0, %zmm26
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm0, %zmm23
; AVX512-FCP-NEXT: vpternlogq $226, %zmm24, %zmm29, %zmm0
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm0, %zmm22, %zmm26
-; AVX512-FCP-NEXT: vmovdqa64 %xmm25, %xmm0
+; AVX512-FCP-NEXT: vpternlogq $184, %zmm0, %zmm26, %zmm23
+; AVX512-FCP-NEXT: vmovdqa64 %xmm22, %xmm0
; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm10, %xmm0
; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm10 # 32-byte Folded Reload
; AVX512-FCP-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -11453,27 +11440,26 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm28
; AVX512-FCP-NEXT: vpternlogq $226, %zmm0, %zmm29, %zmm5
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm5, %zmm22, %zmm28
+; AVX512-FCP-NEXT: vpternlogq $184, %zmm5, %zmm26, %zmm28
; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512-FCP-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: # ymm12 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX512-FCP-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: # ymm11 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15]
-; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm12, %xmm0
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm12, %xmm2
-; AVX512-FCP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[2,2,2,2,4,5,6,7]
+; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm11, %xmm0
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm11, %xmm3
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm3[2,2,2,2,4,5,6,7]
+; AVX512-FCP-NEXT: vmovdqa64 %xmm3, %xmm24
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
; AVX512-FCP-NEXT: vmovdqa64 %ymm30, %ymm2
; AVX512-FCP-NEXT: vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
; AVX512-FCP-NEXT: # ymm2 = ymm2[0],mem[1],ymm2[2,3],mem[4],ymm2[5,6],mem[7]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[0,3,2,1]
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} xmm7 = [8,9,0,0,0,1,12,13,8,9,0,0,0,1,12,13]
-; AVX512-FCP-NEXT: vpshufb %xmm7, %xmm3, %xmm2
-; AVX512-FCP-NEXT: vmovdqa64 %xmm3, %xmm24
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u]
-; AVX512-FCP-NEXT: vpshufb %xmm15, %xmm4, %xmm3
-; AVX512-FCP-NEXT: vmovdqa64 %xmm4, %xmm25
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [0,1,2,3,0,1,4,5,8,9,12,13,0,1,12,13]
+; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm3, %xmm2
+; AVX512-FCP-NEXT: vmovdqa64 %xmm3, %xmm22
+; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm4, %xmm3
+; AVX512-FCP-NEXT: vmovdqa64 %xmm4, %xmm21
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5],xmm2[6,7]
; AVX512-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm30
@@ -11481,16 +11467,16 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
; AVX512-FCP-NEXT: # ymm4 = mem[0],ymm0[1],mem[2,3,4,5],ymm0[6],mem[7]
; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512-FCP-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: # ymm11 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
-; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm11, %xmm0
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm11, %xmm9
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm9[2,2,2,2,4,5,6,7]
+; AVX512-FCP-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm10 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: # ymm10 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm10, %xmm0
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm10, %xmm14
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm14[2,2,2,2,4,5,6,7]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} ymm29 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,4,5,0,1,12,13,24,25,20,21,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm4, %ymm3
-; AVX512-FCP-NEXT: vmovdqa64 %ymm4, %ymm20
+; AVX512-FCP-NEXT: vmovdqa64 %ymm4, %ymm17
; AVX512-FCP-NEXT: vpternlogq $236, %ymm29, %ymm3, %ymm2
; AVX512-FCP-NEXT: movw $31, %ax
; AVX512-FCP-NEXT: kmovw %eax, %k1
@@ -11498,174 +11484,164 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vmovdqa64 %ymm31, %ymm2
; AVX512-FCP-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm4 # 32-byte Folded Reload
; AVX512-FCP-NEXT: # ymm4 = mem[0],ymm2[1],mem[2,3,4,5],ymm2[6],mem[7]
-; AVX512-FCP-NEXT: vmovdqu (%rsp), %ymm2 # 32-byte Reload
-; AVX512-FCP-NEXT: vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512-FCP-NEXT: vpblendd $146, (%rsp), %ymm2, %ymm2 # 32-byte Folded Reload
; AVX512-FCP-NEXT: # ymm2 = ymm2[0],mem[1],ymm2[2,3],mem[4],ymm2[5,6],mem[7]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[0,3,2,1]
-; AVX512-FCP-NEXT: vpshufb %xmm7, %xmm3, %xmm2
-; AVX512-FCP-NEXT: vmovdqa64 %xmm3, %xmm27
-; AVX512-FCP-NEXT: vpshufb %xmm15, %xmm5, %xmm3
-; AVX512-FCP-NEXT: vmovdqa64 %xmm5, %xmm22
+; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm3, %xmm2
+; AVX512-FCP-NEXT: vmovdqa64 %xmm3, %xmm20
+; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm5, %xmm3
+; AVX512-FCP-NEXT: vmovdqa64 %xmm5, %xmm19
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5],xmm2[6,7]
; AVX512-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm4, %ymm3
; AVX512-FCP-NEXT: vmovdqa64 %ymm5, %ymm16
-; AVX512-FCP-NEXT: vmovdqa64 %ymm4, %ymm21
+; AVX512-FCP-NEXT: vmovdqa64 %ymm4, %ymm18
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6,7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm2, %ymm23
+; AVX512-FCP-NEXT: vmovdqa64 %ymm2, %ymm27
; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512-FCP-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm4 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: # ymm4 = mem[0],ymm2[1],mem[2,3,4,5],ymm2[6],mem[7]
+; AVX512-FCP-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm15 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: # ymm15 = mem[0],ymm2[1],mem[2,3,4,5],ymm2[6],mem[7]
; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512-FCP-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm14 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: # ymm14 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
-; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm14, %xmm2
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm14, %xmm5
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm5[2,2,2,2,4,5,6,7]
-; AVX512-FCP-NEXT: vmovdqa64 %xmm5, %xmm19
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6,7]
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm4, %ymm0
-; AVX512-FCP-NEXT: vmovdqa64 %ymm4, %ymm18
-; AVX512-FCP-NEXT: vpternlogq $236, %ymm29, %ymm0, %ymm2
+; AVX512-FCP-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm6 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: # ymm6 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
+; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm6, %xmm3
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm6, %xmm13
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm13[2,2,2,2,4,5,6,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5,6,7]
+; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm15, %ymm0
+; AVX512-FCP-NEXT: vpternlogq $236, %ymm29, %ymm0, %ymm3
; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512-FCP-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: # ymm4 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
-; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm4, %xmm1
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm4, %xmm0
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm0[2,2,2,2,4,5,6,7]
-; AVX512-FCP-NEXT: vmovdqa64 %xmm0, %xmm17
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3],xmm3[4],xmm1[5,6,7]
+; AVX512-FCP-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: # ymm2 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm2, %xmm1
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm2, %xmm7
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm7[2,2,2,2,4,5,6,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2,3],xmm4[4],xmm1[5,6,7]
; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512-FCP-NEXT: vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: # ymm3 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm3, %xmm13
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm8 = xmm3[0,3,2,1]
-; AVX512-FCP-NEXT: vpshufb %xmm7, %xmm13, %xmm3
-; AVX512-FCP-NEXT: vpshufb %xmm15, %xmm8, %xmm5
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4],xmm5[5],xmm3[6,7]
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm3, %zmm31
-; AVX512-FCP-NEXT: vmovdqa32 %zmm2, %zmm31 {%k1}
+; AVX512-FCP-NEXT: vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: # ymm4 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm4, %xmm9
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm8 = xmm4[0,3,2,1]
+; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm9, %xmm4
+; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm8, %xmm5
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4],xmm5[5],xmm4[6,7]
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm4, %zmm31
+; AVX512-FCP-NEXT: vmovdqa32 %zmm3, %zmm31 {%k1}
; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512-FCP-NEXT: vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm10 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: # ymm10 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
+; AVX512-FCP-NEXT: vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: # ymm5 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX512-FCP-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
; AVX512-FCP-NEXT: # ymm1 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm6
-; AVX512-FCP-NEXT: vpshufb %xmm7, %xmm6, %xmm7
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[0,3,2,1]
-; AVX512-FCP-NEXT: vpshufb %xmm15, %xmm5, %xmm1
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm7[4],xmm1[5],xmm7[6,7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
+; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm3, %xmm4
+; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm1, %xmm12
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm12[0,1,2,3],xmm4[4],xmm12[5],xmm4[6,7]
; AVX512-FCP-NEXT: vmovdqa64 %ymm16, %ymm0
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm10, %ymm7
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3,4],ymm1[5,6,7]
+; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm5, %ymm12
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm12[0,1,2,3,4],ymm4[5,6,7]
; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm16
-; AVX512-FCP-NEXT: vpbroadcastd {{.*#+}} xmm3 = [10,11,6,7,10,11,6,7,10,11,6,7,10,11,6,7]
-; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm9, %xmm9
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15]
-; AVX512-FCP-NEXT: vpshufb %xmm7, %xmm11, %xmm11
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm11[0],xmm9[1],xmm11[2,3],xmm9[4],xmm11[5,6,7]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,128,128,128,128,6,7,2,3,14,15,26,27,22,23,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm20, %ymm0
-; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm11
-; AVX512-FCP-NEXT: vmovdqa64 %ymm1, %ymm20
-; AVX512-FCP-NEXT: vpternlogq $236, %ymm29, %ymm11, %ymm9
-; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm0, %xmm11
-; AVX512-FCP-NEXT: vpshufb %xmm7, %xmm12, %xmm12
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm12[0],xmm11[1],xmm12[2,3],xmm11[4],xmm12[5,6,7]
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} xmm12 = [10,11,0,0,2,3,14,15,10,11,0,0,2,3,14,15]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [10,11,6,7,2,3,14,15,10,11,10,11,12,13,14,15]
+; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm14, %xmm14
+; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm10, %xmm10
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm14 = xmm10[0],xmm14[1],xmm10[2,3],xmm14[4],xmm10[5,6,7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,128,128,128,128,128,6,7,2,3,14,15,26,27,22,23,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm17, %ymm0
+; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm0, %ymm10
+; AVX512-FCP-NEXT: vmovdqa64 %ymm12, %ymm17
+; AVX512-FCP-NEXT: vpternlogq $236, %ymm29, %ymm10, %ymm14
; AVX512-FCP-NEXT: vmovdqa64 %xmm24, %xmm0
-; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm0, %xmm0
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u]
-; AVX512-FCP-NEXT: vmovdqa64 %xmm25, %xmm2
-; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm2, %xmm15
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm15[0,1,2,3],xmm0[4],xmm15[5],xmm0[6,7]
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm0, %zmm2
-; AVX512-FCP-NEXT: vmovdqa32 %zmm9, %zmm2 {%k1}
-; AVX512-FCP-NEXT: vmovdqa64 %xmm27, %xmm0
-; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm0, %xmm1
+; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm0, %xmm10
+; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm11, %xmm11
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm11[0],xmm10[1],xmm11[2,3],xmm10[4],xmm11[5,6,7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [0,1,2,3,2,3,6,7,10,11,14,15,2,3,14,15]
; AVX512-FCP-NEXT: vmovdqa64 %xmm22, %xmm0
-; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm0, %xmm9
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm9[0,1,2,3],xmm1[4],xmm9[5],xmm1[6,7]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm21, %ymm0
-; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm0, %ymm15
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm0, %xmm0
+; AVX512-FCP-NEXT: vmovdqa64 %xmm21, %xmm12
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm12, %xmm12
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm12[0,1,2,3],xmm0[4],xmm12[5],xmm0[6,7]
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm11, %zmm0, %zmm0
+; AVX512-FCP-NEXT: vmovdqa32 %zmm14, %zmm0 {%k1}
+; AVX512-FCP-NEXT: vmovdqa64 %xmm20, %xmm11
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm11, %xmm11
+; AVX512-FCP-NEXT: vmovdqa64 %xmm19, %xmm12
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm12, %xmm12
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm12[0,1,2,3],xmm11[4],xmm12[5],xmm11[6,7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm18, %ymm14
+; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm14, %ymm14
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm11, %ymm0, %ymm11
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm14[0,1,2,3,4],ymm11[5,6,7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm17, %ymm14
+; AVX512-FCP-NEXT: vpshufb %ymm14, %ymm15, %ymm14
+; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm13, %xmm13
+; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm6, %xmm6
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0],xmm13[1],xmm6[2,3],xmm13[4],xmm6[5,6,7]
+; AVX512-FCP-NEXT: vpternlogq $236, %ymm29, %ymm14, %ymm6
+; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm7, %xmm7
+; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm2, %xmm2
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm7[1],xmm2[2,3],xmm7[4],xmm2[5,6,7]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm27, %zmm0, %zmm4
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm16, %zmm0, %zmm7
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm0, %zmm11
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm9, %xmm9
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm8, %xmm8
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm9[4],xmm8[5],xmm9[6,7]
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm2, %zmm8, %zmm2
+; AVX512-FCP-NEXT: vmovdqa32 %zmm6, %zmm2 {%k1}
+; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm5, %ymm5
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm3, %xmm3
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm1, %xmm1
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4],xmm1[5],xmm3[6,7]
; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm15[0,1,2,3,4],ymm1[5,6,7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm18, %ymm0
-; AVX512-FCP-NEXT: vmovdqa64 %ymm20, %ymm15
-; AVX512-FCP-NEXT: vpshufb %ymm15, %ymm0, %ymm15
-; AVX512-FCP-NEXT: vmovdqa64 %xmm19, %xmm0
-; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm0, %xmm0
-; AVX512-FCP-NEXT: vpshufb %xmm7, %xmm14, %xmm14
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm14[0],xmm0[1],xmm14[2,3],xmm0[4],xmm14[5,6,7]
-; AVX512-FCP-NEXT: vpternlogq $236, %ymm29, %ymm15, %ymm0
-; AVX512-FCP-NEXT: vmovdqa64 %xmm17, %xmm14
-; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm14, %xmm3
-; AVX512-FCP-NEXT: vpshufb %xmm7, %xmm4, %xmm4
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1],xmm4[2,3],xmm3[4],xmm4[5,6,7]
-; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm13, %xmm4
-; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm8, %xmm7
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm7[0,1,2,3],xmm4[4],xmm7[5],xmm4[6,7]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm23, %zmm0, %zmm7
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm16, %zmm0, %zmm8
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3,4],ymm1[5,6,7]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm1
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm3, %zmm4, %zmm3
-; AVX512-FCP-NEXT: vmovdqa32 %zmm0, %zmm3 {%k1}
-; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm10, %ymm0
-; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm6, %xmm4
-; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm5, %xmm5
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4],xmm5[5],xmm4[6,7]
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm4[5,6,7]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm4 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm3 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm5 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm5 # 64-byte Folded Reload
; AVX512-FCP-NEXT: movw $-2048, %ax # imm = 0xF800
; AVX512-FCP-NEXT: kmovw %eax, %k1
; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
; AVX512-FCP-NEXT: vmovdqa32 %zmm6, %zmm5 {%k1}
; AVX512-FCP-NEXT: vmovdqa64 %zmm5, (%rsi)
; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm5 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm5 # 64-byte Folded Reload
; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
; AVX512-FCP-NEXT: vmovdqa32 %zmm6, %zmm5 {%k1}
; AVX512-FCP-NEXT: vmovdqa64 %zmm5, 64(%rsi)
; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm5 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm5 # 64-byte Folded Reload
; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
; AVX512-FCP-NEXT: vmovdqa32 %zmm6, %zmm5 {%k1}
; AVX512-FCP-NEXT: vmovdqa64 %zmm5, 64(%rdx)
; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm5 # 64-byte Folded Reload
-; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512-FCP-NEXT: vmovdqa32 %zmm4, %zmm5 {%k1}
+; AVX512-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm5 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512-FCP-NEXT: vmovdqa32 %zmm3, %zmm5 {%k1}
; AVX512-FCP-NEXT: vmovdqa64 %zmm5, (%rdx)
-; AVX512-FCP-NEXT: vpmovsxdq {{.*#+}} zmm4 = [18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,65535,0,0]
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm30, %zmm4, %zmm7
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm31, %zmm4, %zmm8
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm2, %zmm4, %zmm1
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm3, %zmm4, %zmm0
-; AVX512-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512-FCP-NEXT: vmovaps %zmm2, 64(%rcx)
-; AVX512-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512-FCP-NEXT: vmovaps %zmm2, (%rcx)
+; AVX512-FCP-NEXT: vpternlogq $184, %zmm30, %zmm26, %zmm4
+; AVX512-FCP-NEXT: vpternlogq $184, %zmm31, %zmm26, %zmm7
+; AVX512-FCP-NEXT: vpternlogq $184, %zmm0, %zmm26, %zmm11
+; AVX512-FCP-NEXT: vpternlogq $184, %zmm2, %zmm26, %zmm1
+; AVX512-FCP-NEXT: vmovdqa64 %zmm25, 64(%rcx)
+; AVX512-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512-FCP-NEXT: vmovaps %zmm0, (%rcx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm28, 64(%r8)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm26, (%r8)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm8, 64(%r9)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm7, (%r9)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm23, (%r8)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm7, 64(%r9)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm4, (%r9)
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512-FCP-NEXT: vmovdqa64 %zmm0, 64(%rax)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm1, (%rax)
-; AVX512-FCP-NEXT: addq $1480, %rsp # imm = 0x5C8
+; AVX512-FCP-NEXT: vmovdqa64 %zmm1, 64(%rax)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm11, (%rax)
+; AVX512-FCP-NEXT: addq $1416, %rsp # imm = 0x588
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
@@ -11694,8 +11670,8 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm23
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3,4],xmm3[5,6,7]
; AVX512DQ-NEXT: vmovdqa64 %xmm3, %xmm22
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm11 = [0,1,4,5,4,5,6,7,0,1,12,13,8,9,4,5]
-; AVX512DQ-NEXT: vpshufb %xmm11, %xmm1, %xmm1
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm10 = [0,1,4,5,4,5,6,7,0,1,12,13,8,9,4,5]
+; AVX512DQ-NEXT: vpshufb %xmm10, %xmm1, %xmm1
; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX512DQ-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
; AVX512DQ-NEXT: vmovdqa 448(%rdi), %ymm1
@@ -11714,8 +11690,8 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vinserti128 $1, 480(%rdi), %ymm1, %ymm1
; AVX512DQ-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm3 = ymm1[0],ymm3[1],ymm1[2,3,4,5],ymm3[6],ymm1[7]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,12,13,8,9,12,13,8,9,12,13,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
-; AVX512DQ-NEXT: vpshufb %ymm6, %ymm3, %ymm1
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm5 = [0,1,12,13,8,9,12,13,8,9,12,13,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
+; AVX512DQ-NEXT: vpshufb %ymm5, %ymm3, %ymm1
; AVX512DQ-NEXT: vmovdqa64 %ymm3, %ymm20
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm3 = ymm2[0,1,2],ymm1[3,4,5,6,7]
; AVX512DQ-NEXT: vmovdqa 640(%rdi), %ymm1
@@ -11728,16 +11704,16 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm2 = xmm12[2,2,2,2,4,5,6,7]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0,1,2],xmm2[3,4],xmm4[5,6,7]
; AVX512DQ-NEXT: vmovdqa64 %xmm4, %xmm19
-; AVX512DQ-NEXT: vpshufb %xmm11, %xmm2, %xmm2
+; AVX512DQ-NEXT: vpshufb %xmm10, %xmm2, %xmm2
; AVX512DQ-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX512DQ-NEXT: vperm2i128 {{.*#+}} ymm4 = ymm1[2,3],mem[2,3]
; AVX512DQ-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-NEXT: vinserti128 $1, 672(%rdi), %ymm1, %ymm1
; AVX512DQ-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm4 = ymm1[0],ymm4[1],ymm1[2,3,4,5],ymm4[6],ymm1[7]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm5 = [0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u]
-; AVX512DQ-NEXT: vpshufb %ymm5, %ymm4, %ymm1
-; AVX512DQ-NEXT: vmovdqa64 %ymm5, %ymm27
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u]
+; AVX512DQ-NEXT: vpshufb %ymm6, %ymm4, %ymm1
+; AVX512DQ-NEXT: vmovdqa64 %ymm6, %ymm27
; AVX512DQ-NEXT: vmovdqa64 %ymm4, %ymm18
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,6]
@@ -11763,87 +11739,86 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vmovdqa 128(%rdi), %ymm2
; AVX512DQ-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm4 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX512DQ-NEXT: vextracti128 $1, %ymm4, %xmm8
+; AVX512DQ-NEXT: vextracti128 $1, %ymm4, %xmm11
; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm1 = xmm4[2,2,2,2,4,5,6,7]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = xmm8[0,1,2],xmm1[3,4],xmm8[5,6,7]
-; AVX512DQ-NEXT: vpshufb %xmm11, %xmm1, %xmm1
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = xmm11[0,1,2],xmm1[3,4],xmm11[5,6,7]
+; AVX512DQ-NEXT: vpshufb %xmm10, %xmm1, %xmm1
; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX512DQ-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm17
; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0
; AVX512DQ-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512DQ-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm5 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
-; AVX512DQ-NEXT: vpshufb %xmm9, %xmm5, %xmm1
-; AVX512DQ-NEXT: vextracti128 $1, %ymm5, %xmm9
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm3 = xmm9[0,2,0,3]
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2],xmm1[3],xmm3[4,5],xmm1[6,7]
-; AVX512DQ-NEXT: vmovdqa 64(%rdi), %ymm3
-; AVX512DQ-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],mem[2,3]
-; AVX512DQ-NEXT: vinserti128 $1, 96(%rdi), %ymm3, %ymm3
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm7 = ymm3[0],ymm0[1],ymm3[2,3,4,5],ymm0[6],ymm3[7]
-; AVX512DQ-NEXT: vmovdqa64 %ymm3, %ymm31
-; AVX512DQ-NEXT: vmovdqa64 %ymm0, %ymm30
-; AVX512DQ-NEXT: vpshufb %ymm6, %ymm7, %ymm3
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm6 = ymm1[0,1,2],ymm3[3,4,5,6,7]
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm3 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
+; AVX512DQ-NEXT: vpshufb %xmm9, %xmm3, %xmm0
+; AVX512DQ-NEXT: vextracti128 $1, %ymm3, %xmm9
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm2 = xmm9[0,2,0,3]
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2],xmm0[3],xmm2[4,5],xmm0[6,7]
+; AVX512DQ-NEXT: vmovdqa 64(%rdi), %ymm2
+; AVX512DQ-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],mem[2,3]
+; AVX512DQ-NEXT: vinserti128 $1, 96(%rdi), %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm6 = ymm2[0],ymm1[1],ymm2[2,3,4,5],ymm1[6],ymm2[7]
+; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm31
+; AVX512DQ-NEXT: vmovdqa64 %ymm1, %ymm30
+; AVX512DQ-NEXT: vpshufb %ymm5, %ymm6, %ymm2
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm7 = ymm0[0,1,2],ymm2[3,4,5,6,7]
; AVX512DQ-NEXT: vmovdqa 352(%rdi), %ymm0
; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-NEXT: vmovdqa 320(%rdi), %ymm1
; AVX512DQ-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm10 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX512DQ-NEXT: vextracti128 $1, %ymm10, %xmm3
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm1 = xmm10[2,2,2,2,4,5,6,7]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3,4],xmm3[5,6,7]
-; AVX512DQ-NEXT: vpshufb %xmm11, %xmm1, %xmm11
-; AVX512DQ-NEXT: vmovdqa 256(%rdi), %ymm1
-; AVX512DQ-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm1[2,3],mem[2,3]
-; AVX512DQ-NEXT: vinserti128 $1, 288(%rdi), %ymm1, %ymm0
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0],ymm2[1],ymm0[2,3,4,5],ymm2[6],ymm0[7]
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm5 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX512DQ-NEXT: vextracti128 $1, %ymm5, %xmm8
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm2 = xmm5[2,2,2,2,4,5,6,7]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm2 = xmm8[0,1,2],xmm2[3,4],xmm8[5,6,7]
+; AVX512DQ-NEXT: vpshufb %xmm10, %xmm2, %xmm10
+; AVX512DQ-NEXT: vmovdqa 256(%rdi), %ymm2
+; AVX512DQ-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],mem[2,3]
+; AVX512DQ-NEXT: vinserti128 $1, 288(%rdi), %ymm2, %ymm0
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm2 = ymm0[0],ymm1[1],ymm0[2,3,4,5],ymm1[6],ymm0[7]
; AVX512DQ-NEXT: vmovdqa64 %ymm0, %ymm26
-; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm29
+; AVX512DQ-NEXT: vmovdqa64 %ymm1, %ymm29
; AVX512DQ-NEXT: vmovdqa64 %ymm27, %ymm0
-; AVX512DQ-NEXT: vpshufb %ymm0, %ymm1, %ymm0
-; AVX512DQ-NEXT: vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm11 = ymm0[0,1,2],ymm11[3,4,5,6,7],ymm0[8,9,10],ymm11[11,12,13,14,15]
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm2, %ymm0
+; AVX512DQ-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm10 = ymm0[0,1,2],ymm10[3,4,5,6,7],ymm0[8,9,10],ymm10[11,12,13,14,15]
; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,6]
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm11[4,5,6,7]
-; AVX512DQ-NEXT: vpternlogq $226, %zmm17, %zmm16, %zmm6
-; AVX512DQ-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm6 {%k1}
-; AVX512DQ-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vpbroadcastq {{.*#+}} xmm11 = [2,3,14,15,10,11,0,0,2,3,14,15,10,11,0,0]
-; AVX512DQ-NEXT: vpshufb %xmm11, %xmm14, %xmm0
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm14 = [2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
-; AVX512DQ-NEXT: vpshufb %xmm14, %xmm13, %xmm13
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm10[4,5,6,7]
+; AVX512DQ-NEXT: vpternlogq $226, %zmm17, %zmm16, %zmm7
+; AVX512DQ-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm7 {%k1}
+; AVX512DQ-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm10 = [2,3,14,15,10,11,6,7,2,3,14,15,12,13,14,15]
+; AVX512DQ-NEXT: vpshufb %xmm10, %xmm14, %xmm0
+; AVX512DQ-NEXT: vpshufb %xmm10, %xmm13, %xmm13
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm13[0,1],xmm0[2],xmm13[3],xmm0[4,5],xmm13[6,7]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm13 = [6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
-; AVX512DQ-NEXT: vpshufb %xmm13, %xmm8, %xmm6
+; AVX512DQ-NEXT: vpshufb %xmm13, %xmm11, %xmm7
; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0,1,2],xmm4[3],xmm6[4,5],xmm4[6],xmm6[7]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm4 = xmm7[0,1,2],xmm4[3],xmm7[4,5],xmm4[6],xmm7[7]
; AVX512DQ-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
; AVX512DQ-NEXT: vinserti32x4 $2, %xmm0, %zmm4, %zmm0
-; AVX512DQ-NEXT: vpshufb %xmm11, %xmm9, %xmm4
-; AVX512DQ-NEXT: vpshufb %xmm14, %xmm5, %xmm2
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2],xmm2[3],xmm4[4,5],xmm2[6,7]
+; AVX512DQ-NEXT: vpshufb %xmm10, %xmm9, %xmm4
+; AVX512DQ-NEXT: vpshufb %xmm10, %xmm3, %xmm3
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2],xmm3[3],xmm4[4,5],xmm3[6,7]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [2,3,14,15,10,11,10,11,14,15,10,11,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
-; AVX512DQ-NEXT: vpshufb %ymm4, %ymm7, %ymm5
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm5 = ymm2[0,1,2],ymm5[3,4,5,6,7]
-; AVX512DQ-NEXT: vpshufb %xmm13, %xmm3, %xmm2
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm3 = xmm10[0,1,2,3,5,5,5,5]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[3],xmm2[4,5],xmm3[6],xmm2[7]
+; AVX512DQ-NEXT: vpshufb %ymm4, %ymm6, %ymm6
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm6 = ymm3[0,1,2],ymm6[3,4,5,6,7]
+; AVX512DQ-NEXT: vpshufb %xmm13, %xmm8, %xmm3
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm1 = xmm5[0,1,2,3,5,5,5,5]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3],xmm3[4,5],xmm1[6],xmm3[7]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23]
-; AVX512DQ-NEXT: vpshufb %ymm3, %ymm1, %ymm1
-; AVX512DQ-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,7]
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX512DQ-NEXT: vpternlogq $226, %zmm0, %zmm16, %zmm5
-; AVX512DQ-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm5 {%k1}
-; AVX512DQ-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,5,7]
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX512DQ-NEXT: vpternlogq $226, %zmm0, %zmm16, %zmm6
+; AVX512DQ-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm6 {%k1}
+; AVX512DQ-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT: vmovdqa64 %xmm24, %xmm0
-; AVX512DQ-NEXT: vpshufb %xmm11, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpshufb %xmm10, %xmm0, %xmm0
; AVX512DQ-NEXT: vmovdqa64 %ymm25, %ymm1
-; AVX512DQ-NEXT: vpshufb %xmm14, %xmm1, %xmm1
+; AVX512DQ-NEXT: vpshufb %xmm10, %xmm1, %xmm1
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
; AVX512DQ-NEXT: vmovdqa64 %xmm22, %xmm1
; AVX512DQ-NEXT: vpshufb %xmm13, %xmm1, %xmm1
@@ -11855,8 +11830,8 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vmovdqa64 %ymm20, %ymm1
; AVX512DQ-NEXT: vpshufb %ymm4, %ymm1, %ymm1
; AVX512DQ-NEXT: vmovdqa64 %xmm21, %xmm2
-; AVX512DQ-NEXT: vpshufb %xmm11, %xmm2, %xmm2
-; AVX512DQ-NEXT: vpshufb %xmm14, %xmm15, %xmm4
+; AVX512DQ-NEXT: vpshufb %xmm10, %xmm2, %xmm2
+; AVX512DQ-NEXT: vpshufb %xmm10, %xmm15, %xmm4
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0,1],xmm2[2],xmm4[3],xmm2[4,5],xmm4[6,7]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm4 = ymm2[0,1,2],ymm1[3,4,5,6,7]
; AVX512DQ-NEXT: vmovdqa64 %ymm18, %ymm1
@@ -12271,19 +12246,18 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
;
; AVX512DQ-FCP-LABEL: load_i16_stride6_vf64:
; AVX512DQ-FCP: # %bb.0:
-; AVX512DQ-FCP-NEXT: subq $904, %rsp # imm = 0x388
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
+; AVX512DQ-FCP-NEXT: subq $872, %rsp # imm = 0x368
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [0,1,12,13,0,1,4,5,8,9,12,13,12,13,14,15]
; AVX512DQ-FCP-NEXT: vmovdqa 608(%rdi), %ymm0
; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-FCP-NEXT: vmovdqa 576(%rdi), %ymm1
; AVX512DQ-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm2, %xmm0
-; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} xmm12 = [8,9,12,13,0,1,0,0,8,9,12,13,0,1,0,0]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm1
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm2, %ymm25
; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,1,0,3]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm2, %xmm1
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm2, %xmm1
; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm2, %xmm24
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
; AVX512DQ-FCP-NEXT: vmovdqa 544(%rdi), %ymm1
@@ -12291,121 +12265,120 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vmovdqa 512(%rdi), %ymm2
; AVX512DQ-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX512DQ-FCP-NEXT: vpbroadcastd {{.*#+}} xmm10 = [8,9,4,5,8,9,4,5,8,9,4,5,8,9,4,5]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [0,1,4,5,4,5,4,5,0,1,12,13,8,9,4,5]
; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm2, %xmm1
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm4
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm2, %ymm23
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm2, %xmm4, %xmm3
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm4, %xmm22
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3],xmm3[4,5],xmm1[6],xmm3[7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm3, %xmm2
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm3, %xmm22
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
-; AVX512DQ-FCP-NEXT: vmovdqa 416(%rdi), %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa 448(%rdi), %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa 416(%rdi), %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-FCP-NEXT: vmovdqa 384(%rdi), %ymm3
; AVX512DQ-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm15 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6],ymm1[7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm15, %xmm1
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm15 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6],ymm2[7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm15, %xmm2
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm15, %xmm3
; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[2,1,0,3]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm4, %xmm3
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm4, %xmm3
; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm4, %xmm21
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2],xmm1[3],xmm3[4,5],xmm1[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa 448(%rdi), %ymm3
-; AVX512DQ-FCP-NEXT: vperm2i128 {{.*#+}} ymm4 = ymm3[2,3],mem[2,3]
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vinserti128 $1, 480(%rdi), %ymm3, %ymm3
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3],xmm3[4,5],xmm2[6,7]
+; AVX512DQ-FCP-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],mem[2,3]
; AVX512DQ-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm3[0],ymm4[1],ymm3[2,3,4,5],ymm4[6],ymm3[7]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, 480(%rdi), %ymm1, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm1[0],ymm3[1],ymm1[2,3,4,5],ymm3[6],ymm1[7]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,12,13,8,9,12,13,8,9,12,13,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm4, %ymm3
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm4, %ymm20
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm1[0,1,2],ymm3[3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm3, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm3, %ymm20
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm2[0,1,2],ymm1[3,4,5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa 736(%rdi), %ymm1
; AVX512DQ-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa 704(%rdi), %ymm3
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm3[0,1],ymm1[2],ymm3[3,4],ymm1[5],ymm3[6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm14, %xmm1
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm14, %xmm5
-; AVX512DQ-FCP-NEXT: vpshufb %xmm2, %xmm5, %xmm3
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm5, %xmm19
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3],xmm3[4,5],xmm1[6],xmm3[7]
-; AVX512DQ-FCP-NEXT: vmovdqa 640(%rdi), %ymm3
+; AVX512DQ-FCP-NEXT: vmovdqa 704(%rdi), %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm12, %xmm1
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm12, %xmm4
+; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm4, %xmm2
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm4, %xmm19
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
+; AVX512DQ-FCP-NEXT: vmovdqa 640(%rdi), %ymm2
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-FCP-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm3[2,3],mem[2,3]
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vinserti128 $1, 672(%rdi), %ymm3, %ymm3
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm3[0],ymm5[1],ymm3[2,3,4,5],ymm5[6],ymm3[7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm5, %ymm3
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm7, %ymm26
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm5, %ymm18
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3,4,5,6,7],ymm3[8,9,10],ymm1[11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,6]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vperm2i128 {{.*#+}} ymm4 = ymm2[2,3],mem[2,3]
+; AVX512DQ-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vinserti128 $1, 672(%rdi), %ymm2, %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm2[0],ymm4[1],ymm2[2,3,4,5],ymm4[6],ymm2[7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm4, %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm5, %ymm26
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm4, %ymm18
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,6]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm16 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm0, %zmm16, %zmm4
+; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm0, %zmm16, %zmm3
; AVX512DQ-FCP-NEXT: movw $-2048, %ax # imm = 0xF800
; AVX512DQ-FCP-NEXT: kmovw %eax, %k1
-; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm4 {%k1}
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm3 {%k1}
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT: vmovdqa 224(%rdi), %ymm0
; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-FCP-NEXT: vmovdqa 192(%rdi), %ymm1
; AVX512DQ-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm11, %xmm0
-; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm11, %xmm1
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm7 = xmm0[2,1,0,3]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm7, %xmm0
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm13, %xmm0
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm13, %xmm1
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm14 = xmm0[2,1,0,3]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm14, %xmm0
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
; AVX512DQ-FCP-NEXT: vmovdqa 160(%rdi), %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm1, (%rsp) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa 128(%rdi), %ymm3
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm3[0,1],ymm1[2],ymm3[3,4],ymm1[5],ymm3[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa 128(%rdi), %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqu %ymm2, (%rsp) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm5, %xmm1
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm5, %xmm13
-; AVX512DQ-FCP-NEXT: vpshufb %xmm2, %xmm13, %xmm3
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3],xmm3[4,5],xmm1[6],xmm3[7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm5, %xmm7
+; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm7, %xmm2
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm17
; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm0
; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512DQ-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm4, %xmm0
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm4, %xmm3
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm9 = xmm3[2,1,0,3]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm9, %xmm3
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2],xmm0[3],xmm3[4,5],xmm0[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa 64(%rdi), %ymm3
-; AVX512DQ-FCP-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm3[2,3],mem[2,3]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm3, %xmm0
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm3, %xmm2
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm11 = xmm0[2,1,0,3]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm11, %xmm0
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3],xmm0[4,5],xmm2[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa 64(%rdi), %ymm2
+; AVX512DQ-FCP-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],mem[2,3]
; AVX512DQ-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vinserti128 $1, 96(%rdi), %ymm3, %ymm3
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm3[0],ymm1[1],ymm3[2,3,4,5],ymm1[6],ymm3[7]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm12, %ymm3
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm0[0,1,2],ymm3[3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, 96(%rdi), %ymm2, %ymm2
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm2[0],ymm1[1],ymm2[2,3,4,5],ymm1[6],ymm2[7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm2, %ymm31
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm9, %ymm2
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm0[0,1,2],ymm2[3,4,5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa 352(%rdi), %ymm0
; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-FCP-NEXT: vmovdqa 320(%rdi), %ymm1
; AVX512DQ-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm4, %xmm2
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm4, %xmm8
; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm8, %xmm10
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm8, %xmm3
-; AVX512DQ-FCP-NEXT: vpshufb %xmm2, %xmm3, %xmm2
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm2[0,1,2],xmm10[3],xmm2[4,5],xmm10[6],xmm2[7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1,2],xmm2[3],xmm10[4,5],xmm2[6],xmm10[7]
; AVX512DQ-FCP-NEXT: vmovdqa 256(%rdi), %ymm2
; AVX512DQ-FCP-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],mem[2,3]
; AVX512DQ-FCP-NEXT: vinserti128 $1, 288(%rdi), %ymm2, %ymm0
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm0[0],ymm1[1],ymm0[2,3,4,5],ymm1[6],ymm0[7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm30
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm1, %ymm31
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm29
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm1, %ymm30
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm26, %ymm0
; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm2, %ymm0
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
@@ -12415,25 +12388,24 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm17, %zmm16, %zmm6
; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm6 {%k1}
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm11, %xmm0
-; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} xmm11 = [10,11,14,15,2,3,0,0,10,11,14,15,2,3,0,0]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm11, %xmm7, %xmm7
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm0[0,1],xmm7[2],xmm0[3],xmm7[4,5],xmm0[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm13, %xmm6
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [2,3,14,15,2,3,6,7,10,11,14,15,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm13, %xmm0
+; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm14, %xmm13
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm13[2],xmm0[3],xmm13[4,5],xmm0[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm13 = [6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm7, %xmm7
; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1,2],xmm5[3],xmm6[4,5],xmm5[6],xmm6[7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm7[0,1,2],xmm5[3],xmm7[4,5],xmm5[6],xmm7[7]
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm7, %zmm5, %zmm5
-; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm4, %xmm4
-; AVX512DQ-FCP-NEXT: vpshufb %xmm11, %xmm9, %xmm6
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm6[2],xmm4[3],xmm6[4,5],xmm4[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [2,3,14,15,10,11,10,11,14,15,10,11,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm12, %ymm7
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2],ymm7[3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm3, %xmm3
-; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm8[0,1,2,3,5,5,5,5]
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm5, %zmm0
+; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm3, %xmm3
+; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm11, %xmm5
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm5[2],xmm3[3],xmm5[4,5],xmm3[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [2,3,14,15,10,11,10,11,14,15,10,11,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm9, %ymm6
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm3[0,1,2],ymm6[3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm8, %xmm3
+; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm4[0,1,2,3,5,5,5,5]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3],xmm3[4,5],xmm1[6],xmm3[7]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm3 = [2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23]
; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm2
@@ -12441,40 +12413,40 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,5,7]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm5, %zmm16, %zmm4
-; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm4 {%k1}
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm25, %ymm1
+; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm0, %zmm16, %zmm6
+; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm6 {%k1}
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm25, %ymm0
+; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm0, %xmm0
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm24, %xmm1
; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm1, %xmm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm24, %xmm2
-; AVX512DQ-FCP-NEXT: vpshufb %xmm11, %xmm2, %xmm2
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm22, %xmm2
-; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm2, %xmm2
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm23, %ymm4
-; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm4[3],xmm2[4,5],xmm4[6],xmm2[7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm22, %xmm1
+; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm1, %xmm1
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm23, %ymm2
+; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm20, %ymm1
+; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm1, %ymm1
+; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm15, %xmm2
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm21, %xmm4
+; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm4, %xmm4
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2],xmm2[3],xmm4[4,5],xmm2[6,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm2[0,1,2],ymm1[3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm18, %ymm1
+; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm1, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm19, %xmm2
+; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm2, %xmm2
+; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm3 = xmm12[0,1,2,3,5,5,5,5]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[3],xmm2[4,5],xmm3[6],xmm2[7]
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm2, %zmm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm20, %ymm2
-; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm2, %ymm2
-; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm15, %xmm4
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm21, %xmm5
-; AVX512DQ-FCP-NEXT: vpshufb %xmm11, %xmm5, %xmm5
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2],xmm4[3],xmm5[4,5],xmm4[6,7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2],ymm2[3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm18, %ymm2
-; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm19, %xmm3
-; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm3, %xmm0
-; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm3 = xmm14[0,1,2,3,5,5,5,5]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[3],xmm0[4,5],xmm3[6],xmm0[7]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7],ymm2[8,9,10],ymm0[11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,5,7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm1, %zmm16, %zmm4
-; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm4 {%k1}
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm0, %zmm16, %zmm4
+; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm4 {%k1}
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX512DQ-FCP-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
@@ -12486,8 +12458,8 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm15, %xmm1
; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm14[2,1,2,0,4,5,6,7]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqu (%rsp), %ymm2 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512DQ-FCP-NEXT: vpblendd $36, (%rsp), %ymm2, %ymm2 # 32-byte Folded Reload
; AVX512DQ-FCP-NEXT: # ymm2 = ymm2[0,1],mem[2],ymm2[3,4],mem[5],ymm2[6,7]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[2,1,0,3]
@@ -12512,7 +12484,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm5[2,1,2,0,4,5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm5, %xmm22
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm1[1,2],xmm4[3],xmm1[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm31, %ymm1
; AVX512DQ-FCP-NEXT: vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
; AVX512DQ-FCP-NEXT: # ymm6 = ymm1[0,1],mem[2],ymm1[3],mem[4],ymm1[5,6],mem[7]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
@@ -12532,12 +12504,12 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm7, %xmm18
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm6[4],xmm5[5,6],xmm6[7]
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm30, %ymm6
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm31, %ymm7
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm29, %ymm6
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm30, %ymm7
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm6[0,1],ymm7[2],ymm6[3],ymm7[4],ymm6[5,6],ymm7[7]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm7, %ymm6
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm8, %ymm27
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm8, %ymm26
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm7, %ymm17
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm6[0,1,2],ymm5[3,4,5,6,7],ymm6[8,9,10],ymm5[11,12,13,14,15]
; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,6,5,4]
@@ -12545,9 +12517,8 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm0, %zmm5
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm20 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm3, %zmm20, %zmm4
-; AVX512DQ-FCP-NEXT: vpmovsxdq {{.*#+}} zmm3 = [18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,65535,0,0]
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm4, %zmm3, %zmm5
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm3, %zmm26
+; AVX512DQ-FCP-NEXT: vpmovsxdq {{.*#+}} zmm28 = [18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,65535,0,0]
+; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm4, %zmm28, %zmm5
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX512DQ-FCP-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
@@ -12597,18 +12568,18 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX512DQ-FCP-NEXT: vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
; AVX512DQ-FCP-NEXT: # ymm2 = ymm0[0,1],mem[2],ymm0[3],mem[4],ymm0[5,6],mem[7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm27, %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm26, %ymm0
; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm2, %ymm0
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm10 = ymm0[0,1,2],ymm10[3,4,5,6,7],ymm0[8,9,10],ymm10[11,12,13,14,15]
; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm10[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm29
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm27
; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm9, %zmm20, %zmm1
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm1, %zmm26, %zmm29
+; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm1, %zmm28, %zmm27
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [u,u,2,3,6,7,u,u,14,15,14,15,14,15,14,15]
; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm15, %xmm0
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm1, %xmm28
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm1, %xmm26
; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm1 = xmm14[3,1,2,1,4,5,6,7]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u]
@@ -12642,11 +12613,11 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm14 = ymm0[0,1,2],ymm14[3,4,5,6,7],ymm0[8,9,10],ymm14[11,12,13,14,15]
; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,4,5]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm14[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm27
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm19
; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm21, %zmm20, %zmm1
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm1, %zmm26, %zmm27
+; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm1, %zmm28, %zmm19
; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm16, %xmm0
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm28, %xmm1
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm26, %xmm1
; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm1 = xmm13[3,1,2,1,4,5,6,7]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
@@ -12671,197 +12642,192 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm21
; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm0, %zmm20, %zmm1
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm1, %zmm26, %zmm21
+; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm1, %zmm28, %zmm21
; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm13 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # ymm13 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm13, %xmm1
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm13, %xmm14
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm14[2,2,2,2,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqu (%rsp), %ymm2 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm9 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # ymm9 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm9, %xmm0
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm9, %xmm3
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm3[2,2,2,2,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm3, %xmm11
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512DQ-FCP-NEXT: vpblendd $109, (%rsp), %ymm2, %ymm2 # 32-byte Folded Reload
; AVX512DQ-FCP-NEXT: # ymm2 = mem[0],ymm2[1],mem[2,3],ymm2[4],mem[5,6],ymm2[7]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[0,3,2,1]
-; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} xmm4 = [8,9,0,0,0,1,12,13,8,9,0,0,0,1,12,13]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm3, %xmm2
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm3, %xmm26
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm11, %xmm5, %xmm3
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm5, %xmm28
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[0,3,2,1]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm13 = [0,1,2,3,0,1,4,5,8,9,12,13,0,1,12,13]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm3, %xmm2
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm3, %xmm16
+; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm4, %xmm3
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm4, %xmm17
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5],xmm2[6,7]
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm2, %zmm22
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm5 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # ymm5 = mem[0],ymm1[1],mem[2,3,4,5],ymm1[6],mem[7]
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm12 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # ymm12 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm12, %xmm1
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm12, %xmm10
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm10[2,2,2,2,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm22
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm31, %ymm0
+; AVX512DQ-FCP-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # ymm14 = mem[0],ymm0[1],mem[2,3,4,5],ymm0[6],mem[7]
+; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-FCP-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm10 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # ymm10 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm10, %xmm0
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm10, %xmm15
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm15[2,2,2,2,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} ymm20 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,128,128,128,128,4,5,0,1,12,13,24,25,20,21,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm5, %ymm3
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm5, %ymm18
-; AVX512DQ-FCP-NEXT: vpternlogq $236, %ymm20, %ymm3, %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,4,5,0,1,12,13,24,25,20,21,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm14, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq $236, %ymm20, %ymm3, %ymm0
; AVX512DQ-FCP-NEXT: movw $31, %ax
; AVX512DQ-FCP-NEXT: kmovw %eax, %k1
-; AVX512DQ-FCP-NEXT: vinserti32x8 $0, %ymm2, %zmm0, %zmm22 {%k1}
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm30, %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm31, %ymm3
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm3[0],ymm2[1],ymm3[2,3,4,5],ymm2[6],ymm3[7]
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # ymm2 = ymm2[0],mem[1],ymm2[2,3],mem[4],ymm2[5,6],mem[7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm2[0,3,2,1]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm3, %xmm2
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm3, %xmm19
-; AVX512DQ-FCP-NEXT: vpshufb %xmm11, %xmm6, %xmm3
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm6, %xmm30
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5],xmm2[6,7]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm5, %ymm3
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm6, %ymm25
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm5, %ymm31
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6,7]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm23
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # ymm5 = ymm2[0],mem[1],ymm2[2,3,4,5],mem[6],ymm2[7]
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm15 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # ymm15 = ymm2[0,1],mem[2],ymm2[3,4],mem[5],ymm2[6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm15, %xmm2
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm15, %xmm6
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm6[2,2,2,2,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm6, %xmm17
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm5, %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm5, %ymm16
-; AVX512DQ-FCP-NEXT: vpternlogq $236, %ymm20, %ymm1, %ymm3
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # ymm2 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm2, %xmm0
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm9
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm1 = xmm9[2,2,2,2,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # ymm1 = mem[0],ymm1[1],mem[2,3],ymm1[4],mem[5,6],ymm1[7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm8
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm7 = xmm1[0,3,2,1]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm8, %xmm1
-; AVX512DQ-FCP-NEXT: vpshufb %xmm11, %xmm7, %xmm5
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm5[0,1,2,3],xmm1[4],xmm5[5],xmm1[6,7]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm24
+; AVX512DQ-FCP-NEXT: vinserti32x8 $0, %ymm0, %zmm0, %zmm22 {%k1}
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm29, %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm30, %ymm3
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm3[0],ymm0[1],ymm3[2,3,4,5],ymm0[6],ymm3[7]
+; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-FCP-NEXT: vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm3
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[0,3,2,1]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm3, %xmm0
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm3, %xmm18
+; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm5, %xmm3
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm5, %xmm29
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4],xmm3[5],xmm0[6,7]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm4, %ymm3
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm5, %ymm26
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm4, %ymm30
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm0[5,6,7]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm23
+; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-FCP-NEXT: vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # ymm6 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
+; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-FCP-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # ymm5 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm5, %xmm3
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm5, %xmm0
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm0[2,2,2,2,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm0, %xmm25
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm6, %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm6, %ymm31
+; AVX512DQ-FCP-NEXT: vpternlogq $236, %ymm20, %ymm2, %ymm3
+; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-FCP-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # ymm2 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm2, %xmm1
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm8
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm8[2,2,2,2,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2,3],xmm4[4],xmm1[5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-FCP-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # ymm4 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm4, %xmm7
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm4[0,3,2,1]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm7, %xmm4
+; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm6, %xmm12
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm12[0,1,2,3],xmm4[4],xmm12[5],xmm4[6,7]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm4, %zmm24
; AVX512DQ-FCP-NEXT: vinserti32x8 $0, %ymm3, %zmm0, %zmm24 {%k1}
; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # ymm6 = mem[0],ymm0[1],mem[2,3,4,5],ymm0[6],mem[7]
+; AVX512DQ-FCP-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # ymm4 = mem[0],ymm0[1],mem[2,3,4,5],ymm0[6],mem[7]
; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # ymm0 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm5
-; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm5, %xmm4
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[0,3,2,1]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm11, %xmm3, %xmm0
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4],xmm0[5],xmm4[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm25, %ymm1
-; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm6, %ymm4
+; AVX512DQ-FCP-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # ymm1 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm3, %xmm12
+; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm1, %xmm13
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm13[0,1,2,3],xmm12[4],xmm13[5],xmm12[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm26, %ymm0
+; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm4, %ymm13
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm12, %ymm0, %ymm12
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm13[0,1,2,3,4],ymm12[5,6,7]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm0, %zmm13
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [10,11,6,7,2,3,14,15,10,11,10,11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm15, %xmm15
+; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm10, %xmm10
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0],xmm15[1],xmm10[2,3],xmm15[4],xmm10[5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,6,7,2,3,14,15,26,27,22,23,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm14, %ymm14
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm26
+; AVX512DQ-FCP-NEXT: vpternlogq $236, %ymm20, %ymm14, %ymm10
+; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm11, %xmm14
+; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm9, %xmm9
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm14 = xmm9[0],xmm14[1],xmm9[2,3],xmm14[4],xmm9[5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [0,1,2,3,2,3,6,7,10,11,14,15,2,3,14,15]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm16, %xmm0
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm0, %xmm0
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm17, %xmm15
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm15, %xmm15
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm15[0,1,2,3],xmm0[4],xmm15[5],xmm0[6,7]
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4],ymm0[5,6,7]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm25
-; AVX512DQ-FCP-NEXT: vpbroadcastd {{.*#+}} xmm1 = [10,11,6,7,10,11,6,7,10,11,6,7,10,11,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm10, %xmm11
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm12, %xmm12
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm12[0],xmm11[1],xmm12[2,3],xmm11[4],xmm12[5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,6,7,2,3,14,15,26,27,22,23,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm18, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm0, %ymm12
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm4, %ymm18
-; AVX512DQ-FCP-NEXT: vpternlogq $236, %ymm20, %ymm12, %ymm11
-; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm14, %xmm12
-; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm13, %xmm13
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm13[0],xmm12[1],xmm13[2,3],xmm12[4],xmm13[5,6,7]
-; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} xmm12 = [10,11,0,0,2,3,14,15,10,11,0,0,2,3,14,15]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm26, %xmm4
-; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm4, %xmm4
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm13 = [0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm28, %xmm14
-; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm14, %xmm14
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm14[0,1,2,3],xmm4[4],xmm14[5],xmm4[6,7]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm4, %zmm26
-; AVX512DQ-FCP-NEXT: vinserti32x8 $0, %ymm11, %zmm0, %zmm26 {%k1}
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm19, %xmm0
-; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm0, %xmm4
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm30, %xmm0
-; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm0, %xmm11
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm11[0,1,2,3],xmm4[4],xmm11[5],xmm4[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm31, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm0, %ymm14
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm14[0,1,2,3,4],ymm4[5,6,7]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm4
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm16, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm18, %ymm14
-; AVX512DQ-FCP-NEXT: vpshufb %ymm14, %ymm0, %ymm14
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm17, %xmm0
-; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
-; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm15, %xmm15
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm15[0],xmm0[1],xmm15[2,3],xmm0[4],xmm15[5,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogq $236, %ymm20, %ymm14, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm9, %xmm1
-; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm2, %xmm2
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2,3],xmm1[4],xmm2[5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm8, %xmm2
-; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm7, %xmm7
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm7[0,1,2,3],xmm2[4],xmm7[5],xmm2[6,7]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm2, %zmm1
-; AVX512DQ-FCP-NEXT: vinserti32x8 $0, %ymm0, %zmm0, %zmm1 {%k1}
-; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm6, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm5, %xmm2
-; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm3, %xmm3
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5],xmm2[6,7]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; AVX512DQ-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vmovaps %zmm2, (%rsi)
-; AVX512DQ-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vmovaps %zmm2, 64(%rsi)
-; AVX512DQ-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vmovaps %zmm2, 64(%rdx)
-; AVX512DQ-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vmovaps %zmm2, (%rdx)
-; AVX512DQ-FCP-NEXT: vpmovsxdq {{.*#+}} zmm2 = [18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,65535,0,0]
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm22, %zmm2, %zmm23
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm24, %zmm2, %zmm25
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm26, %zmm2, %zmm4
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm1, %zmm2, %zmm0
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm29, 64(%rcx)
-; AVX512DQ-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vmovaps %zmm1, (%rcx)
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm14, %zmm0, %zmm0
+; AVX512DQ-FCP-NEXT: vinserti32x8 $0, %ymm10, %zmm0, %zmm0 {%k1}
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm18, %xmm10
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm10, %xmm10
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm29, %xmm14
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm14, %xmm14
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm14[0,1,2,3],xmm10[4],xmm14[5],xmm10[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm14 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm30, %ymm15
+; AVX512DQ-FCP-NEXT: vpshufb %ymm14, %ymm15, %ymm15
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm15[0,1,2,3,4],ymm10[5,6,7]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm0, %zmm10
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm31, %ymm15
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm26, %ymm11
+; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm15, %ymm15
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm25, %xmm11
+; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm11, %xmm11
+; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm5, %xmm5
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0],xmm11[1],xmm5[2,3],xmm11[4],xmm5[5,6,7]
+; AVX512DQ-FCP-NEXT: vpternlogq $236, %ymm20, %ymm15, %ymm5
+; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm8, %xmm8
+; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm2, %xmm2
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm8[1],xmm2[2,3],xmm8[4],xmm2[5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm7, %xmm7
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm6, %xmm6
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm7[4],xmm6[5],xmm7[6,7]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm2, %zmm6, %zmm2
+; AVX512DQ-FCP-NEXT: vinserti32x8 $0, %ymm5, %zmm0, %zmm2 {%k1}
+; AVX512DQ-FCP-NEXT: vpshufb %ymm14, %ymm4, %ymm4
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm3, %xmm3
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm1, %xmm1
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4],xmm1[5],xmm3[6,7]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3,4],ymm1[5,6,7]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm1
+; AVX512DQ-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vmovaps %zmm3, (%rsi)
+; AVX512DQ-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vmovaps %zmm3, 64(%rsi)
+; AVX512DQ-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vmovaps %zmm3, 64(%rdx)
+; AVX512DQ-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vmovaps %zmm3, (%rdx)
+; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm22, %zmm28, %zmm23
+; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm24, %zmm28, %zmm13
+; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm0, %zmm28, %zmm10
+; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm2, %zmm28, %zmm1
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm27, 64(%rcx)
+; AVX512DQ-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vmovaps %zmm0, (%rcx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm21, 64(%r8)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm27, (%r8)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm25, 64(%r9)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm19, (%r8)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm13, 64(%r9)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm23, (%r9)
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, 64(%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, (%rax)
-; AVX512DQ-FCP-NEXT: addq $904, %rsp # imm = 0x388
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm1, 64(%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm10, (%rax)
+; AVX512DQ-FCP-NEXT: addq $872, %rsp # imm = 0x368
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
index 9134e490535ba..e3acc3fe6621f 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
@@ -1320,11 +1320,12 @@ define void @load_i16_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX2-NEXT: vpblendd {{.*#+}} xmm5 = xmm0[0],xmm1[1,2,3]
; AVX2-NEXT: vmovdqa 80(%rdi), %xmm2
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
-; AVX2-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u,u,u,u,12,13,10,11,4,5]
-; AVX2-NEXT: vpblendd {{.*#+}} ymm6 = ymm3[0,1],ymm4[2],ymm3[3,4,5],ymm4[6],ymm3[7]
-; AVX2-NEXT: vextracti128 $1, %ymm6, %xmm7
-; AVX2-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm7[4],xmm6[5],xmm7[6],xmm6[7]
-; AVX2-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[0,1,14,15,12,13,10,11,8,9,u,u,u,u,u,u]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,14,15,12,13,10,11,8,9,12,13,10,11,4,5]
+; AVX2-NEXT: vpshufb %xmm6, %xmm5, %xmm5
+; AVX2-NEXT: vpblendd {{.*#+}} ymm7 = ymm3[0,1],ymm4[2],ymm3[3,4,5],ymm4[6],ymm3[7]
+; AVX2-NEXT: vextracti128 $1, %ymm7, %xmm8
+; AVX2-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm8[4],xmm7[5],xmm8[6],xmm7[7]
+; AVX2-NEXT: vpshufb %xmm6, %xmm7, %xmm6
; AVX2-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3,4],xmm5[5,6,7]
; AVX2-NEXT: vmovdqa 96(%rdi), %xmm7
; AVX2-NEXT: vmovdqa 64(%rdi), %xmm8
@@ -1406,11 +1407,12 @@ define void @load_i16_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm5 = xmm0[0],xmm1[1,2,3]
; AVX2-FP-NEXT: vmovdqa 80(%rdi), %xmm2
; AVX2-FP-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u,u,u,u,12,13,10,11,4,5]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm6 = ymm3[0,1],ymm4[2],ymm3[3,4,5],ymm4[6],ymm3[7]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm6, %xmm7
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm7[4],xmm6[5],xmm7[6],xmm6[7]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[0,1,14,15,12,13,10,11,8,9,u,u,u,u,u,u]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,14,15,12,13,10,11,8,9,12,13,10,11,4,5]
+; AVX2-FP-NEXT: vpshufb %xmm6, %xmm5, %xmm5
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm7 = ymm3[0,1],ymm4[2],ymm3[3,4,5],ymm4[6],ymm3[7]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm7, %xmm8
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm8[4],xmm7[5],xmm8[6],xmm7[7]
+; AVX2-FP-NEXT: vpshufb %xmm6, %xmm7, %xmm6
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3,4],xmm5[5,6,7]
; AVX2-FP-NEXT: vmovdqa 96(%rdi), %xmm7
; AVX2-FP-NEXT: vmovdqa 64(%rdi), %xmm9
@@ -1429,11 +1431,12 @@ define void @load_i16_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[4,5,2,3,0,1,14,15,12,13,10,11,8,9,6,7]
; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm11 = xmm1[0,1],xmm0[2],xmm1[3]
; AVX2-FP-NEXT: vpunpckhwd {{.*#+}} xmm10 = xmm11[4],xmm2[4],xmm11[5],xmm2[5],xmm11[6],xmm2[6],xmm11[7],xmm2[7]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[u,u,u,u,u,u,u,u,u,u,8,9,6,7,0,1]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm12 = ymm4[0],ymm3[1],ymm4[2,3,4],ymm3[5],ymm4[6,7]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm12, %xmm13
-; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm12 = xmm13[0],xmm12[1],xmm13[2],xmm12[3],xmm13[4,5,6,7]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[6,7,4,5,2,3,0,1,14,15,u,u,u,u,u,u]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm12 = [6,7,4,5,2,3,0,1,14,15,8,9,6,7,0,1]
+; AVX2-FP-NEXT: vpshufb %xmm12, %xmm10, %xmm10
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm13 = ymm4[0],ymm3[1],ymm4[2,3,4],ymm3[5],ymm4[6,7]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm13, %xmm14
+; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm13 = xmm14[0],xmm13[1],xmm14[2],xmm13[3],xmm14[4,5,6,7]
+; AVX2-FP-NEXT: vpshufb %xmm12, %xmm13, %xmm12
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm10 = xmm12[0,1,2,3,4],xmm10[5,6,7]
; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[0,1,2,3,4,5,6,7,0,1,14,15,u,u,10,11]
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,5],xmm2[6],xmm11[7]
@@ -1483,11 +1486,12 @@ define void @load_i16_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm5 = xmm0[0],xmm1[1,2,3]
; AVX2-FCP-NEXT: vmovdqa 80(%rdi), %xmm2
; AVX2-FCP-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u,u,u,u,12,13,10,11,4,5]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm3[0,1],ymm4[2],ymm3[3,4,5],ymm4[6],ymm3[7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm6, %xmm7
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm7[4],xmm6[5],xmm7[6],xmm6[7]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[0,1,14,15,12,13,10,11,8,9,u,u,u,u,u,u]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,14,15,12,13,10,11,8,9,12,13,10,11,4,5]
+; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm5, %xmm5
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm3[0,1],ymm4[2],ymm3[3,4,5],ymm4[6],ymm3[7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm7, %xmm8
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm8[4],xmm7[5],xmm8[6],xmm7[7]
+; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm7, %xmm6
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3,4],xmm5[5,6,7]
; AVX2-FCP-NEXT: vmovdqa 96(%rdi), %xmm7
; AVX2-FCP-NEXT: vmovdqa 64(%rdi), %xmm9
@@ -1506,11 +1510,12 @@ define void @load_i16_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[4,5,2,3,0,1,14,15,12,13,10,11,8,9,6,7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm11 = xmm1[0,1],xmm0[2],xmm1[3]
; AVX2-FCP-NEXT: vpunpckhwd {{.*#+}} xmm10 = xmm11[4],xmm2[4],xmm11[5],xmm2[5],xmm11[6],xmm2[6],xmm11[7],xmm2[7]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[u,u,u,u,u,u,u,u,u,u,8,9,6,7,0,1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm4[0],ymm3[1],ymm4[2,3,4],ymm3[5],ymm4[6,7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm12, %xmm13
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm13[0],xmm12[1],xmm13[2],xmm12[3],xmm13[4,5,6,7]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[6,7,4,5,2,3,0,1,14,15,u,u,u,u,u,u]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [6,7,4,5,2,3,0,1,14,15,8,9,6,7,0,1]
+; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm10, %xmm10
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm4[0],ymm3[1],ymm4[2,3,4],ymm3[5],ymm4[6,7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm13, %xmm14
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm13 = xmm14[0],xmm13[1],xmm14[2],xmm13[3],xmm14[4,5,6,7]
+; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm13, %xmm12
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm12[0,1,2,3,4],xmm10[5,6,7]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[0,1,2,3,4,5,6,7,0,1,14,15,u,u,10,11]
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,5],xmm2[6],xmm11[7]
@@ -1558,13 +1563,14 @@ define void @load_i16_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpblendd {{.*#+}} xmm3 = xmm0[0],xmm1[1,2,3]
; AVX512-NEXT: vmovdqa 80(%rdi), %xmm2
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u,u,u,u,12,13,10,11,4,5]
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,14,15,12,13,10,11,8,9,12,13,10,11,4,5]
+; AVX512-NEXT: vpshufb %xmm6, %xmm3, %xmm3
; AVX512-NEXT: vmovdqa (%rdi), %ymm4
; AVX512-NEXT: vmovdqa 32(%rdi), %ymm5
-; AVX512-NEXT: vpblendd {{.*#+}} ymm6 = ymm4[0,1],ymm5[2],ymm4[3,4,5],ymm5[6],ymm4[7]
-; AVX512-NEXT: vextracti128 $1, %ymm6, %xmm7
-; AVX512-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm7[4],xmm6[5],xmm7[6],xmm6[7]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[0,1,14,15,12,13,10,11,8,9,u,u,u,u,u,u]
+; AVX512-NEXT: vpblendd {{.*#+}} ymm7 = ymm4[0,1],ymm5[2],ymm4[3,4,5],ymm5[6],ymm4[7]
+; AVX512-NEXT: vextracti128 $1, %ymm7, %xmm8
+; AVX512-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm8[4],xmm7[5],xmm8[6],xmm7[7]
+; AVX512-NEXT: vpshufb %xmm6, %xmm7, %xmm6
; AVX512-NEXT: vpblendw {{.*#+}} xmm3 = xmm6[0,1,2,3,4],xmm3[5,6,7]
; AVX512-NEXT: vpblendd {{.*#+}} xmm7 = xmm0[0,1],xmm1[2,3]
; AVX512-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0,1,2],xmm2[3],xmm7[4,5,6,7]
@@ -1642,13 +1648,14 @@ define void @load_i16_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm0[0],xmm1[1,2,3]
; AVX512-FCP-NEXT: vmovdqa 80(%rdi), %xmm2
; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u,u,u,u,12,13,10,11,4,5]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,14,15,12,13,10,11,8,9,12,13,10,11,4,5]
+; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm3, %xmm3
; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm4
; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm5
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm4[0,1],ymm5[2],ymm4[3,4,5],ymm5[6],ymm4[7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm6, %xmm7
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm7[4],xmm6[5],xmm7[6],xmm6[7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[0,1,14,15,12,13,10,11,8,9,u,u,u,u,u,u]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm4[0,1],ymm5[2],ymm4[3,4,5],ymm5[6],ymm4[7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm7, %xmm8
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm8[4],xmm7[5],xmm8[6],xmm7[7]
+; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm7, %xmm6
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm6[0,1,2,3,4],xmm3[5,6,7]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} xmm7 = xmm0[0,1],xmm1[2,3]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0,1,2],xmm2[3],xmm7[4,5,6,7]
@@ -1665,11 +1672,12 @@ define void @load_i16_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[4,5,2,3,0,1,14,15,12,13,10,11,8,9,6,7]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} xmm9 = xmm1[0,1],xmm0[2],xmm1[3]
; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm8 = xmm9[4],xmm2[4],xmm9[5],xmm2[5],xmm9[6],xmm2[6],xmm9[7],xmm2[7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,u,u,u,u,u,8,9,6,7,0,1]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm5[0],ymm4[1],ymm5[2,3,4],ymm4[5],ymm5[6,7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm10, %xmm11
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm11[0],xmm10[1],xmm11[2],xmm10[3],xmm11[4,5,6,7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[6,7,4,5,2,3,0,1,14,15,u,u,u,u,u,u]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [6,7,4,5,2,3,0,1,14,15,8,9,6,7,0,1]
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm8, %xmm8
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm5[0],ymm4[1],ymm5[2,3,4],ymm4[5],ymm5[6,7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm11, %xmm12
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm12[0],xmm11[1],xmm12[2],xmm11[3],xmm12[4,5,6,7]
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm11, %xmm10
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm10[0,1,2,3,4],xmm8[5,6,7]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,u,u,u,u,u,0,1,14,15,u,u,10,11]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5],xmm2[6],xmm9[7]
@@ -1717,13 +1725,14 @@ define void @load_i16_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpblendd {{.*#+}} xmm3 = xmm0[0],xmm1[1,2,3]
; AVX512DQ-NEXT: vmovdqa 80(%rdi), %xmm2
; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u,u,u,u,12,13,10,11,4,5]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,14,15,12,13,10,11,8,9,12,13,10,11,4,5]
+; AVX512DQ-NEXT: vpshufb %xmm6, %xmm3, %xmm3
; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm4
; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm5
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm6 = ymm4[0,1],ymm5[2],ymm4[3,4,5],ymm5[6],ymm4[7]
-; AVX512DQ-NEXT: vextracti128 $1, %ymm6, %xmm7
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm7[4],xmm6[5],xmm7[6],xmm6[7]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[0,1,14,15,12,13,10,11,8,9,u,u,u,u,u,u]
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm7 = ymm4[0,1],ymm5[2],ymm4[3,4,5],ymm5[6],ymm4[7]
+; AVX512DQ-NEXT: vextracti128 $1, %ymm7, %xmm8
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm8[4],xmm7[5],xmm8[6],xmm7[7]
+; AVX512DQ-NEXT: vpshufb %xmm6, %xmm7, %xmm6
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm3 = xmm6[0,1,2,3,4],xmm3[5,6,7]
; AVX512DQ-NEXT: vpblendd {{.*#+}} xmm7 = xmm0[0,1],xmm1[2,3]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0,1,2],xmm2[3],xmm7[4,5,6,7]
@@ -1801,13 +1810,14 @@ define void @load_i16_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm0[0],xmm1[1,2,3]
; AVX512DQ-FCP-NEXT: vmovdqa 80(%rdi), %xmm2
; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u,u,u,u,12,13,10,11,4,5]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,14,15,12,13,10,11,8,9,12,13,10,11,4,5]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm3, %xmm3
; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm4
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm5
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm4[0,1],ymm5[2],ymm4[3,4,5],ymm5[6],ymm4[7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm6, %xmm7
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm7[4],xmm6[5],xmm7[6],xmm6[7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[0,1,14,15,12,13,10,11,8,9,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm4[0,1],ymm5[2],ymm4[3,4,5],ymm5[6],ymm4[7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm7, %xmm8
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm8[4],xmm7[5],xmm8[6],xmm7[7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm7, %xmm6
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm6[0,1,2,3,4],xmm3[5,6,7]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} xmm7 = xmm0[0,1],xmm1[2,3]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0,1,2],xmm2[3],xmm7[4,5,6,7]
@@ -1824,11 +1834,12 @@ define void @load_i16_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[4,5,2,3,0,1,14,15,12,13,10,11,8,9,6,7]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} xmm9 = xmm1[0,1],xmm0[2],xmm1[3]
; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm8 = xmm9[4],xmm2[4],xmm9[5],xmm2[5],xmm9[6],xmm2[6],xmm9[7],xmm2[7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,u,u,u,u,u,8,9,6,7,0,1]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm5[0],ymm4[1],ymm5[2,3,4],ymm4[5],ymm5[6,7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm10, %xmm11
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm11[0],xmm10[1],xmm11[2],xmm10[3],xmm11[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[6,7,4,5,2,3,0,1,14,15,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [6,7,4,5,2,3,0,1,14,15,8,9,6,7,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm8, %xmm8
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm5[0],ymm4[1],ymm5[2,3,4],ymm4[5],ymm5[6,7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm11, %xmm12
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm12[0],xmm11[1],xmm12[2],xmm11[3],xmm12[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm11, %xmm10
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm10[0,1,2,3,4],xmm8[5,6,7]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,u,u,u,u,u,0,1,14,15,u,u,10,11]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5],xmm2[6],xmm9[7]
@@ -3046,142 +3057,145 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX2-FCP-NEXT: vmovdqa 64(%rdi), %ymm2
; AVX2-FCP-NEXT: vmovdqa 96(%rdi), %ymm3
-; AVX2-FCP-NEXT: vmovdqa 128(%rdi), %ymm4
+; AVX2-FCP-NEXT: vmovdqa 128(%rdi), %ymm5
; AVX2-FCP-NEXT: vmovdqa 160(%rdi), %ymm6
; AVX2-FCP-NEXT: vmovdqa 192(%rdi), %ymm7
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm7[0,1,0,2]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm6[0,1,2],ymm4[3],ymm6[4,5],ymm4[6],ymm6[7]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,2,3,0,1,6,7,8,9,14,15,12,13,14,15,16,17,18,19,16,17,22,23,24,25,30,31,20,21,26,27]
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm8, %ymm9
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm6[0,1,2],ymm5[3],ymm6[4,5],ymm5[6],ymm6[7]
; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [3,6,2,5,3,6,2,5]
; AVX2-FCP-NEXT: # ymm11 = mem[0,1,0,1]
; AVX2-FCP-NEXT: vpermd %ymm10, %ymm11, %ymm10
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25,30,31,u,u,u,u]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm10[0,1,2,3,4,5,6],ymm5[7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm0[0,1],ymm1[2],ymm0[3,4,5],ymm1[6],ymm0[7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm10, %xmm11
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3],xmm11[4],xmm10[5],xmm11[6],xmm10[7]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[0,1,14,15,12,13,10,11,8,9,8,9,8,9,8,9]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm3[0],ymm2[1],ymm3[2,3,4],ymm2[5],ymm3[6,7]
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,2,2,3]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[0,1,14,15,12,13,14,15,2,3,6,7,12,13,2,3,16,17,30,31,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vpmovsxbw {{.*#+}} ymm12 = [65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
-; AVX2-FCP-NEXT: vpblendvb %ymm12, %ymm10, %ymm11, %ymm10
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm10[0,1,2,3,4],ymm5[5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm0[0,1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm10, %xmm11
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm11[0],xmm10[1],xmm11[2,3,4,5],xmm10[6],xmm11[7]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[2,3,0,1,14,15,12,13,10,11,10,11,10,11,10,11]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
-; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm13 = [2,5,1,0,4,0,0,0]
-; AVX2-FCP-NEXT: vpermd %ymm11, %ymm13, %ymm11
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm11[2,3,2,3,2,3,2,3,8,9,0,1,6,7,8,9,18,19,18,19,18,19,18,19,24,25,16,17,22,23,24,25]
-; AVX2-FCP-NEXT: vmovdqa %xmm12, %xmm11
-; AVX2-FCP-NEXT: vpblendvb %ymm11, %ymm10, %ymm13, %ymm10
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm4[0,1],ymm6[2],ymm4[3,4,5],ymm6[6],ymm4[7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm12, %xmm13
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm12[0,1,2,3],xmm13[4],xmm12[5],xmm13[6],xmm12[7]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[0,1,0,1,14,15,12,13,10,11,8,9,u,u,u,u]
-; AVX2-FCP-NEXT: vinserti128 $1, %xmm12, %ymm0, %ymm12
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm10, %ymm4
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm9[7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm0[0,1],ymm1[2],ymm0[3,4,5],ymm1[6],ymm0[7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm9, %xmm10
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3],xmm10[4],xmm9[5],xmm10[6],xmm9[7]
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[0,1,14,15,12,13,10,11,8,9,8,9,8,9,8,9]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm3[0],ymm2[1],ymm3[2,3,4],ymm2[5],ymm3[6,7]
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,2,2,3]
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm10[0,1,14,15,12,13,14,15,2,3,6,7,12,13,2,3,16,17,30,31,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-FCP-NEXT: vpmovsxbw {{.*#+}} ymm11 = [65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
+; AVX2-FCP-NEXT: vpblendvb %ymm11, %ymm9, %ymm10, %ymm9
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm9[0,1,2,3,4],ymm4[5,6,7]
+; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm0[0,1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm9, %xmm10
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm10[0],xmm9[1],xmm10[2,3,4,5],xmm9[6],xmm10[7]
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[2,3,0,1,14,15,12,13,10,11,10,11,10,11,10,11]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
+; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm12 = [2,5,1,0,4,0,0,0]
+; AVX2-FCP-NEXT: vpermd %ymm10, %ymm12, %ymm10
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm10[2,3,2,3,2,3,2,3,8,9,0,1,6,7,8,9,18,19,18,19,18,19,18,19,24,25,16,17,22,23,24,25]
+; AVX2-FCP-NEXT: vmovdqa %xmm11, %xmm10
+; AVX2-FCP-NEXT: vpblendvb %ymm10, %ymm9, %ymm12, %ymm9
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm5[0,1],ymm6[2],ymm5[3,4,5],ymm6[6],ymm5[7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm11, %xmm12
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm11[0,1,2,3],xmm12[4],xmm11[5],xmm12[6],xmm11[7]
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[0,1,0,1,14,15,12,13,10,11,8,9,u,u,u,u]
+; AVX2-FCP-NEXT: vinserti128 $1, %xmm11, %ymm0, %ymm11
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,28,29]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm12[0,1,2,3,4,5,6],ymm8[7]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm8 = ymm10[0],ymm8[1,2,3,4,5,6,7],ymm10[8],ymm8[9,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm10[0,1,2,3],ymm8[4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm10, %xmm12
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0],xmm12[1],xmm10[2,3,4,5],xmm12[6],xmm10[7]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[4,5,2,3,0,1,14,15,12,13,12,13,12,13,12,13]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
-; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm13 = [2,6,1,0,5,0,0,0]
-; AVX2-FCP-NEXT: vpermd %ymm12, %ymm13, %ymm12
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5,2,3,8,9,2,3,4,5,10,11,16,17,18,19,20,21,18,19,24,25,18,19,20,21,26,27]
-; AVX2-FCP-NEXT: vpblendvb %ymm11, %ymm10, %ymm12, %ymm10
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm4[0,1,2],ymm6[3],ymm4[4,5],ymm6[6],ymm4[7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm12, %xmm13
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm13[0],xmm12[1],xmm13[2,3,4,5],xmm12[6],xmm13[7]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[0,1,2,3,0,1,14,15,12,13,10,11,u,u,u,u]
-; AVX2-FCP-NEXT: vinserti128 $1, %xmm12, %ymm0, %ymm12
-; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm13 = [2,5,2,5,2,5,2,5]
-; AVX2-FCP-NEXT: vpermd %ymm7, %ymm13, %ymm13
-; AVX2-FCP-NEXT: vpshufhw {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm11[0,1,2,3,4,5,6],ymm8[7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm8 = ymm9[0],ymm8[1,2,3,4,5,6,7],ymm9[8],ymm8[9,10,11,12,13,14,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm9[0,1,2,3],ymm8[4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6,7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm9, %xmm11
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0],xmm11[1],xmm9[2,3,4,5],xmm11[6],xmm9[7]
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[4,5,2,3,0,1,14,15,12,13,12,13,12,13,12,13]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
+; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm12 = [2,6,1,0,5,0,0,0]
+; AVX2-FCP-NEXT: vpermd %ymm11, %ymm12, %ymm11
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5,2,3,8,9,2,3,4,5,10,11,16,17,18,19,20,21,18,19,24,25,18,19,20,21,26,27]
+; AVX2-FCP-NEXT: vpblendvb %ymm10, %ymm9, %ymm11, %ymm9
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm5[0,1,2],ymm6[3],ymm5[4,5],ymm6[6],ymm5[7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm11, %xmm12
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm12[0],xmm11[1],xmm12[2,3,4,5],xmm11[6],xmm12[7]
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[0,1,2,3,0,1,14,15,12,13,10,11,u,u,u,u]
+; AVX2-FCP-NEXT: vinserti128 $1, %xmm11, %ymm0, %ymm11
+; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm12 = [2,5,2,5,2,5,2,5]
+; AVX2-FCP-NEXT: vpermd %ymm7, %ymm12, %ymm12
+; AVX2-FCP-NEXT: vpshufhw {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5,6],ymm12[7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm11 = ymm9[0],ymm11[1,2,3,4,5,6,7],ymm9[8],ymm11[9,10,11,12,13,14,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm11[4,5,6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm6[0],ymm5[1],ymm6[2,3],ymm5[4],ymm6[5,6,7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm11, %xmm12
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm11[0],xmm12[1],xmm11[2,3,4,5],xmm12[6],xmm11[7]
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[0,1,4,5,2,3,0,1,14,15,12,13,u,u,u,u]
+; AVX2-FCP-NEXT: vinserti128 $1, %xmm11, %ymm0, %ymm12
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm7[0,1,1,3]
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,24,25]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5,6],ymm13[7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm13, %xmm14
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm13 = xmm14[0],xmm13[1],xmm14[2],xmm13[3],xmm14[4,5,6,7]
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm13[6,7,4,5,2,3,0,1,14,15,14,15,14,15,14,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm3[0,1,2],ymm2[3],ymm3[4,5],ymm2[6],ymm3[7]
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[1,3,2,3]
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm14 = ymm14[6,7,6,7,6,7,6,7,8,9,4,5,10,11,0,1,22,23,22,23,22,23,22,23,24,25,20,21,26,27,16,17]
+; AVX2-FCP-NEXT: vpblendvb %ymm10, %ymm13, %ymm14, %ymm10
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm12 = ymm10[0],ymm12[1,2,3,4,5,6,7],ymm10[8],ymm12[9,10,11,12,13,14,15]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm12[4,5,6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm6[0],ymm4[1],ymm6[2,3],ymm4[4],ymm6[5,6,7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm12, %xmm13
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm12[0],xmm13[1],xmm12[2,3,4,5],xmm13[6],xmm12[7]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[0,1,4,5,2,3,0,1,14,15,12,13,u,u,u,u]
-; AVX2-FCP-NEXT: vinserti128 $1, %xmm12, %ymm0, %ymm12
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm7[0,1,1,3]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm14 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,24,25]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5,6],ymm14[7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm14, %xmm15
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm14 = xmm15[0],xmm14[1],xmm15[2],xmm14[3],xmm15[4,5,6,7]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm14 = xmm14[6,7,4,5,2,3,0,1,14,15,14,15,14,15,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm15 = ymm3[0,1,2],ymm2[3],ymm3[4,5],ymm2[6],ymm3[7]
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm15[1,3,2,3]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm15 = ymm15[6,7,6,7,6,7,6,7,8,9,4,5,10,11,0,1,22,23,22,23,22,23,22,23,24,25,20,21,26,27,16,17]
-; AVX2-FCP-NEXT: vpblendvb %ymm11, %ymm14, %ymm15, %ymm11
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm12 = ymm11[0],ymm12[1,2,3,4,5,6,7],ymm11[8],ymm12[9,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm12[4,5,6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX2-FCP-NEXT: vmovd {{.*#+}} xmm12 = [8,9,4,5,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm14, %xmm15
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm14, %xmm14
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm14 = xmm14[3,1,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpcklwd {{.*#+}} xmm14 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm15 = ymm2[0,1],ymm3[2],ymm2[3,4,5],ymm3[6],ymm2[7]
-; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [0,3,7,2,6,0,0,0]
-; AVX2-FCP-NEXT: vpermd %ymm15, %ymm9, %ymm9
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm15 = ymm6[0],ymm4[1],ymm6[2,3,4],ymm4[5],ymm6[6,7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm15, %xmm5
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0],xmm15[1],xmm5[2],xmm15[3],xmm5[4,5,6,7]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[0,1,6,7,4,5,2,3,0,1,14,15,u,u,u,u]
-; AVX2-FCP-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5,6],ymm13[7]
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm13 = [u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
-; AVX2-FCP-NEXT: vpshufb %ymm13, %ymm9, %ymm9
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm14 = xmm14[0,1],xmm9[2,3]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm9[0],ymm5[1,2,3,4,5,6,7],ymm9[8],ymm5[9,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm14[0,1,2,3],ymm5[4,5,6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1],ymm4[2],ymm6[3,4],ymm4[5],ymm6[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX2-FCP-NEXT: vmovd {{.*#+}} xmm13 = [8,9,4,5,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX2-FCP-NEXT: vpshufb %xmm13, %xmm12, %xmm14
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm12, %xmm12
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm12 = xmm12[3,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpcklwd {{.*#+}} xmm14 = xmm14[0],xmm12[0],xmm14[1],xmm12[1],xmm14[2],xmm12[2],xmm14[3],xmm12[3]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm2[0,1],ymm3[2],ymm2[3,4,5],ymm3[6],ymm2[7]
+; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm15 = [0,3,7,2,6,0,0,0]
+; AVX2-FCP-NEXT: vpermd %ymm12, %ymm15, %ymm15
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm6[0],ymm5[1],ymm6[2,3,4],ymm5[5],ymm6[6,7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm12, %xmm4
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm12[1],xmm4[2],xmm12[3],xmm4[4,5,6,7]
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[0,1,6,7,4,5,2,3,0,1,14,15,u,u,u,u]
+; AVX2-FCP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm11[7]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
+; AVX2-FCP-NEXT: vpshufb %ymm12, %ymm15, %ymm11
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm14 = xmm14[0,1],xmm11[2,3]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm11[0],ymm4[1,2,3,4,5,6,7],ymm11[8],ymm4[9,10,11,12,13,14,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm14[0,1,2,3],ymm4[4,5,6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm6[0,1],ymm5[2],ymm6[3,4],ymm5[5],ymm6[6,7]
; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm14 = [2,5,1,4,2,5,1,4]
; AVX2-FCP-NEXT: # ymm14 = mem[0,1,0,1]
-; AVX2-FCP-NEXT: vpermd %ymm5, %ymm14, %ymm5
+; AVX2-FCP-NEXT: vpermd %ymm4, %ymm14, %ymm4
; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm14 = [0,0,0,0,0,3,7,0]
; AVX2-FCP-NEXT: vpermd %ymm7, %ymm14, %ymm14
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm14 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25,30,31,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm14[5,6,7],ymm5[8,9,10,11,12],ymm14[13,14,15]
+; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm15 = [30,31,16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31,16,17,22,23,24,25]
+; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm14, %ymm14
+; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm4, %ymm4
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm14[5,6,7],ymm4[8,9,10,11,12],ymm14[13,14,15]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm2[0,1,2],ymm3[3],ymm2[4,5],ymm3[6],ymm2[7]
; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm15 = [0,4,7,3,6,0,0,0]
; AVX2-FCP-NEXT: vpermd %ymm14, %ymm15, %ymm14
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm15 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm15, %xmm8
-; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm8, %xmm8
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm15[10,11,6,7,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm12[0],xmm8[0],xmm12[1],xmm8[1],xmm12[2],xmm8[2],xmm12[3],xmm8[3]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm14[u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm12[0],ymm5[1,2,3,4,5,6,7],ymm12[8],ymm5[9,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm8 = xmm8[0,1],xmm12[2,3]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3],ymm5[4,5,6,7]
-; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [0,4,7,0,0,4,7,0]
-; AVX2-FCP-NEXT: # ymm8 = mem[0,1,0,1]
-; AVX2-FCP-NEXT: vpermd %ymm7, %ymm8, %ymm7
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm6[0,1],ymm4[2,3],ymm6[4,5],ymm4[6,7]
+; AVX2-FCP-NEXT: vpshufb %xmm13, %xmm8, %xmm8
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm15[10,11,6,7,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-FCP-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm13[0],xmm8[0],xmm13[1],xmm8[1],xmm13[2],xmm8[2],xmm13[3],xmm8[3]
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm14[u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm13[0],ymm4[1,2,3,4,5,6,7],ymm13[8],ymm4[9,10,11,12,13,14,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm8 = xmm8[0,1],xmm13[2,3]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm8[0,1,2,3],ymm4[4,5,6,7]
+; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [0,4,7,0,0,4,7,0]
+; AVX2-FCP-NEXT: # ymm4 = mem[0,1,0,1]
+; AVX2-FCP-NEXT: vpermd %ymm7, %ymm4, %ymm4
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3],ymm6[4,5],ymm5[6,7]
; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [2,6,1,5,2,6,1,5]
; AVX2-FCP-NEXT: # ymm6 = mem[0,1,0,1]
-; AVX2-FCP-NEXT: vpermd %ymm4, %ymm6, %ymm4
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27,28,29,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm6[5,6,7],ymm4[8,9,10,11,12],ymm6[13,14,15]
+; AVX2-FCP-NEXT: vpermd %ymm5, %ymm6, %ymm5
+; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm6 = [28,29,18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29,18,19,20,21,26,27]
+; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm4, %ymm4
+; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm5, %ymm5
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5,6,7],ymm5[8,9,10,11,12],ymm4[13,14,15]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6,7]
; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [1,4,0,3,7,0,0,0]
; AVX2-FCP-NEXT: vpermd %ymm2, %ymm3, %ymm2
-; AVX2-FCP-NEXT: vpshufb %ymm13, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vpshufb %ymm12, %ymm2, %ymm2
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,u,10,11,6,7,u,u,u,u]
@@ -3194,11 +3208,11 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vmovaps %ymm1, (%rsi)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm1, (%rdx)
-; AVX2-FCP-NEXT: vmovdqa %ymm10, (%rcx)
-; AVX2-FCP-NEXT: vmovdqa %ymm11, (%r8)
-; AVX2-FCP-NEXT: vmovdqa %ymm9, (%r9)
+; AVX2-FCP-NEXT: vmovdqa %ymm9, (%rcx)
+; AVX2-FCP-NEXT: vmovdqa %ymm10, (%r8)
+; AVX2-FCP-NEXT: vmovdqa %ymm11, (%r9)
; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX2-FCP-NEXT: vmovdqa %ymm5, (%rax)
+; AVX2-FCP-NEXT: vmovdqa %ymm13, (%rax)
; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX2-FCP-NEXT: vmovdqa %ymm0, (%rax)
; AVX2-FCP-NEXT: vzeroupper
@@ -3378,146 +3392,148 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-LABEL: load_i16_stride7_vf16:
; AVX512-FCP: # %bb.0:
; AVX512-FCP-NEXT: vmovdqa64 64(%rdi), %zmm0
-; AVX512-FCP-NEXT: vmovdqa64 128(%rdi), %zmm1
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm16 = [2,5,9,12,2,5,9,12]
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [10,3,6,15,12,13,6,15]
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [2,6,9,0,13,0,0,0]
-; AVX512-FCP-NEXT: vpermd %zmm0, %zmm2, %zmm8
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [2,5,9,0,12,0,0,0]
-; AVX512-FCP-NEXT: vpermd %zmm0, %zmm2, %zmm6
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [8,1,12,5,12,5,14,15]
-; AVX512-FCP-NEXT: vpermd %zmm0, %zmm2, %zmm3
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [3,6,10,13,3,6,10,13]
-; AVX512-FCP-NEXT: vpermd %zmm1, %zmm2, %zmm4
-; AVX512-FCP-NEXT: vmovdqa 192(%rdi), %ymm2
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm2[0,1,0,2]
-; AVX512-FCP-NEXT: vpbroadcastd {{.*#+}} ymm10 = [20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27]
-; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm13, %ymm5
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25,30,31,u,u,u,u]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm4[0,1,2,3,4,5,6],ymm5[7]
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm16 = [2,6,9,13,2,6,9,13]
+; AVX512-FCP-NEXT: vmovdqa64 128(%rdi), %zmm2
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [2,5,9,12,2,5,9,12]
+; AVX512-FCP-NEXT: vpermd %zmm2, %zmm3, %zmm4
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm17 = [0,3,7,10,14,0,0,0]
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm11 = [10,3,6,15,12,13,6,15]
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [2,6,9,0,13,0,0,0]
+; AVX512-FCP-NEXT: vpermd %zmm0, %zmm3, %zmm10
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [2,5,9,0,12,0,0,0]
+; AVX512-FCP-NEXT: vpermd %zmm0, %zmm3, %zmm8
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [8,1,12,5,12,5,14,15]
+; AVX512-FCP-NEXT: vpermd %zmm0, %zmm3, %zmm3
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm5 = [3,6,10,13,3,6,10,13]
+; AVX512-FCP-NEXT: vpermd %zmm2, %zmm5, %zmm5
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,2,3,0,1,6,7,8,9,14,15,12,13,14,15,16,17,18,19,16,17,22,23,24,25,30,31,20,21,26,27]
+; AVX512-FCP-NEXT: vmovdqa 192(%rdi), %ymm7
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm7[0,1,0,2]
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm14, %ymm12
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm5, %ymm5
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm5[0,1,2,3,4,5,6],ymm12[7]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[6,7,12,13,2,3,16,17,30,31,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm4
-; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm5
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm4[0,1],ymm5[2],ymm4[3,4,5],ymm5[6],ymm4[7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm12, %xmm14
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm12[0,1,2,3],xmm14[4],xmm12[5],xmm14[6],xmm12[7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm12[0,1,14,15,12,13,10,11,8,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm12[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpor %ymm3, %ymm12, %ymm3
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm11[5,6,7]
-; AVX512-FCP-NEXT: vmovdqa 160(%rdi), %ymm11
-; AVX512-FCP-NEXT: vmovdqa 128(%rdi), %ymm12
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm12[0,1],ymm11[2],ymm12[3,4,5],ymm11[6],ymm12[7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm14, %xmm15
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm14 = xmm14[0,1,2,3],xmm15[4],xmm14[5],xmm15[6],xmm14[7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm14 = xmm14[0,1,0,1,14,15,12,13,10,11,8,9,u,u,u,u]
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm14, %ymm0, %ymm14
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,28,29]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm14[0,1,2,3,4,5,6],ymm13[7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[0,1,6,7,8,9,18,19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm4[0,1,2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7]
+; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm5
+; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm6
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm5[0,1],ymm6[2],ymm5[3,4,5],ymm6[6],ymm5[7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm13, %xmm15
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm13 = xmm13[0,1,2,3],xmm15[4],xmm13[5],xmm15[6],xmm13[7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm13[0,1,14,15,12,13,10,11,8,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm13[u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512-FCP-NEXT: vpor %ymm3, %ymm13, %ymm3
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm12[5,6,7]
+; AVX512-FCP-NEXT: vmovdqa 160(%rdi), %ymm12
+; AVX512-FCP-NEXT: vmovdqa 128(%rdi), %ymm13
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm15 = ymm13[0,1],ymm12[2],ymm13[3,4,5],ymm12[6],ymm13[7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm15, %xmm1
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm15[0,1,2,3],xmm1[4],xmm15[5],xmm1[6],xmm15[7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,0,1,14,15,12,13,10,11,8,9,u,u,u,u]
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm14 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,28,29]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm14[7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm8[0,1,6,7,8,9,18,19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm5[0,1,2],ymm6[3],ymm5[4,5],ymm6[6],ymm5[7]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm14, %xmm15
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm14 = xmm15[0],xmm14[1],xmm15[2,3,4,5],xmm14[6],xmm15[7]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm14 = ymm14[2,3,0,1,14,15,12,13,10,11],zero,zero,zero,zero,zero,zero,zero,zero,ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpor %ymm6, %ymm14, %ymm6
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm13 = ymm6[0],ymm13[1,2,3,4,5,6,7],ymm6[8],ymm13[9,10,11,12,13,14,15]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm13[4,5,6,7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm12[0,1,2],ymm11[3],ymm12[4,5],ymm11[6],ymm12[7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm13, %xmm14
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm13 = xmm14[0],xmm13[1],xmm14[2,3,4,5],xmm13[6],xmm14[7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm13[0,1,2,3,0,1,14,15,12,13,10,11,u,u,u,u]
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm13, %ymm0, %ymm13
+; AVX512-FCP-NEXT: vpor %ymm8, %ymm14, %ymm8
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm8[0],ymm1[1,2,3,4,5,6,7],ymm8[8],ymm1[9,10,11,12,13,14,15]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm1[4,5,6,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm13[0,1,2],ymm12[3],ymm13[4,5],ymm12[6],ymm13[7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm14
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm14[0],xmm1[1],xmm14[2,3,4,5],xmm1[6],xmm14[7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,2,3,0,1,14,15,12,13,10,11,u,u,u,u]
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} ymm14 = [2,5,2,5,2,5,2,5]
-; AVX512-FCP-NEXT: vpermd %ymm2, %ymm14, %ymm14
+; AVX512-FCP-NEXT: vpermd %ymm7, %ymm14, %ymm14
; AVX512-FCP-NEXT: vpshufhw {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5,6],ymm14[7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm8[2,3,4,5,10,11,16,17],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm14[7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[2,3,4,5,10,11,16,17],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm6[0],ymm5[1],ymm6[2,3],ymm5[4],ymm6[5,6,7]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm14, %xmm15
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm14 = xmm14[0],xmm15[1],xmm14[2,3,4,5],xmm15[6],xmm14[7]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm14 = ymm14[4,5,2,3,0,1,14,15,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpor %ymm8, %ymm14, %ymm8
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm13 = ymm8[0],ymm13[1,2,3,4,5,6,7],ymm8[8],ymm13[9,10,11,12,13,14,15]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm13[4,5,6,7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm11[0],ymm12[1],ymm11[2,3],ymm12[4],ymm11[5,6,7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm13, %xmm14
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm13 = xmm13[0],xmm14[1],xmm13[2,3,4,5],xmm14[6],xmm13[7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm13[0,1,4,5,2,3,0,1,14,15,12,13,u,u,u,u]
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm13, %ymm0, %ymm13
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm2[0,1,1,3]
+; AVX512-FCP-NEXT: vpor %ymm10, %ymm14, %ymm10
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm10[0],ymm1[1,2,3,4,5,6,7],ymm10[8],ymm1[9,10,11,12,13,14,15]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm1[4,5,6,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm12[0],ymm13[1],ymm12[2,3],ymm13[4],ymm12[5,6,7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm14
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm14[1],xmm1[2,3,4,5],xmm14[6],xmm1[7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,4,5,2,3,0,1,14,15,12,13,u,u,u,u]
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm7[0,1,1,3]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm15 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,24,25]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm15 = ymm13[0,1,2,3,4,5,6],ymm15[7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm5[0],ymm4[1],ymm5[2,3,4],ymm4[5],ymm5[6,7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm13, %xmm7
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0],xmm13[1],xmm7[2],xmm13[3],xmm7[4,5,6,7]
-; AVX512-FCP-NEXT: vpermd %zmm0, %zmm9, %zmm9
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm9[4,5,10,11,0,1,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[6,7,4,5,2,3,0,1,14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpor %ymm7, %ymm9, %ymm7
-; AVX512-FCP-NEXT: vpermd %zmm1, %zmm16, %zmm13
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [0,3,7,10,14,0,0,0]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm15 = ymm7[0],ymm15[1,2,3,4,5,6,7],ymm7[8],ymm15[9,10,11,12,13,14,15]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm15[4,5,6,7]
-; AVX512-FCP-NEXT: vpermd %zmm0, %zmm9, %zmm9
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0],ymm12[1],ymm11[2,3,4],ymm12[5],ymm11[6,7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm11, %xmm12
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm12[0],xmm11[1],xmm12[2],xmm11[3],xmm12[4,5,6,7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[0,1,6,7,4,5,2,3,0,1,14,15,u,u,u,u]
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm14, %ymm10
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5,6],ymm10[7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm10, %xmm14
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm10, %xmm10
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm10 = xmm10[3,1,2,3,4,5,6,7]
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm14 = xmm14[0],xmm10[0],xmm14[1],xmm10[1],xmm14[2],xmm10[2],xmm14[3],xmm10[3]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
-; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm9, %ymm9
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm14[0,1],ymm9[2,3,4,5,6,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm11 = ymm9[0],ymm11[1,2,3,4,5,6,7],ymm9[8],ymm11[9,10,11,12,13,14,15]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm11[4,5,6,7]
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm11 = [0,3,3,0,0,3,7,0]
-; AVX512-FCP-NEXT: vpermd %ymm2, %ymm11, %ymm11
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u,u,u,u,u,u,0,1,6,7,8,9,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm13[u,u,0,1,6,7,8,9,14,15,u,u,u,u,u,u,u,u,16,17,22,23,24,25,30,31,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm11 = ymm13[0,1,2,3,4],ymm11[5,6,7],ymm13[8,9,10,11,12],ymm11[13,14,15]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm13, %xmm14
-; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm14, %xmm12
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm14 = [1,4,8,11,15,0,0,0]
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm15 = [2,6,9,13,2,6,9,13]
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm16 = [0,4,7,11,14,0,0,0]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm13[10,11,6,7,4,5,6,7,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
-; AVX512-FCP-NEXT: vpermd %zmm0, %zmm16, %zmm13
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm15[7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm15 = ymm6[0],ymm5[1],ymm6[2,3,4],ymm5[5],ymm6[6,7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm15, %xmm9
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0],xmm15[1],xmm9[2],xmm15[3],xmm9[4,5,6,7]
+; AVX512-FCP-NEXT: vpermd %zmm0, %zmm11, %zmm11
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm11[4,5,10,11,0,1,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm9[6,7,4,5,2,3,0,1,14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512-FCP-NEXT: vpor %ymm11, %ymm9, %ymm9
+; AVX512-FCP-NEXT: vpermd %zmm0, %zmm17, %zmm11
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm9[0],ymm1[1,2,3,4,5,6,7],ymm9[8],ymm1[9,10,11,12,13,14,15]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm1[4,5,6,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm12[0],ymm13[1],ymm12[2,3,4],ymm13[5],ymm12[6,7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm12
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm12[0],xmm1[1],xmm12[2],xmm1[3],xmm12[4,5,6,7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,6,7,4,5,2,3,0,1,14,15,u,u,u,u]
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm12[7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm6[0,1],ymm5[2],ymm6[3,4],ymm5[5],ymm6[6,7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm13 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512-FCP-NEXT: vpshufb %xmm13, %xmm12, %xmm14
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm12, %xmm12
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm12 = xmm12[3,1,2,3,4,5,6,7]
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm14 = xmm14[0],xmm12[0],xmm14[1],xmm12[1],xmm14[2],xmm12[2],xmm14[3],xmm12[3]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
+; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm11, %ymm11
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm14[0,1],ymm11[2,3,4,5,6,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm11[0],ymm1[1,2,3,4,5,6,7],ymm11[8],ymm1[9,10,11,12,13,14,15]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm1[4,5,6,7]
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,3,3,0,0,3,7,0]
+; AVX512-FCP-NEXT: vpermd %ymm7, %ymm1, %ymm1
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm14 = [0,1,0,1,6,7,8,9,14,15,0,1,6,7,8,9,16,17,16,17,22,23,24,25,30,31,16,17,22,23,24,25]
+; AVX512-FCP-NEXT: vpshufb %ymm14, %ymm1, %ymm1
+; AVX512-FCP-NEXT: vpshufb %ymm14, %ymm4, %ymm4
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm4[0,1,2,3,4],ymm1[5,6,7],ymm4[8,9,10,11,12],ymm1[13,14,15]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm6[0,1],ymm5[2,3],ymm6[4,5],ymm5[6,7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm4, %xmm14
+; AVX512-FCP-NEXT: vpshufb %xmm13, %xmm14, %xmm13
+; AVX512-FCP-NEXT: vpermd %zmm2, %zmm16, %zmm2
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm14 = [0,4,7,11,14,0,0,0]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[10,11,6,7,4,5,6,7,u,u,u,u,u,u,u,u]
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1],xmm4[2],xmm13[2],xmm4[3],xmm13[3]
+; AVX512-FCP-NEXT: vpermd %zmm0, %zmm14, %zmm13
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1],ymm13[2,3,4,5,6,7]
-; AVX512-FCP-NEXT: vpermd %zmm1, %zmm15, %zmm1
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm11 = ymm12[0],ymm11[1,2,3,4,5,6,7],ymm12[8],ymm11[9,10,11,12,13,14,15]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm12[0,1,2,3],ymm11[4,5,6,7]
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm12 = [0,4,7,0,0,4,7,0]
-; AVX512-FCP-NEXT: # ymm12 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vpermd %ymm2, %ymm12, %ymm2
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,2,3,4,5,10,11,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,2,3,4,5,10,11,12,13,u,u,u,u,u,u,u,u,18,19,20,21,26,27,28,29,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7],ymm1[8,9,10,11,12],ymm2[13,14,15]
-; AVX512-FCP-NEXT: vpermd %zmm0, %zmm14, %zmm0
-; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0,1,2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm2, %xmm4
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,u,u,10,11,6,7,4,5,6,7]
-; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,4,6,7]
-; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3,4,5,6,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm13[2,3,4,5,6,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm4[0],ymm1[1,2,3,4,5,6,7],ymm4[8],ymm1[9,10,11,12,13,14,15]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3],ymm1[4,5,6,7]
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [0,4,7,0,0,4,7,0]
+; AVX512-FCP-NEXT: # ymm4 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpermd %ymm7, %ymm4, %ymm4
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [4,5,2,3,4,5,10,11,12,13,2,3,4,5,10,11,20,21,18,19,20,21,26,27,28,29,18,19,20,21,26,27]
+; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm4, %ymm4
+; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm2, %ymm2
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [1,4,8,11,15,0,0,0]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm4[5,6,7],ymm2[8,9,10,11,12],ymm4[13,14,15]
+; AVX512-FCP-NEXT: vpermd %zmm0, %zmm7, %zmm0
+; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm0, %ymm0
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm6[0,1,2],ymm5[3],ymm6[4,5],ymm5[6],ymm6[7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm4, %xmm5
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u,u,10,11,6,7,4,5,6,7]
+; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,4,6,7]
+; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3,4,5,6,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0],ymm2[1,2,3,4,5,6,7],ymm0[8],ymm2[9,10,11,12,13,14,15]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
; AVX512-FCP-NEXT: vmovdqa %ymm3, (%rsi)
-; AVX512-FCP-NEXT: vmovdqa %ymm6, (%rdx)
-; AVX512-FCP-NEXT: vmovdqa %ymm8, (%rcx)
-; AVX512-FCP-NEXT: vmovdqa %ymm7, (%r8)
-; AVX512-FCP-NEXT: vmovdqa %ymm9, (%r9)
+; AVX512-FCP-NEXT: vmovdqa %ymm8, (%rdx)
+; AVX512-FCP-NEXT: vmovdqa %ymm10, (%rcx)
+; AVX512-FCP-NEXT: vmovdqa %ymm9, (%r8)
+; AVX512-FCP-NEXT: vmovdqa %ymm11, (%r9)
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512-FCP-NEXT: vmovdqa %ymm11, (%rax)
+; AVX512-FCP-NEXT: vmovdqa %ymm1, (%rax)
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-FCP-NEXT: vmovdqa %ymm0, (%rax)
; AVX512-FCP-NEXT: vzeroupper
@@ -3697,146 +3713,148 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-LABEL: load_i16_stride7_vf16:
; AVX512DQ-FCP: # %bb.0:
; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rdi), %zmm0
-; AVX512DQ-FCP-NEXT: vmovdqa64 128(%rdi), %zmm1
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm16 = [2,5,9,12,2,5,9,12]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [10,3,6,15,12,13,6,15]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [2,6,9,0,13,0,0,0]
-; AVX512DQ-FCP-NEXT: vpermd %zmm0, %zmm2, %zmm8
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [2,5,9,0,12,0,0,0]
-; AVX512DQ-FCP-NEXT: vpermd %zmm0, %zmm2, %zmm6
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [8,1,12,5,12,5,14,15]
-; AVX512DQ-FCP-NEXT: vpermd %zmm0, %zmm2, %zmm3
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [3,6,10,13,3,6,10,13]
-; AVX512DQ-FCP-NEXT: vpermd %zmm1, %zmm2, %zmm4
-; AVX512DQ-FCP-NEXT: vmovdqa 192(%rdi), %ymm2
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm2[0,1,0,2]
-; AVX512DQ-FCP-NEXT: vpbroadcastd {{.*#+}} ymm10 = [20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm13, %ymm5
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25,30,31,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm4[0,1,2,3,4,5,6],ymm5[7]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm16 = [2,6,9,13,2,6,9,13]
+; AVX512DQ-FCP-NEXT: vmovdqa64 128(%rdi), %zmm2
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [2,5,9,12,2,5,9,12]
+; AVX512DQ-FCP-NEXT: vpermd %zmm2, %zmm3, %zmm4
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm17 = [0,3,7,10,14,0,0,0]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm11 = [10,3,6,15,12,13,6,15]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [2,6,9,0,13,0,0,0]
+; AVX512DQ-FCP-NEXT: vpermd %zmm0, %zmm3, %zmm10
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [2,5,9,0,12,0,0,0]
+; AVX512DQ-FCP-NEXT: vpermd %zmm0, %zmm3, %zmm8
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [8,1,12,5,12,5,14,15]
+; AVX512DQ-FCP-NEXT: vpermd %zmm0, %zmm3, %zmm3
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm5 = [3,6,10,13,3,6,10,13]
+; AVX512DQ-FCP-NEXT: vpermd %zmm2, %zmm5, %zmm5
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,2,3,0,1,6,7,8,9,14,15,12,13,14,15,16,17,18,19,16,17,22,23,24,25,30,31,20,21,26,27]
+; AVX512DQ-FCP-NEXT: vmovdqa 192(%rdi), %ymm7
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm7[0,1,0,2]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm14, %ymm12
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm5, %ymm5
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm5[0,1,2,3,4,5,6],ymm12[7]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[6,7,12,13,2,3,16,17,30,31,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm4
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm5
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm4[0,1],ymm5[2],ymm4[3,4,5],ymm5[6],ymm4[7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm12, %xmm14
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm12[0,1,2,3],xmm14[4],xmm12[5],xmm14[6],xmm12[7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm12[0,1,14,15,12,13,10,11,8,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm12[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpor %ymm3, %ymm12, %ymm3
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm11[5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa 160(%rdi), %ymm11
-; AVX512DQ-FCP-NEXT: vmovdqa 128(%rdi), %ymm12
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm12[0,1],ymm11[2],ymm12[3,4,5],ymm11[6],ymm12[7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm14, %xmm15
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm14 = xmm14[0,1,2,3],xmm15[4],xmm14[5],xmm15[6],xmm14[7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm14 = xmm14[0,1,0,1,14,15,12,13,10,11,8,9,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm14, %ymm0, %ymm14
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,28,29]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm14[0,1,2,3,4,5,6],ymm13[7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[0,1,6,7,8,9,18,19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm4[0,1,2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7]
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm5
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm6
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm5[0,1],ymm6[2],ymm5[3,4,5],ymm6[6],ymm5[7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm13, %xmm15
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm13 = xmm13[0,1,2,3],xmm15[4],xmm13[5],xmm15[6],xmm13[7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm13[0,1,14,15,12,13,10,11,8,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm13[u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpor %ymm3, %ymm13, %ymm3
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm12[5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa 160(%rdi), %ymm12
+; AVX512DQ-FCP-NEXT: vmovdqa 128(%rdi), %ymm13
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm15 = ymm13[0,1],ymm12[2],ymm13[3,4,5],ymm12[6],ymm13[7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm15, %xmm1
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm15[0,1,2,3],xmm1[4],xmm15[5],xmm1[6],xmm15[7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,0,1,14,15,12,13,10,11,8,9,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm14 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,28,29]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm14[7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm8[0,1,6,7,8,9,18,19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm5[0,1,2],ymm6[3],ymm5[4,5],ymm6[6],ymm5[7]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm14, %xmm15
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm14 = xmm15[0],xmm14[1],xmm15[2,3,4,5],xmm14[6],xmm15[7]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm14 = ymm14[2,3,0,1,14,15,12,13,10,11],zero,zero,zero,zero,zero,zero,zero,zero,ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpor %ymm6, %ymm14, %ymm6
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm13 = ymm6[0],ymm13[1,2,3,4,5,6,7],ymm6[8],ymm13[9,10,11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm13[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm12[0,1,2],ymm11[3],ymm12[4,5],ymm11[6],ymm12[7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm13, %xmm14
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm13 = xmm14[0],xmm13[1],xmm14[2,3,4,5],xmm13[6],xmm14[7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm13[0,1,2,3,0,1,14,15,12,13,10,11,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm13, %ymm0, %ymm13
+; AVX512DQ-FCP-NEXT: vpor %ymm8, %ymm14, %ymm8
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm8[0],ymm1[1,2,3,4,5,6,7],ymm8[8],ymm1[9,10,11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm1[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm13[0,1,2],ymm12[3],ymm13[4,5],ymm12[6],ymm13[7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm14
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm14[0],xmm1[1],xmm14[2,3,4,5],xmm1[6],xmm14[7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,2,3,0,1,14,15,12,13,10,11,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} ymm14 = [2,5,2,5,2,5,2,5]
-; AVX512DQ-FCP-NEXT: vpermd %ymm2, %ymm14, %ymm14
+; AVX512DQ-FCP-NEXT: vpermd %ymm7, %ymm14, %ymm14
; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5,6],ymm14[7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm8[2,3,4,5,10,11,16,17],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm14[7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[2,3,4,5,10,11,16,17],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm6[0],ymm5[1],ymm6[2,3],ymm5[4],ymm6[5,6,7]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm14, %xmm15
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm14 = xmm14[0],xmm15[1],xmm14[2,3,4,5],xmm15[6],xmm14[7]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm14 = ymm14[4,5,2,3,0,1,14,15,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpor %ymm8, %ymm14, %ymm8
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm13 = ymm8[0],ymm13[1,2,3,4,5,6,7],ymm8[8],ymm13[9,10,11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm13[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm11[0],ymm12[1],ymm11[2,3],ymm12[4],ymm11[5,6,7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm13, %xmm14
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm13 = xmm13[0],xmm14[1],xmm13[2,3,4,5],xmm14[6],xmm13[7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm13[0,1,4,5,2,3,0,1,14,15,12,13,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm13, %ymm0, %ymm13
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm2[0,1,1,3]
+; AVX512DQ-FCP-NEXT: vpor %ymm10, %ymm14, %ymm10
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm10[0],ymm1[1,2,3,4,5,6,7],ymm10[8],ymm1[9,10,11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm1[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm12[0],ymm13[1],ymm12[2,3],ymm13[4],ymm12[5,6,7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm14
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm14[1],xmm1[2,3,4,5],xmm14[6],xmm1[7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,4,5,2,3,0,1,14,15,12,13,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm7[0,1,1,3]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm15 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,24,25]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm15 = ymm13[0,1,2,3,4,5,6],ymm15[7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm5[0],ymm4[1],ymm5[2,3,4],ymm4[5],ymm5[6,7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm13, %xmm7
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0],xmm13[1],xmm7[2],xmm13[3],xmm7[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpermd %zmm0, %zmm9, %zmm9
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm9[4,5,10,11,0,1,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[6,7,4,5,2,3,0,1,14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpor %ymm7, %ymm9, %ymm7
-; AVX512DQ-FCP-NEXT: vpermd %zmm1, %zmm16, %zmm13
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [0,3,7,10,14,0,0,0]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm15 = ymm7[0],ymm15[1,2,3,4,5,6,7],ymm7[8],ymm15[9,10,11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm15[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpermd %zmm0, %zmm9, %zmm9
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0],ymm12[1],ymm11[2,3,4],ymm12[5],ymm11[6,7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm11, %xmm12
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm12[0],xmm11[1],xmm12[2],xmm11[3],xmm12[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[0,1,6,7,4,5,2,3,0,1,14,15,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm14, %ymm10
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5,6],ymm10[7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm10, %xmm14
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm10, %xmm10
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm10 = xmm10[3,1,2,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm14 = xmm14[0],xmm10[0],xmm14[1],xmm10[1],xmm14[2],xmm10[2],xmm14[3],xmm10[3]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm9, %ymm9
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm14[0,1],ymm9[2,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm11 = ymm9[0],ymm11[1,2,3,4,5,6,7],ymm9[8],ymm11[9,10,11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm11[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm11 = [0,3,3,0,0,3,7,0]
-; AVX512DQ-FCP-NEXT: vpermd %ymm2, %ymm11, %ymm11
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u,u,u,u,u,u,0,1,6,7,8,9,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm13[u,u,0,1,6,7,8,9,14,15,u,u,u,u,u,u,u,u,16,17,22,23,24,25,30,31,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm11 = ymm13[0,1,2,3,4],ymm11[5,6,7],ymm13[8,9,10,11,12],ymm11[13,14,15]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm13, %xmm14
-; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm14, %xmm12
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm14 = [1,4,8,11,15,0,0,0]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm15 = [2,6,9,13,2,6,9,13]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm16 = [0,4,7,11,14,0,0,0]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm13[10,11,6,7,4,5,6,7,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
-; AVX512DQ-FCP-NEXT: vpermd %zmm0, %zmm16, %zmm13
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm15[7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm15 = ymm6[0],ymm5[1],ymm6[2,3,4],ymm5[5],ymm6[6,7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm15, %xmm9
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0],xmm15[1],xmm9[2],xmm15[3],xmm9[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpermd %zmm0, %zmm11, %zmm11
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm11[4,5,10,11,0,1,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm9[6,7,4,5,2,3,0,1,14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpor %ymm11, %ymm9, %ymm9
+; AVX512DQ-FCP-NEXT: vpermd %zmm0, %zmm17, %zmm11
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm9[0],ymm1[1,2,3,4,5,6,7],ymm9[8],ymm1[9,10,11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm1[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm12[0],ymm13[1],ymm12[2,3,4],ymm13[5],ymm12[6,7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm12
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm12[0],xmm1[1],xmm12[2],xmm1[3],xmm12[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,6,7,4,5,2,3,0,1,14,15,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm12[7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm6[0,1],ymm5[2],ymm6[3,4],ymm5[5],ymm6[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm13 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm12, %xmm14
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm12, %xmm12
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm12 = xmm12[3,1,2,3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm14 = xmm14[0],xmm12[0],xmm14[1],xmm12[1],xmm14[2],xmm12[2],xmm14[3],xmm12[3]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm11, %ymm11
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm14[0,1],ymm11[2,3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm11[0],ymm1[1,2,3,4,5,6,7],ymm11[8],ymm1[9,10,11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm1[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,3,3,0,0,3,7,0]
+; AVX512DQ-FCP-NEXT: vpermd %ymm7, %ymm1, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm14 = [0,1,0,1,6,7,8,9,14,15,0,1,6,7,8,9,16,17,16,17,22,23,24,25,30,31,16,17,22,23,24,25]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm14, %ymm1, %ymm1
+; AVX512DQ-FCP-NEXT: vpshufb %ymm14, %ymm4, %ymm4
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm4[0,1,2,3,4],ymm1[5,6,7],ymm4[8,9,10,11,12],ymm1[13,14,15]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm6[0,1],ymm5[2,3],ymm6[4,5],ymm5[6,7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm4, %xmm14
+; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm14, %xmm13
+; AVX512DQ-FCP-NEXT: vpermd %zmm2, %zmm16, %zmm2
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm14 = [0,4,7,11,14,0,0,0]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[10,11,6,7,4,5,6,7,u,u,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1],xmm4[2],xmm13[2],xmm4[3],xmm13[3]
+; AVX512DQ-FCP-NEXT: vpermd %zmm0, %zmm14, %zmm13
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1],ymm13[2,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpermd %zmm1, %zmm15, %zmm1
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm11 = ymm12[0],ymm11[1,2,3,4,5,6,7],ymm12[8],ymm11[9,10,11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm12[0,1,2,3],ymm11[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm12 = [0,4,7,0,0,4,7,0]
-; AVX512DQ-FCP-NEXT: # ymm12 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vpermd %ymm2, %ymm12, %ymm2
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,2,3,4,5,10,11,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,2,3,4,5,10,11,12,13,u,u,u,u,u,u,u,u,18,19,20,21,26,27,28,29,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7],ymm1[8,9,10,11,12],ymm2[13,14,15]
-; AVX512DQ-FCP-NEXT: vpermd %zmm0, %zmm14, %zmm0
-; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0,1,2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm4
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,u,u,10,11,6,7,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,4,6,7]
-; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm13[2,3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm4[0],ymm1[1,2,3,4,5,6,7],ymm4[8],ymm1[9,10,11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3],ymm1[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [0,4,7,0,0,4,7,0]
+; AVX512DQ-FCP-NEXT: # ymm4 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpermd %ymm7, %ymm4, %ymm4
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [4,5,2,3,4,5,10,11,12,13,2,3,4,5,10,11,20,21,18,19,20,21,26,27,28,29,18,19,20,21,26,27]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm4, %ymm4
+; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm2, %ymm2
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [1,4,8,11,15,0,0,0]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm4[5,6,7],ymm2[8,9,10,11,12],ymm4[13,14,15]
+; AVX512DQ-FCP-NEXT: vpermd %zmm0, %zmm7, %zmm0
+; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm6[0,1,2],ymm5[3],ymm6[4,5],ymm5[6],ymm6[7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm4, %xmm5
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u,u,10,11,6,7,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,4,6,7]
+; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0],ymm2[1,2,3,4,5,6,7],ymm0[8],ymm2[9,10,11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa %ymm3, (%rsi)
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm6, (%rdx)
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm8, (%rcx)
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm7, (%r8)
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm9, (%r9)
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm8, (%rdx)
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm10, (%rcx)
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm9, (%r8)
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm11, (%r9)
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm11, (%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm1, (%rax)
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, (%rax)
; AVX512DQ-FCP-NEXT: vzeroupper
@@ -6405,19 +6423,18 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,14,15,12,13,10,11,8,9,8,9,8,9,8,9]
; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm0, %xmm4
; AVX2-FCP-NEXT: vpmovsxbw {{.*#+}} xmm0 = [65535,65535,65535,65535,65535,0,0,0]
-; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm4, %ymm2, %ymm15
+; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm4, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0,1],ymm5[2],ymm6[3,4,5],ymm5[6],ymm6[7]
-; AVX2-FCP-NEXT: vmovdqa %ymm6, %ymm9
; AVX2-FCP-NEXT: vmovdqa %ymm5, %ymm10
; AVX2-FCP-NEXT: vextracti128 $1, %ymm2, %xmm4
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4],xmm2[5],xmm4[6],xmm2[7]
; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm2, %xmm2
-; AVX2-FCP-NEXT: vmovdqa %ymm7, %ymm6
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm8[0],ymm7[1],ymm8[2,3,4],ymm7[5],ymm8[6,7]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
; AVX2-FCP-NEXT: vpshufb %ymm1, %ymm3, %ymm1
; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm2, %ymm1, %ymm1
-; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqu %ymm1, (%rsp) # 32-byte Spill
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm13[0,1],ymm14[2],ymm13[3,4],ymm14[5],ymm13[6,7]
; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [2,5,1,0,4,0,0,0]
; AVX2-FCP-NEXT: vpermd %ymm1, %ymm2, %ymm1
@@ -6430,22 +6447,21 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm4, %xmm4
; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm4, %ymm1, %ymm1
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa %ymm9, %ymm7
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm9[0,1,2],ymm10[3],ymm9[4,5],ymm10[6],ymm9[7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm6[0,1,2],ymm10[3],ymm6[4,5],ymm10[6],ymm6[7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm4
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm4[0],xmm1[1],xmm4[2,3,4,5],xmm1[6],xmm4[7]
; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm1, %xmm1
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm8[0,1],ymm6[2],ymm8[3,4],ymm6[5],ymm8[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm8[0,1],ymm7[2],ymm8[3,4],ymm7[5],ymm8[6,7]
; AVX2-FCP-NEXT: vpermd %ymm4, %ymm2, %ymm2
; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm1, %ymm2, %ymm1
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm13[0,1],ymm14[2,3],ymm13[4,5],ymm14[6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm13[0,1],ymm14[2,3],ymm13[4,5],ymm14[6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm11[0],ymm12[1],ymm11[2,3],ymm12[4],ymm11[5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm11[0],ymm12[1],ymm11[2,3],ymm12[4],ymm11[5,6,7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3,4,5],xmm3[6],xmm2[7]
; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [2,6,1,0,5,0,0,0]
@@ -6454,20 +6470,20 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm1, %ymm1
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [4,5,2,3,0,1,14,15,12,13,12,13,12,13,12,13]
; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm2, %xmm2
-; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm2, %ymm1, %ymm9
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm10[0],ymm7[1],ymm10[2,3],ymm7[4],ymm10[5,6,7]
+; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm2, %ymm1, %ymm1
+; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm10[0],ymm6[1],ymm10[2,3],ymm6[4],ymm10[5,6,7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3,4,5],xmm2[6],xmm1[7]
; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm1, %xmm1
; AVX2-FCP-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa %ymm6, %ymm5
-; AVX2-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1],ymm6[2,3],ymm8[4,5],ymm6[6,7]
+; AVX2-FCP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1],ymm7[2,3],ymm8[4,5],ymm7[6,7]
; AVX2-FCP-NEXT: vpermd %ymm2, %ymm3, %ymm2
; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm2
-; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm1, %ymm2, %ymm6
+; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm1, %ymm2, %ymm5
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm11[0],ymm12[1],ymm11[2,3,4],ymm12[5],ymm11[6,7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3],xmm2[4,5,6,7]
@@ -6477,246 +6493,238 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [6,7,4,5,2,3,0,1,14,15,14,15,14,15,14,15]
; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm1, %xmm1
-; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm1, %ymm2, %ymm1
-; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm10[0],ymm7[1],ymm10[2,3,4],ymm7[5],ymm10[6,7]
+; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm1, %ymm2, %ymm14
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm10[0],ymm6[1],ymm10[2,3,4],ymm6[5],ymm10[6,7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3],xmm2[4,5,6,7]
; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm1, %xmm1
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1,2],ymm5[3],ymm8[4,5],ymm5[6],ymm8[7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1,2],ymm7[3],ymm8[4,5],ymm7[6],ymm8[7]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[1,3,2,3]
; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm2
-; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm1, %ymm2, %ymm8
+; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm1, %ymm2, %ymm10
; AVX2-FCP-NEXT: vmovdqa 128(%rdi), %ymm0
-; AVX2-FCP-NEXT: vmovdqa 160(%rdi), %ymm1
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0,1,2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7]
-; AVX2-FCP-NEXT: vmovdqa %ymm1, %ymm5
-; AVX2-FCP-NEXT: vmovdqa %ymm0, %ymm7
+; AVX2-FCP-NEXT: vmovdqa 160(%rdi), %ymm15
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm15[0,1,2],ymm0[3],ymm15[4,5],ymm0[6],ymm15[7]
+; AVX2-FCP-NEXT: vmovdqa %ymm0, %ymm8
; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm13 = [3,6,2,5,3,6,2,5]
; AVX2-FCP-NEXT: # ymm13 = mem[0,1,0,1]
; AVX2-FCP-NEXT: vpermd %ymm2, %ymm13, %ymm2
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,0,1,6,7,8,9,14,15,12,13,14,15,16,17,18,19,16,17,22,23,24,25,30,31,28,29,30,31]
-; AVX2-FCP-NEXT: vpshufb %ymm1, %ymm2, %ymm12
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,2,3,0,1,6,7,8,9,14,15,12,13,14,15,16,17,18,19,16,17,22,23,24,25,30,31,20,21,26,27]
+; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm12
; AVX2-FCP-NEXT: vmovdqa 192(%rdi), %ymm4
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm4[0,1,0,2]
-; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} ymm2 = [20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27]
-; AVX2-FCP-NEXT: vpshufb %ymm2, %ymm0, %ymm11
-; AVX2-FCP-NEXT: vmovdqa %ymm2, %ymm10
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm4[0,1,0,2]
+; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm1, %ymm11
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm12[0,1,2,3,4,5,6],ymm11[7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm15[0,1,2,3,4],ymm11[5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 352(%rdi), %ymm2
-; AVX2-FCP-NEXT: vmovdqa 384(%rdi), %ymm3
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm3[0,1,2],ymm2[3],ymm3[4,5],ymm2[6],ymm3[7]
-; AVX2-FCP-NEXT: vmovdqa %ymm3, %ymm12
+; AVX2-FCP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm0 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm0 = mem[0,1,2,3,4],ymm11[5,6,7]
+; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqa 352(%rdi), %ymm6
+; AVX2-FCP-NEXT: vmovdqa 384(%rdi), %ymm2
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm2[0,1,2],ymm6[3],ymm2[4,5],ymm6[6],ymm2[7]
; AVX2-FCP-NEXT: vmovdqa %ymm2, %ymm11
-; AVX2-FCP-NEXT: vpermd %ymm14, %ymm13, %ymm13
-; AVX2-FCP-NEXT: vpshufb %ymm1, %ymm13, %ymm1
+; AVX2-FCP-NEXT: vpermd %ymm9, %ymm13, %ymm9
+; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm9, %ymm9
; AVX2-FCP-NEXT: vmovdqa 416(%rdi), %ymm2
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm2[0,1,0,2]
-; AVX2-FCP-NEXT: vmovdqa %ymm2, %ymm3
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm14, %ymm15
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm15[7]
-; AVX2-FCP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm1 = mem[0,1,2,3,4],ymm1[5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa %ymm7, %ymm10
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm7[0,1],ymm5[2],ymm7[3,4,5],ymm5[6],ymm7[7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm15
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm15[4],xmm1[5],xmm15[6],xmm1[7]
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [0,1,0,1,14,15,12,13,10,11,8,9,12,13,14,15]
-; AVX2-FCP-NEXT: vpshufb %xmm15, %xmm1, %xmm1
-; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm2[0,1,0,2]
+; AVX2-FCP-NEXT: vmovdqa %ymm2, %ymm7
+; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm0, %ymm3
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm9[0,1,2,3,4,5,6],ymm3[7]
+; AVX2-FCP-NEXT: vpblendd $31, (%rsp), %ymm3, %ymm2 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm2 = mem[0,1,2,3,4],ymm3[5,6,7]
+; AVX2-FCP-NEXT: vmovdqu %ymm2, (%rsp) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm8[0,1],ymm15[2],ymm8[3,4,5],ymm15[6],ymm8[7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm3, %xmm9
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm9[4],xmm3[5],xmm9[6],xmm3[7]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [0,1,0,1,14,15,12,13,10,11,8,9,12,13,14,15]
+; AVX2-FCP-NEXT: vpshufb %xmm9, %xmm3, %xmm3
+; AVX2-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} ymm2 = [22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29]
+; AVX2-FCP-NEXT: vpshufb %ymm2, %ymm1, %ymm1
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5,6],ymm1[7]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm3[0],ymm1[1,2,3,4,5,6,7],ymm3[8],ymm1[9,10,11,12,13,14,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm6[0,1],ymm11[2],ymm6[3,4,5],ymm11[6],ymm6[7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4],xmm1[5],xmm3[6],xmm1[7]
+; AVX2-FCP-NEXT: vpshufb %xmm9, %xmm1, %xmm1
; AVX2-FCP-NEXT: vpshufb %ymm2, %ymm0, %ymm0
+; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5,6],ymm0[7]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7],ymm1[8],ymm0[9,10,11,12,13,14,15]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm11[0,1],ymm12[2],ymm11[3,4,5],ymm12[6],ymm11[7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
-; AVX2-FCP-NEXT: vpshufb %xmm15, %xmm0, %xmm0
-; AVX2-FCP-NEXT: vpshufb %ymm2, %ymm14, %ymm1
-; AVX2-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7],ymm1[8],ymm0[9,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm7[0,1,2],ymm5[3],ymm7[4,5],ymm5[6],ymm7[7]
-; AVX2-FCP-NEXT: vmovdqa %ymm5, %ymm7
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1,2],ymm15[3],ymm8[4,5],ymm15[6],ymm8[7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3,4,5],xmm0[6],xmm1[7]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,0,1,14,15,12,13,10,11,12,13,14,15]
; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX2-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm2 = [2,5,2,5,2,5,2,5]
-; AVX2-FCP-NEXT: vmovdqu %ymm4, (%rsp) # 32-byte Spill
-; AVX2-FCP-NEXT: vpermd %ymm4, %ymm2, %ymm14
-; AVX2-FCP-NEXT: vpshufhw {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm14[7]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm9[0],ymm0[1,2,3,4,5,6,7],ymm9[8],ymm0[9,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpermd %ymm4, %ymm2, %ymm3
+; AVX2-FCP-NEXT: vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm3[7]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm3[0],ymm0[1,2,3,4,5,6,7],ymm3[8],ymm0[9,10,11,12,13,14,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm11[0,1,2],ymm12[3],ymm11[4,5],ymm12[6],ymm11[7]
-; AVX2-FCP-NEXT: vmovdqa %ymm12, %ymm14
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm9
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm9[0],xmm0[1],xmm9[2,3,4,5],xmm0[6],xmm9[7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0,1,2],ymm11[3],ymm6[4,5],ymm11[6],ymm6[7]
+; AVX2-FCP-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm3
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0],xmm0[1],xmm3[2,3,4,5],xmm0[6],xmm3[7]
; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX2-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpermd %ymm3, %ymm2, %ymm1
+; AVX2-FCP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpermd %ymm7, %ymm2, %ymm1
; AVX2-FCP-NEXT: vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm6[0],ymm0[1,2,3,4,5,6,7],ymm6[8],ymm0[9,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm5[0],ymm0[1,2,3,4,5,6,7],ymm5[8],ymm0[9,10,11,12,13,14,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm5[0],ymm10[1],ymm5[2,3],ymm10[4],ymm5[5,6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm15[0],ymm8[1],ymm15[2,3],ymm8[4],ymm15[5,6,7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5],xmm1[6],xmm0[7]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,4,5,2,3,0,1,14,15,12,13,12,13,14,15]
; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX2-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm4[0,1,1,3]
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm4[0,1,1,3]
; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} ymm2 = [18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25]
-; AVX2-FCP-NEXT: vpshufb %ymm2, %ymm6, %ymm9
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm9[7]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm4[0],ymm0[1,2,3,4,5,6,7],ymm4[8],ymm0[9,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-FCP-NEXT: vpshufb %ymm2, %ymm5, %ymm3
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm3[7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm14[0],ymm0[1,2,3,4,5,6,7],ymm14[8],ymm0[9,10,11,12,13,14,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm14[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm12[0],ymm11[1],ymm12[2,3],ymm11[4],ymm12[5,6,7]
-; AVX2-FCP-NEXT: vmovdqa %ymm11, %ymm12
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm4
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2,3,4,5],xmm4[6],xmm0[7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm11[0],ymm6[1],ymm11[2,3],ymm6[4],ymm11[5,6,7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm3
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1],xmm0[2,3,4,5],xmm3[6],xmm0[7]
; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX2-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm3[0,1,1,3]
-; AVX2-FCP-NEXT: vpshufb %ymm2, %ymm4, %ymm1
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm7[0,1,1,3]
+; AVX2-FCP-NEXT: vpshufb %ymm2, %ymm3, %ymm1
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm8[0],ymm0[1,2,3,4,5,6,7],ymm8[8],ymm0[9,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm10[0],ymm0[1,2,3,4,5,6,7],ymm10[8],ymm0[9,10,11,12,13,14,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm15[0,1],ymm11[2],ymm15[3,4],ymm11[5],ymm15[6,7]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm1[8,9,4,5,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm1
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0],ymm10[1],ymm5[2,3,4],ymm10[5],ymm5[6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm2, %xmm8
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm8[0],xmm2[1],xmm8[2],xmm2[3],xmm8[4,5,6,7]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[0,1,6,7,4,5,2,3,0,1,14,15,12,13,14,15]
-; AVX2-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm6[7]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm0[0,1],ymm3[2],ymm0[3,4,5],ymm3[6],ymm0[7]
-; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [0,3,7,2,6,0,0,0]
-; AVX2-FCP-NEXT: vpermd %ymm6, %ymm9, %ymm6
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
-; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm6, %ymm6
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1],xmm6[2,3]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm6[0],ymm2[1,2,3,4,5,6,7],ymm6[8],ymm2[9,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm1[8,9,4,5,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm1
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm13[0,1],ymm5[2],ymm13[3,4,5],ymm5[6],ymm13[7]
-; AVX2-FCP-NEXT: vpermd %ymm2, %ymm9, %ymm2
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm14[0],ymm12[1],ymm14[2,3,4],ymm12[5],ymm14[6,7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm6, %xmm9
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm9[0],xmm6[1],xmm9[2],xmm6[3],xmm9[4,5,6,7]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[0,1,6,7,4,5,2,3,0,1,14,15,12,13,14,15]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27]
-; AVX2-FCP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm6[0,1,2,3,4,5,6],ymm4[7]
-; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm2, %ymm2
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm4[1,2,3,4,5,6,7],ymm2[8],ymm4[9,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,3,3,3,0,3,7,7]
-; AVX2-FCP-NEXT: vpermd (%rsp), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1],ymm10[2],ymm7[3,4],ymm10[5],ymm7[6,7]
-; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [2,5,1,4,2,5,1,4]
-; AVX2-FCP-NEXT: # ymm8 = mem[0,1,0,1]
-; AVX2-FCP-NEXT: vpermd %ymm6, %ymm8, %ymm6
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25,30,31,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm6[0,1,2,3,4],ymm1[5,6,7],ymm6[8,9,10,11,12],ymm1[13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm15[0,1],ymm11[2,3],ymm15[4,5],ymm11[6,7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm6, %xmm15
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm13[0,1],ymm14[2],ymm13[3,4],ymm14[5],ymm13[6,7]
; AVX2-FCP-NEXT: vmovd {{.*#+}} xmm2 = [8,9,4,5,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm15, %xmm15
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[10,11,6,7,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm15[0],xmm6[1],xmm15[1],xmm6[2],xmm15[2],xmm6[3],xmm15[3]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm15 = ymm0[0,1,2],ymm3[3],ymm0[4,5],ymm3[6],ymm0[7]
-; AVX2-FCP-NEXT: vmovdqa %ymm0, %ymm9
-; AVX2-FCP-NEXT: vmovdqa %ymm3, %ymm7
+; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm0, %xmm1
+; AVX2-FCP-NEXT: vmovdqa %xmm2, %xmm4
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm15[0],ymm8[1],ymm15[2,3,4],ymm8[5],ymm15[6,7]
+; AVX2-FCP-NEXT: vmovdqa %ymm8, %ymm11
+; AVX2-FCP-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3],xmm2[4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,6,7,4,5,2,3,0,1,14,15,12,13,14,15]
+; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm5[7]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm12[0,1],ymm9[2],ymm12[3,4,5],ymm9[6],ymm12[7]
+; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm10 = [0,3,7,2,6,0,0,0]
+; AVX2-FCP-NEXT: vpermd %ymm5, %ymm10, %ymm5
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm5[0],ymm1[1,2,3,4,5,6,7],ymm5[8],ymm1[9,10,11,12,13,14,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm0, %xmm1
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0,1],ymm7[2],ymm8[3,4,5],ymm7[6],ymm8[7]
+; AVX2-FCP-NEXT: vpermd %ymm1, %ymm10, %ymm1
+; AVX2-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm4[0],ymm6[1],ymm4[2,3,4],ymm6[5],ymm4[6,7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm5, %xmm10
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm10[0],xmm5[1],xmm10[2],xmm5[3],xmm10[4,5,6,7]
+; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm5, %xmm2
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27]
+; AVX2-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3,4,5,6,7],ymm1[8],ymm2[9,10,11,12,13,14,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm0 = [0,3,3,3,0,3,7,7]
+; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm15[0,1],ymm11[2],ymm15[3,4],ymm11[5],ymm15[6,7]
+; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [2,5,1,4,2,5,1,4]
+; AVX2-FCP-NEXT: # ymm10 = mem[0,1,0,1]
+; AVX2-FCP-NEXT: vpermd %ymm3, %ymm10, %ymm3
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25,30,31,u,u,u,u,u,u]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm3[0,1,2,3,4],ymm1[5,6,7],ymm3[8,9,10,11,12],ymm1[13,14,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm13[0,1],ymm14[2,3],ymm13[4,5],ymm14[6,7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm3, %xmm5
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[8,9,4,5,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-FCP-NEXT: vmovd {{.*#+}} xmm11 = [10,11,6,7,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX2-FCP-NEXT: vpshufb %xmm11, %xmm3, %xmm3
+; AVX2-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm12[0,1,2],ymm9[3],ymm12[4,5],ymm9[6],ymm12[7]
; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,4,7,3,6,0,0,0]
-; AVX2-FCP-NEXT: vpermd %ymm15, %ymm2, %ymm15
+; AVX2-FCP-NEXT: vpermd %ymm5, %ymm2, %ymm5
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29]
-; AVX2-FCP-NEXT: vpshufb %ymm0, %ymm15, %ymm15
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm15[0],ymm1[1,2,3,4,5,6,7],ymm15[8],ymm1[9,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm6 = xmm6[0,1],xmm15[2,3]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-FCP-NEXT: vpshufb %ymm0, %ymm5, %ymm5
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm5[0],ymm1[1,2,3,4,5,6,7],ymm5[8],ymm1[9,10,11,12,13,14,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0,1],xmm5[2,3]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,3,3,3,0,3,7,7]
-; AVX2-FCP-NEXT: vpermd %ymm4, %ymm1, %ymm1
+; AVX2-FCP-NEXT: vpermd %ymm5, %ymm1, %ymm1
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25]
-; AVX2-FCP-NEXT: vmovdqa %ymm12, %ymm11
-; AVX2-FCP-NEXT: vmovdqa %ymm14, %ymm12
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm14[0,1],ymm11[2],ymm14[3,4],ymm11[5],ymm14[6,7]
-; AVX2-FCP-NEXT: vpermd %ymm10, %ymm8, %ymm8
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25,30,31,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm8[0,1,2,3,4],ymm1[5,6,7],ymm8[8,9,10,11,12],ymm1[13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm13[0,1,2],ymm5[3],ymm13[4,5],ymm5[6],ymm13[7]
-; AVX2-FCP-NEXT: vpermd %ymm8, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm6[2],ymm4[3,4],ymm6[5],ymm4[6,7]
+; AVX2-FCP-NEXT: vpermd %ymm3, %ymm10, %ymm3
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25,30,31,u,u,u,u,u,u]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm3[0,1,2,3,4],ymm1[5,6,7],ymm3[8,9,10,11,12],ymm1[13,14,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm8[0,1,2],ymm7[3],ymm8[4,5],ymm7[6],ymm8[7]
+; AVX2-FCP-NEXT: vmovdqa %ymm8, %ymm4
+; AVX2-FCP-NEXT: vpermd %ymm3, %ymm2, %ymm2
; AVX2-FCP-NEXT: vpshufb %ymm0, %ymm2, %ymm0
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm15[0,1],ymm14[2,3],ymm15[4,5],ymm14[6,7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm2, %xmm8
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[8,9,4,5,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[10,11,6,7,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm13[0,1],ymm14[2,3],ymm13[4,5],ymm14[6,7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[8,9,4,5,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-FCP-NEXT: vpshufb %xmm11, %xmm2, %xmm2
+; AVX2-FCP-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm0 = mem[0,1],ymm0[2,3],mem[4,5],ymm0[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-FCP-NEXT: vpblendd $204, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm0 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm0 = ymm15[0,1],mem[2,3],ymm15[4,5],mem[6,7]
; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [0,4,7,0,0,4,7,0]
; AVX2-FCP-NEXT: # ymm2 = mem[0,1,0,1]
-; AVX2-FCP-NEXT: vpermd (%rsp), %ymm2, %ymm3 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm6 = [0,0,18,19,20,21,26,27,0,0,18,19,20,21,26,27,0,0,18,19,20,21,26,27,0,0,18,19,20,21,26,27]
-; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm3, %ymm3
-; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [2,6,1,5,2,6,1,5]
-; AVX2-FCP-NEXT: # ymm5 = mem[0,1,0,1]
-; AVX2-FCP-NEXT: vpermd %ymm0, %ymm5, %ymm0
-; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm1 = [28,29,18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29,18,19,20,21,26,27]
-; AVX2-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm0
+; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm3 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm15 = [0,0,18,19,20,21,26,27,0,0,18,19,20,21,26,27,0,0,18,19,20,21,26,27,0,0,18,19,20,21,26,27]
+; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm3, %ymm3
+; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [2,6,1,5,2,6,1,5]
+; AVX2-FCP-NEXT: # ymm6 = mem[0,1,0,1]
+; AVX2-FCP-NEXT: vpermd %ymm0, %ymm6, %ymm0
+; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm7 = [28,29,18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29,18,19,20,21,26,27]
+; AVX2-FCP-NEXT: vpshufb %ymm7, %ymm0, %ymm0
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm3[5,6,7],ymm0[8,9,10,11,12],ymm3[13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm7[0],ymm9[1],ymm7[2,3],ymm9[4],ymm7[5,6,7]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $183, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm8 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm8 = mem[0,1,2],ymm7[3],mem[4,5],ymm7[6],mem[7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm9[0],ymm12[1],ymm9[2,3],ymm12[4],ymm9[5,6,7]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd $183, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm8 = mem[0,1,2],ymm8[3],mem[4,5],ymm8[6],mem[7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm8, %xmm9
; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} xmm10 = [10,11,6,7,10,11,6,7,10,11,6,7,10,11,6,7]
; AVX2-FCP-NEXT: vpshufb %xmm10, %xmm9, %xmm9
@@ -6724,52 +6732,53 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpunpckhwd {{.*#+}} xmm8 = xmm8[4],xmm9[4],xmm8[5],xmm9[5],xmm8[6],xmm9[6],xmm8[7],xmm9[7]
; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [1,4,0,3,7,0,0,0]
; AVX2-FCP-NEXT: vpermd %ymm3, %ymm9, %ymm3
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
-; AVX2-FCP-NEXT: vpshufb %ymm7, %ymm3, %ymm3
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
+; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm3
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm3[0],ymm0[1,2,3,4,5,6,7],ymm3[8],ymm0[9,10,11,12,13,14,15]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm8[0,1],xmm3[2,3]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX2-FCP-NEXT: vpermd %ymm4, %ymm2, %ymm2
-; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm2, %ymm2
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm12[0,1],ymm11[2,3],ymm12[4,5],ymm11[6,7]
-; AVX2-FCP-NEXT: vpermd %ymm3, %ymm5, %ymm3
-; AVX2-FCP-NEXT: vpshufb %ymm1, %ymm3, %ymm3
+; AVX2-FCP-NEXT: vpermd %ymm5, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm3 = mem[0,1],ymm3[2,3],mem[4,5],ymm3[6,7]
+; AVX2-FCP-NEXT: vpermd %ymm3, %ymm6, %ymm3
+; AVX2-FCP-NEXT: vpshufb %ymm7, %ymm3, %ymm3
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6,7],ymm3[8,9,10,11,12],ymm2[13,14,15]
-; AVX2-FCP-NEXT: vpblendd $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm3 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm3 = mem[0],ymm13[1],mem[2,3],ymm13[4],mem[5,6,7]
+; AVX2-FCP-NEXT: vpblendd $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm3 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm3 = mem[0],ymm4[1],mem[2,3],ymm4[4],mem[5,6,7]
; AVX2-FCP-NEXT: vpermd %ymm3, %ymm9, %ymm3
-; AVX2-FCP-NEXT: vpshufb %ymm7, %ymm3, %ymm3
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm15[0,1,2],ymm14[3],ymm15[4,5],ymm14[6],ymm15[7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm4, %xmm5
-; AVX2-FCP-NEXT: vpshufb %xmm10, %xmm5, %xmm5
+; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm3
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm13[0,1,2],ymm14[3],ymm13[4,5],ymm14[6],ymm13[7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm4, %xmm6
+; AVX2-FCP-NEXT: vpshufb %xmm10, %xmm6, %xmm6
; AVX2-FCP-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,4,6,7]
-; AVX2-FCP-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; AVX2-FCP-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0],ymm2[1,2,3,4,5,6,7],ymm3[8],ymm2[9,10,11,12,13,14,15]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm1, 32(%rsi)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm1, (%rsi)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm1, 32(%rdx)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm1, (%rdx)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm1, 32(%rcx)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm1, (%rcx)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm1, 32(%r8)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm1, (%r8)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm1, 32(%r9)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm1, (%r9)
+; AVX2-FCP-NEXT: vmovups (%rsp), %ymm3 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm3, 32(%rsi)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm3, (%rsi)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm3, 32(%rdx)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm3, (%rdx)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm3, 32(%rcx)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm3, (%rcx)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm3, 32(%r8)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm3, (%r8)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm3, 32(%r9)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm3, (%r9)
; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm1, 32(%rax)
+; AVX2-FCP-NEXT: vmovdqa %ymm1, 32(%rax)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm1, (%rax)
; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
@@ -7141,296 +7150,296 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
;
; AVX512-FCP-LABEL: load_i16_stride7_vf32:
; AVX512-FCP: # %bb.0:
-; AVX512-FCP-NEXT: vmovdqa64 320(%rdi), %zmm30
-; AVX512-FCP-NEXT: vmovdqa64 64(%rdi), %zmm31
+; AVX512-FCP-NEXT: vmovdqa64 320(%rdi), %zmm28
+; AVX512-FCP-NEXT: vmovdqa64 64(%rdi), %zmm30
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [2,6,9,13,2,6,9,13]
-; AVX512-FCP-NEXT: vmovdqa64 128(%rdi), %zmm25
+; AVX512-FCP-NEXT: vmovdqa64 128(%rdi), %zmm23
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm16 = [2,5,9,12,2,5,9,12]
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm18 = [10,3,6,15,12,13,6,15]
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm19 = [3,6,10,13,3,6,10,13]
-; AVX512-FCP-NEXT: vmovdqa64 384(%rdi), %zmm14
+; AVX512-FCP-NEXT: vmovdqa64 384(%rdi), %zmm8
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm0 = [2,6,9,0,13,0,0,0]
-; AVX512-FCP-NEXT: vpermd %zmm31, %zmm0, %zmm10
-; AVX512-FCP-NEXT: vpermd %zmm14, %zmm1, %zmm12
+; AVX512-FCP-NEXT: vpermd %zmm30, %zmm0, %zmm7
+; AVX512-FCP-NEXT: vpermd %zmm8, %zmm1, %zmm13
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm0 = [1,0,0,0,4,8,11,15]
-; AVX512-FCP-NEXT: vpermd %zmm30, %zmm0, %zmm15
+; AVX512-FCP-NEXT: vpermd %zmm28, %zmm0, %zmm14
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm0 = [2,5,9,0,12,0,0,0]
-; AVX512-FCP-NEXT: vpermd %zmm31, %zmm0, %zmm3
-; AVX512-FCP-NEXT: vpermd %zmm14, %zmm16, %zmm7
+; AVX512-FCP-NEXT: vpermd %zmm30, %zmm0, %zmm10
+; AVX512-FCP-NEXT: vpermd %zmm8, %zmm16, %zmm9
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm0 = [0,0,0,0,4,7,11,14]
-; AVX512-FCP-NEXT: vpermd %zmm30, %zmm0, %zmm2
+; AVX512-FCP-NEXT: vpermd %zmm28, %zmm0, %zmm2
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm0 = [8,1,12,5,12,5,14,15]
-; AVX512-FCP-NEXT: vpermd %zmm31, %zmm0, %zmm0
-; AVX512-FCP-NEXT: vpermd %zmm25, %zmm19, %zmm4
-; AVX512-FCP-NEXT: vmovdqa64 192(%rdi), %ymm28
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm28[0,1,0,2]
-; AVX512-FCP-NEXT: vpbroadcastd {{.*#+}} ymm1 = [20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27]
-; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm11, %ymm5
-; AVX512-FCP-NEXT: vmovdqa64 %ymm1, %ymm23
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[0,1,2,3,0,1,6,7,8,9,14,15,12,13,14,15,16,17,18,19,16,17,22,23,24,25,30,31,u,u,u,u]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm4[0,1,2,3,4,5,6],ymm5[7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[6,7,12,13,2,3,16,17,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm4
-; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm5
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm4[0,1],ymm5[2],ymm4[3,4,5],ymm5[6],ymm4[7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm8, %xmm9
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm9[4],xmm8[5],xmm9[6],xmm8[7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[0,1,14,15,12,13,10,11,8,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm8[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vporq %ymm0, %ymm8, %ymm22
-; AVX512-FCP-NEXT: vpbroadcastw 252(%rdi), %xmm0
-; AVX512-FCP-NEXT: vmovdqa 224(%rdi), %xmm13
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm13[u,u,u,u,u,u,u,u,0,1,14,15,12,13,14,15]
-; AVX512-FCP-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm8[2],xmm0[2],xmm8[3],xmm0[3]
+; AVX512-FCP-NEXT: vpermd %zmm30, %zmm0, %zmm3
+; AVX512-FCP-NEXT: vpermd %zmm23, %zmm19, %zmm4
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [0,1,2,3,0,1,6,7,8,9,14,15,12,13,14,15,16,17,18,19,16,17,22,23,24,25,30,31,20,21,26,27]
+; AVX512-FCP-NEXT: vmovdqa64 192(%rdi), %ymm25
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm25[0,1,0,2]
+; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm0, %ymm6
+; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm4, %ymm4
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm4[0,1,2,3,4,5,6],ymm6[7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[6,7,12,13,2,3,16,17,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm3
+; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm4
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm3[0,1],ymm4[2],ymm3[3,4,5],ymm4[6],ymm3[7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm11, %xmm12
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm11[0,1,2,3],xmm12[4],xmm11[5],xmm12[6],xmm11[7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[0,1,14,15,12,13,10,11,8,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm11[u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512-FCP-NEXT: vporq %ymm6, %ymm11, %ymm22
+; AVX512-FCP-NEXT: vpbroadcastw 252(%rdi), %xmm6
+; AVX512-FCP-NEXT: vmovdqa 224(%rdi), %xmm12
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm12[u,u,u,u,u,u,u,u,0,1,14,15,12,13,14,15]
+; AVX512-FCP-NEXT: vpunpckhdq {{.*#+}} xmm6 = xmm11[2],xmm6[2],xmm11[3],xmm6[3]
; AVX512-FCP-NEXT: movw $992, %ax # imm = 0x3E0
; AVX512-FCP-NEXT: kmovw %eax, %k1
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm6, %zmm22 {%k1}
-; AVX512-FCP-NEXT: vmovdqa 256(%rdi), %ymm6
-; AVX512-FCP-NEXT: vmovdqa 288(%rdi), %ymm8
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1],ymm6[2,3],ymm8[4,5],ymm6[6,7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm9
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm9[0,1,2],xmm0[3],xmm9[4],xmm0[5],xmm9[6,7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,10,11,8,9,6,7,4,5,u,u]
+; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm6, %zmm5, %zmm22 {%k1}
+; AVX512-FCP-NEXT: vmovdqa 256(%rdi), %ymm5
+; AVX512-FCP-NEXT: vmovdqa 288(%rdi), %ymm6
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm6[0,1],ymm5[2,3],ymm6[4,5],ymm5[6,7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm11, %xmm15
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm15[0,1,2],xmm11[3],xmm15[4],xmm11[5],xmm15[6,7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u,u,10,11,8,9,6,7,4,5,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[0,1,6,7,8,9,14,15,8,9,14,15,4,5,2,3,16,17,22,23,24,25,30,31,24,25,30,31,20,21,18,19]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[3,4,5,6],xmm2[7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm2[0,1,2],xmm11[3,4,5,6],xmm2[7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm2[4,5,6,7]
; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} ymm2 = [16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31]
-; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm7, %ymm7
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm7[6,7]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm24
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm7
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm7[0],xmm0[1],xmm7[2,3,4,5],xmm0[6],xmm7[7]
+; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm9, %ymm9
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm11[0,1,2,3,4,5],ymm9[6,7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm1, %ymm26
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm3[0,1,2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm11, %xmm15
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm15[0],xmm11[1],xmm15[2,3,4,5],xmm11[6],xmm15[7]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [2,3,0,1,14,15,12,13,10,11,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[0,1,6,7,8,9,18,19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-FCP-NEXT: vporq %ymm3, %ymm0, %ymm20
-; AVX512-FCP-NEXT: vmovdqa 160(%rdi), %ymm3
-; AVX512-FCP-NEXT: vmovdqa 128(%rdi), %ymm9
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm9[0,1],ymm3[2],ymm9[3,4,5],ymm3[6],ymm9[7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm7
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm7[4],xmm0[5],xmm7[6],xmm0[7]
-; AVX512-FCP-NEXT: vmovdqa 240(%rdi), %xmm7
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,14,15,12,13,10,11,8,9,u,u,u,u]
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,28,29]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm11[7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm7[0],xmm13[1],xmm7[2,3,4,5,6,7]
-; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm11, %xmm1
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm27
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1,2],ymm6[3],ymm8[4,5],ymm6[6],ymm8[7]
+; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm11, %ymm11
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[0,1,6,7,8,9,18,19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-FCP-NEXT: vporq %ymm10, %ymm11, %ymm20
+; AVX512-FCP-NEXT: vmovdqa 160(%rdi), %ymm11
+; AVX512-FCP-NEXT: vmovdqa 128(%rdi), %ymm10
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm15 = ymm10[0,1],ymm11[2],ymm10[3,4,5],ymm11[6],ymm10[7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm15, %xmm9
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm15[0,1,2,3],xmm9[4],xmm15[5],xmm9[6],xmm15[7]
+; AVX512-FCP-NEXT: vmovdqa 240(%rdi), %xmm15
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[0,1,0,1,14,15,12,13,10,11,8,9,u,u,u,u]
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,28,29]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm9[0,1,2,3,4,5,6],ymm0[7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm15[0],xmm12[1],xmm15[2,3,4,5,6,7]
+; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm9, %xmm1
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm24
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0,1,2],ymm5[3],ymm6[4,5],ymm5[6],ymm6[7]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,12,13,10,11,8,9,6,7,u,u]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm15[2,3,4,5,10,11,12,13,0,1,0,1,0,1,0,1,18,19,20,21,26,27,28,29,16,17,16,17,16,17,16,17]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm14[2,3,4,5,10,11,12,13,0,1,0,1,0,1,0,1,18,19,20,21,26,27,28,29,16,17,16,17,16,17,16,17]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4,5,6],xmm1[7]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27,28,29]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm26
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[2,3,4,5,10,11,16,17],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6,7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm10
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm10[1],xmm1[2,3,4,5],xmm10[6],xmm1[7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27,28,29]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm7[2,3,4,5,10,11,16,17],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6,7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm7
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm7[1],xmm1[2,3,4,5],xmm7[6],xmm1[7]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[4,5,2,3,0,1,14,15,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vporq %ymm0, %ymm1, %ymm21
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm9[0,1,2],ymm3[3],ymm9[4,5],ymm3[6],ymm9[7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm10[0,1,2],ymm11[3],ymm10[4,5],ymm11[6],ymm10[7]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3,4,5],xmm0[6],xmm1[7]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,0,1,14,15,12,13,10,11,8,9,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} ymm10 = [2,5,2,5,2,5,2,5]
-; AVX512-FCP-NEXT: vpermd %ymm28, %ymm10, %ymm10
-; AVX512-FCP-NEXT: vpshufhw {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm10[7]
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm10 = xmm13[0],xmm7[0],xmm13[1],xmm7[1],xmm13[2],xmm7[2],xmm13[3],xmm7[3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm10[8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm0, %zmm15
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0,1],ymm8[2],ymm6[3,4,5],ymm8[6],ymm6[7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm11
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm11[4],xmm0[5],xmm11[6],xmm0[7]
-; AVX512-FCP-NEXT: vpermd %zmm31, %zmm18, %zmm11
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm12 = [1,0,0,0,5,8,12,15]
+; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} ymm7 = [2,5,2,5,2,5,2,5]
+; AVX512-FCP-NEXT: vpermd %ymm25, %ymm7, %ymm7
+; AVX512-FCP-NEXT: vpshufhw {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm7[7]
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm12[0],xmm15[0],xmm12[1],xmm15[1],xmm12[2],xmm15[2],xmm12[3],xmm15[3]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm7[8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm31
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm5[0,1],ymm6[2],ymm5[3,4,5],ymm6[6],ymm5[7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm9
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm9[4],xmm0[5],xmm9[6],xmm0[7]
+; AVX512-FCP-NEXT: vpermd %zmm30, %zmm18, %zmm9
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm18 = [1,0,0,0,5,8,12,15]
; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vpermd %zmm30, %zmm12, %zmm1
+; AVX512-FCP-NEXT: vpermd %zmm28, %zmm18, %zmm1
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[2,3,16,17,22,23,24,25,30,31,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpor %ymm1, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vpermd %zmm14, %zmm19, %zmm1
+; AVX512-FCP-NEXT: vpermd %zmm8, %zmm19, %zmm1
; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm17
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm11[4,5,10,11,0,1,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0],ymm4[1],ymm5[2,3,4],ymm4[5],ymm5[6,7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm9[4,5,10,11,0,1,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0],ymm3[1],ymm4[2,3,4],ymm3[5],ymm4[6,7]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3],xmm2[4,5,6,7]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[6,7,4,5,2,3,0,1,14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vporq %ymm0, %ymm1, %ymm18
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0],ymm9[1],ymm3[2,3],ymm9[4],ymm3[5,6,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm11[0],ymm10[1],ymm11[2,3],ymm10[4],ymm11[5,6,7]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5],xmm1[6],xmm0[7]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,2,3,0,1,14,15,12,13,u,u,u,u]
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm28[0,1,1,3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,24,25]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm10[12,13,10,11,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm19
-; AVX512-FCP-NEXT: vmovdqa 416(%rdi), %ymm14
-; AVX512-FCP-NEXT: vmovdqa 384(%rdi), %ymm2
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm14[2],ymm2[3,4,5],ymm14[6],ymm2[7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm6[0,1,2],ymm8[3],ymm6[4,5],ymm8[6],ymm6[7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm10
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm10[0],xmm1[1],xmm10[2,3,4,5],xmm1[6],xmm10[7]
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm10 = [2,11,2,11,12,5,8,9]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,0,1,14,15,12,13,10,11,8,9]
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vpermd %zmm30, %zmm10, %zmm10
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[0,1,22,23,28,29,18,19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm10[0,1,2],ymm0[3,4,5,6,7],ymm10[8,9,10],ymm0[11,12,13,14,15]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,2,3,2,3,0,1,14,15,12,13,10,11],zero,zero
-; AVX512-FCP-NEXT: vpor %ymm1, %ymm10, %ymm1
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm0 = [0,3,7,10,14,0,0,0]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0],ymm9[1],ymm3[2,3,4],ymm9[5],ymm3[6,7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0],xmm1[1],xmm3[2],xmm1[3],xmm3[4,5,6,7]
-; AVX512-FCP-NEXT: vpermd %zmm31, %zmm0, %zmm0
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm1
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm25[0,1,1,3]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,24,25]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm7[12,13,10,11,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm19
+; AVX512-FCP-NEXT: vmovdqa 416(%rdi), %ymm2
+; AVX512-FCP-NEXT: vmovdqa 384(%rdi), %ymm8
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0,1],ymm2[2],ymm8[3,4,5],ymm2[6],ymm8[7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm7
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm7[4],xmm1[5],xmm7[6],xmm1[7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm5[0,1,2],ymm6[3],ymm5[4,5],ymm6[6],ymm5[7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm7, %xmm9
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm9[0],xmm7[1],xmm9[2,3,4,5],xmm7[6],xmm9[7]
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [2,11,2,11,12,5,8,9]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,0,1,14,15,12,13,10,11,8,9]
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512-FCP-NEXT: vpermd %zmm28, %zmm9, %zmm9
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm9[0,1,22,23,28,29,18,19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm9[0,1,2],ymm1[3,4,5,6,7],ymm9[8,9,10],ymm1[11,12,13,14,15]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[0,1,2,3,2,3,0,1,14,15,12,13,10,11],zero,zero
+; AVX512-FCP-NEXT: vpor %ymm7, %ymm9, %ymm7
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm1[4,5,6,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm11[0],ymm10[1],ymm11[2,3,4],ymm10[5],ymm11[6,7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm9
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm9[0],xmm1[1],xmm9[2],xmm1[3],xmm9[4,5,6,7]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,6,7,4,5,2,3,0,1,14,15,u,u,u,u]
; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512-FCP-NEXT: vmovdqa64 %ymm23, %ymm3
-; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm11, %ymm3
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm3[7]
-; AVX512-FCP-NEXT: vpbroadcastw 232(%rdi), %xmm3
-; AVX512-FCP-NEXT: vpsrlq $48, %xmm7, %xmm9
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm3, %zmm1, %zmm23
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm9, %xmm1
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm9, %xmm3
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
-; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
-; AVX512-FCP-NEXT: vpermd %zmm25, %zmm16, %zmm1
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm16 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm0, %zmm16, %zmm23
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5,6],ymm0[7]
+; AVX512-FCP-NEXT: vpbroadcastw 232(%rdi), %xmm1
+; AVX512-FCP-NEXT: vpsrlq $48, %xmm15, %xmm9
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1],xmm1[2],xmm9[2],xmm1[3],xmm9[3]
+; AVX512-FCP-NEXT: vpermd %zmm23, %zmm16, %zmm9
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm10 = [0,3,7,10,14,0,0,0]
+; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm0, %zmm16
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm0 = [0,3,3,0,0,3,7,0]
-; AVX512-FCP-NEXT: vpermd %ymm28, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,0,1,6,7,8,9,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[0,1,0,1,6,7,8,9,14,15,u,u,u,u,u,u,16,17,16,17,22,23,24,25,30,31,u,u,u,u,u,u]
+; AVX512-FCP-NEXT: vpermd %ymm25, %ymm0, %ymm0
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,0,1,6,7,8,9,14,15,0,1,6,7,8,9,16,17,16,17,22,23,24,25,30,31,16,17,22,23,24,25]
+; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm0
+; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm9, %ymm1
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7],ymm1[8,9,10,11,12],ymm0[13,14,15]
-; AVX512-FCP-NEXT: vpsrld $16, %xmm13, %xmm1
-; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm7[4],xmm1[5],xmm7[5],xmm1[6],xmm7[6],xmm1[7],xmm7[7]
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm0, %zmm29
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm14[3],ymm2[4,5],ymm14[6],ymm2[7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3,4,5],xmm0[6],xmm1[7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0],ymm6[1],ymm8[2,3],ymm6[4],ymm8[5,6,7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3,4,5],xmm3[6],xmm1[7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,2,3,0,1,14,15,12,13,10,11]
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [2,0,0,0,6,9,13,0]
-; AVX512-FCP-NEXT: vpermd %zmm30, %zmm3, %zmm3
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[2,3,16,17,22,23,24,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3,4,5,6,7],ymm3[8,9,10],ymm0[11,12,13,14,15]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,2,3,0,1,14,15,12,13],zero,zero
-; AVX512-FCP-NEXT: vpor %ymm3, %ymm1, %ymm1
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm1, %xmm1
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm26, %zmm0, %zmm26
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [2,6,9,13,2,6,9,13]
-; AVX512-FCP-NEXT: vpermd %zmm25, %zmm3, %zmm3
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm12 = [0,4,7,11,14,0,0,0]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm17, %zmm0, %zmm17
-; AVX512-FCP-NEXT: vpermd %zmm31, %zmm12, %zmm12
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[10,11,6,7,4,5,6,7,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm12[u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm0[0,1],ymm1[2,3,4,5,6,7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm14[0],ymm2[1],ymm14[2,3],ymm2[4],ymm14[5,6,7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5],xmm1[6],xmm0[7]
+; AVX512-FCP-NEXT: vpsrld $16, %xmm12, %xmm1
+; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm15[4],xmm1[5],xmm15[5],xmm1[6],xmm15[6],xmm1[7],xmm15[7]
+; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm0, %zmm27
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm9, %xmm1
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm9, %xmm9
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm9 = xmm9[3,1,2,3,4,5,6,7]
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1],xmm1[2],xmm9[2],xmm1[3],xmm9[3]
+; AVX512-FCP-NEXT: vpermd %zmm30, %zmm10, %zmm9
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
+; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm9, %ymm9
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm9[2,3,4,5,6,7]
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm29 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512-FCP-NEXT: vpternlogq $184, %zmm1, %zmm29, %zmm16
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0,1,2],ymm2[3],ymm8[4,5],ymm2[6],ymm8[7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm9
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm9[0],xmm1[1],xmm9[2,3,4,5],xmm1[6],xmm9[7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm6[0],ymm5[1],ymm6[2,3],ymm5[4],ymm6[5,6,7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm9, %xmm10
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0],xmm10[1],xmm9[2,3,4,5],xmm10[6],xmm9[7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,2,3,0,1,14,15,12,13,10,11]
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm10 = [2,0,0,0,6,9,13,0]
+; AVX512-FCP-NEXT: vpermd %zmm28, %zmm10, %zmm10
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[2,3,16,17,22,23,24,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm10[0,1,2],ymm1[3,4,5,6,7],ymm10[8,9,10],ymm1[11,12,13,14,15]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5,2,3,0,1,14,15,12,13],zero,zero
+; AVX512-FCP-NEXT: vpor %ymm10, %ymm9, %ymm9
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm9[0,1,2,3],ymm1[4,5,6,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm4[0,1],ymm3[2,3],ymm4[4,5],ymm3[6,7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm10, %xmm9
+; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm9, %xmm14
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm26, %zmm0, %zmm9
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm0, %zmm13
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm17, %zmm0, %zmm26
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm0 = [2,6,9,13,2,6,9,13]
+; AVX512-FCP-NEXT: vpermd %zmm23, %zmm0, %zmm0
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm23 = [0,4,7,11,14,0,0,0]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm0, %zmm17
+; AVX512-FCP-NEXT: vpermd %zmm30, %zmm23, %zmm7
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm23
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm10[10,11,6,7,4,5,6,7,u,u,u,u,u,u,u,u]
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm1[0,1],ymm7[2,3,4,5,6,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm8[1],ymm2[2,3],ymm8[4],ymm2[5,6,7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm10
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm1[0],xmm10[1],xmm1[2,3,4,5],xmm10[6],xmm1[7]
; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,4,7,0,0,4,7,0]
; AVX512-FCP-NEXT: # ymm1 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vpermd %ymm28, %ymm1, %ymm1
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,2,3,4,5,10,11,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[4,5,2,3,4,5,10,11,12,13,u,u,u,u,u,u,20,21,18,19,20,21,26,27,28,29,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm3[0,1,2,3,4],ymm1[5,6,7],ymm3[8,9,10,11,12],ymm1[13,14,15]
-; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm13[4],xmm7[4],xmm13[5],xmm7[5],xmm13[6],xmm7[6],xmm13[7],xmm7[7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm7
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0],ymm6[1],ymm8[2,3,4],ymm6[5],ymm8[6,7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0],xmm1[1],xmm3[2],xmm1[3],xmm3[4,5,6,7]
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [3,0,0,0,6,10,13,0]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm0, %zmm10
-; AVX512-FCP-NEXT: vpermd %zmm30, %zmm3, %zmm3
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,4,5,2,3,0,1,14,15,12,13]
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[0,1,18,19,20,21,26,27],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3,4,5,6,7],ymm3[8,9,10],ymm0[11,12,13,14,15]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,2,3,6,7,4,5,2,3,0,1,14,15],zero,zero
-; AVX512-FCP-NEXT: vpor %ymm3, %ymm1, %ymm1
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512-FCP-NEXT: vpermd %ymm25, %ymm1, %ymm1
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm14 = [4,5,2,3,4,5,10,11,12,13,2,3,4,5,10,11,20,21,18,19,20,21,26,27,28,29,18,19,20,21,26,27]
+; AVX512-FCP-NEXT: vpshufb %ymm14, %ymm1, %ymm1
+; AVX512-FCP-NEXT: vpshufb %ymm14, %ymm0, %ymm0
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7],ymm0[8,9,10,11,12],ymm1[13,14,15]
+; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm12[4],xmm15[4],xmm12[5],xmm15[5],xmm12[6],xmm15[6],xmm12[7],xmm15[7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm14 = [3,0,0,0,6,10,13,0]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm12
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0],ymm5[1],ymm6[2,3,4],ymm5[5],ymm6[6,7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4,5,6,7]
+; AVX512-FCP-NEXT: vpermd %zmm28, %zmm14, %zmm1
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[u,u,u,u,u,u,4,5,2,3,0,1,14,15,12,13]
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[0,1,18,19,20,21,26,27],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm10 = ymm1[0,1,2],ymm10[3,4,5,6,7],ymm1[8,9,10],ymm10[11,12,13,14,15]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,6,7,4,5,2,3,0,1,14,15],zero,zero
+; AVX512-FCP-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm10[4,5,6,7]
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [1,4,8,11,15,0,0,0]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm0, %zmm3
-; AVX512-FCP-NEXT: vpermd %zmm31, %zmm1, %zmm1
-; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm1, %ymm1
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm4, %xmm5
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u,u,10,11,6,7,4,5,6,7]
-; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,4,6,7]
-; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1],ymm1[2,3,4,5,6,7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm14[0],ymm2[1],ymm14[2,3,4],ymm2[5],ymm14[6,7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm2, %xmm4
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0],xmm2[1],xmm4[2],xmm2[3],xmm4[4,5,6,7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm8[0,1],ymm6[2],ymm8[3,4],ymm6[5],ymm8[6,7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm4, %xmm5
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm5[0,3,1,3,4,5,6,7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[8,9,8,9,4,5,6,7,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,1,10,3,14,7,10,3]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; AVX512-FCP-NEXT: vpermd %zmm30, %zmm5, %zmm5
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[4,5,10,11,u,u,u,u,u,u,u,u,0,1,14,15,20,21,26,27,16,17,26,27,16,17,20,21,16,17,30,31]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0],ymm4[1,2],ymm5[3,4,5,6,7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,6,7,4,5,2,3,0,1,14,15]
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm4[0,1,2],ymm2[3,4,5,6,7],ymm4[8,9,10],ymm2[11,12,13,14,15]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3],ymm2[4,5,6,7]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm2
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm4 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm22, %zmm4, %zmm24
-; AVX512-FCP-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm20, %zmm27
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm27, %zmm4, %zmm26
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm21, %zmm16, %zmm15
-; AVX512-FCP-NEXT: movw $-512, %ax # imm = 0xFE00
-; AVX512-FCP-NEXT: kmovw %eax, %k1
-; AVX512-FCP-NEXT: vmovdqa32 %zmm17, %zmm15 {%k1}
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm18, %zmm16, %zmm19
-; AVX512-FCP-NEXT: vmovdqa32 %zmm10, %zmm19 {%k1}
-; AVX512-FCP-NEXT: vmovdqa32 %zmm3, %zmm23 {%k1}
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm29, %zmm16, %zmm12
-; AVX512-FCP-NEXT: vmovdqa32 %zmm0, %zmm12 {%k1}
-; AVX512-FCP-NEXT: vmovdqa64 %zmm24, (%rsi)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm26, (%rdx)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm15, (%rcx)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm19, (%r8)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm23, (%r9)
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm10
+; AVX512-FCP-NEXT: vpermd %zmm30, %zmm1, %zmm0
+; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm0, %ymm0
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1,2],ymm3[3],ymm4[4,5],ymm3[6],ymm4[7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u,u,10,11,6,7,4,5,6,7]
+; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,4,6,7]
+; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm8[1],ymm2[2,3,4],ymm8[5],ymm2[6,7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3],xmm2[4,5,6,7]
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,1,10,3,14,7,10,3]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm6[0,1],ymm5[2],ymm6[3,4],ymm5[5],ymm6[6,7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm3, %xmm4
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm4[0,3,1,3,4,5,6,7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[8,9,8,9,4,5,6,7,u,u,u,u,u,u,u,u]
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; AVX512-FCP-NEXT: vpermd %zmm28, %zmm2, %zmm2
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[4,5,10,11,u,u,u,u,u,u,u,u,0,1,14,15,20,21,26,27,16,17,26,27,16,17,20,21,16,17,30,31]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm3[1,2],ymm2[3,4,5,6,7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,6,7,4,5,2,3,0,1,14,15]
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm1
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512-FCP-NEXT: vmovdqa64 %zmm12, (%rax)
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm7, %zmm16, %zmm1
-; AVX512-FCP-NEXT: vmovdqa32 %zmm2, %zmm1 {%k1}
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512-FCP-NEXT: vpternlogq $184, %zmm22, %zmm2, %zmm9
+; AVX512-FCP-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm20, %zmm24
+; AVX512-FCP-NEXT: vpternlogq $184, %zmm24, %zmm2, %zmm13
+; AVX512-FCP-NEXT: vpternlogq $184, %zmm21, %zmm29, %zmm31
+; AVX512-FCP-NEXT: movw $-512, %di # imm = 0xFE00
+; AVX512-FCP-NEXT: kmovw %edi, %k1
+; AVX512-FCP-NEXT: vmovdqa32 %zmm26, %zmm31 {%k1}
+; AVX512-FCP-NEXT: vpternlogq $184, %zmm18, %zmm29, %zmm19
+; AVX512-FCP-NEXT: vmovdqa32 %zmm17, %zmm19 {%k1}
+; AVX512-FCP-NEXT: vmovdqa32 %zmm23, %zmm16 {%k1}
+; AVX512-FCP-NEXT: vpternlogq $226, %zmm27, %zmm29, %zmm7
+; AVX512-FCP-NEXT: vmovdqa32 %zmm10, %zmm7 {%k1}
+; AVX512-FCP-NEXT: vmovdqa64 %zmm9, (%rsi)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm13, (%rdx)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm31, (%rcx)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm19, (%r8)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm16, (%r9)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm7, (%rax)
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512-FCP-NEXT: vmovdqa64 %zmm1, (%rax)
+; AVX512-FCP-NEXT: vpternlogq $226, %zmm12, %zmm29, %zmm0
+; AVX512-FCP-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
+; AVX512-FCP-NEXT: vmovdqa64 %zmm0, (%rax)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
@@ -7799,293 +7808,303 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
;
; AVX512DQ-FCP-LABEL: load_i16_stride7_vf32:
; AVX512DQ-FCP: # %bb.0:
-; AVX512DQ-FCP-NEXT: vmovdqa64 320(%rdi), %zmm26
+; AVX512DQ-FCP-NEXT: pushq %rax
+; AVX512DQ-FCP-NEXT: vmovdqa64 320(%rdi), %zmm29
; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm17 = [2,6,9,13,2,6,9,13]
-; AVX512DQ-FCP-NEXT: vmovdqa64 128(%rdi), %zmm23
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm18 = [2,5,9,12,2,5,9,12]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm19 = [3,6,10,13,3,6,10,13]
-; AVX512DQ-FCP-NEXT: vmovdqa64 384(%rdi), %zmm20
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm0 = [2,6,9,0,13,0,0,0]
-; AVX512DQ-FCP-NEXT: vpermd %zmm1, %zmm0, %zmm7
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm27 = [1,0,0,0,4,8,11,15]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm0 = [2,5,9,0,12,0,0,0]
-; AVX512DQ-FCP-NEXT: vpermd %zmm1, %zmm0, %zmm10
-; AVX512DQ-FCP-NEXT: vpermd %zmm20, %zmm18, %zmm0
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,0,0,0,4,7,11,14]
-; AVX512DQ-FCP-NEXT: vpermd %zmm26, %zmm3, %zmm9
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [8,1,12,5,12,5,14,15]
-; AVX512DQ-FCP-NEXT: vpermd %zmm1, %zmm3, %zmm3
-; AVX512DQ-FCP-NEXT: vpermd %zmm23, %zmm19, %zmm4
-; AVX512DQ-FCP-NEXT: vmovdqa64 192(%rdi), %ymm25
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm25[0,1,0,2]
-; AVX512DQ-FCP-NEXT: vpbroadcastd {{.*#+}} ymm2 = [20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm8, %ymm5
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm2, %ymm30
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[0,1,2,3,0,1,6,7,8,9,14,15,12,13,14,15,16,17,18,19,16,17,22,23,24,25,30,31,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm4[0,1,2,3,4,5,6],ymm5[7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[6,7,12,13,2,3,16,17,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm4
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm3[0,1],ymm4[2],ymm3[3,4,5],ymm4[6],ymm3[7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm11, %xmm12
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm11[0,1,2,3],xmm12[4],xmm11[5],xmm12[6],xmm11[7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[0,1,14,15,12,13,10,11,8,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm11[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vporq %ymm6, %ymm11, %ymm22
-; AVX512DQ-FCP-NEXT: vpbroadcastw 252(%rdi), %xmm6
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm18 = [2,6,9,13,2,6,9,13]
+; AVX512DQ-FCP-NEXT: vmovdqa64 128(%rdi), %zmm25
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm17 = [2,5,9,12,2,5,9,12]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,3,7,10,14,0,0,0]
+; AVX512DQ-FCP-NEXT: vpermd %zmm1, %zmm2, %zmm0
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm20 = [2,11,2,11,12,5,8,9]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [10,3,6,15,12,13,6,15]
+; AVX512DQ-FCP-NEXT: vpermd %zmm1, %zmm2, %zmm0
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm21 = [3,6,10,13,3,6,10,13]
+; AVX512DQ-FCP-NEXT: vmovdqa64 384(%rdi), %zmm19
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [2,6,9,0,13,0,0,0]
+; AVX512DQ-FCP-NEXT: vpermd %zmm1, %zmm2, %zmm11
+; AVX512DQ-FCP-NEXT: vpermd %zmm19, %zmm18, %zmm14
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm16 = [1,0,0,0,4,8,11,15]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [2,5,9,0,12,0,0,0]
+; AVX512DQ-FCP-NEXT: vpermd %zmm1, %zmm2, %zmm3
+; AVX512DQ-FCP-NEXT: vpermd %zmm19, %zmm17, %zmm9
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,0,0,0,4,7,11,14]
+; AVX512DQ-FCP-NEXT: vpermd %zmm29, %zmm2, %zmm2
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [8,1,12,5,12,5,14,15]
+; AVX512DQ-FCP-NEXT: vpermd %zmm1, %zmm4, %zmm4
+; AVX512DQ-FCP-NEXT: vpermd %zmm25, %zmm21, %zmm5
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,2,3,0,1,6,7,8,9,14,15,12,13,14,15,16,17,18,19,16,17,22,23,24,25,30,31,20,21,26,27]
+; AVX512DQ-FCP-NEXT: vmovdqa64 192(%rdi), %ymm27
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm27[0,1,0,2]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm12, %ymm7
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm5, %ymm5
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm5[0,1,2,3,4,5,6],ymm7[7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[6,7,12,13,2,3,16,17,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm4
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm5
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm4[0,1],ymm5[2],ymm4[3,4,5],ymm5[6],ymm4[7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm13, %xmm15
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm13 = xmm13[0,1,2,3],xmm15[4],xmm13[5],xmm15[6],xmm13[7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm13[0,1,14,15,12,13,10,11,8,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm13[u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vporq %ymm7, %ymm13, %ymm23
+; AVX512DQ-FCP-NEXT: vpbroadcastw 252(%rdi), %xmm7
; AVX512DQ-FCP-NEXT: vmovdqa 224(%rdi), %xmm13
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm13[u,u,u,u,u,u,u,u,0,1,14,15,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpunpckhdq {{.*#+}} xmm6 = xmm11[2],xmm6[2],xmm11[3],xmm6[3]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm15 = xmm13[u,u,u,u,u,u,u,u,0,1,14,15,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpunpckhdq {{.*#+}} xmm7 = xmm15[2],xmm7[2],xmm15[3],xmm7[3]
; AVX512DQ-FCP-NEXT: movw $992, %ax # imm = 0x3E0
; AVX512DQ-FCP-NEXT: kmovw %eax, %k1
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm6, %zmm5, %zmm22 {%k1}
-; AVX512DQ-FCP-NEXT: vmovdqa 256(%rdi), %ymm5
-; AVX512DQ-FCP-NEXT: vmovdqa 288(%rdi), %ymm6
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm6[0,1],ymm5[2,3],ymm6[4,5],ymm5[6,7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm11, %xmm12
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm12[0,1,2],xmm11[3],xmm12[4],xmm11[5],xmm12[6,7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u,u,10,11,8,9,6,7,4,5,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm9[0,1,6,7,8,9,14,15,8,9,14,15,4,5,2,3,16,17,22,23,24,25,30,31,24,25,30,31,20,21,18,19]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm9[0,1,2],xmm11[3,4,5,6],xmm9[7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm9[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} ymm9 = [16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm11[0,1,2,3,4,5],ymm0[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm28
-; AVX512DQ-FCP-NEXT: vmovdqa 240(%rdi), %xmm15
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm11
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm11[0],xmm0[1],xmm11[2,3,4,5],xmm0[6],xmm11[7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [2,3,0,1,14,15,12,13,10,11,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[0,1,6,7,8,9,18,19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-FCP-NEXT: vporq %ymm10, %ymm0, %ymm21
-; AVX512DQ-FCP-NEXT: vmovdqa 160(%rdi), %ymm12
-; AVX512DQ-FCP-NEXT: vmovdqa 128(%rdi), %ymm10
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm10[0,1],ymm12[2],ymm10[3,4,5],ymm12[6],ymm10[7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm14
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm14[4],xmm0[5],xmm14[6],xmm0[7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,14,15,12,13,10,11,8,9,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,28,29]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm8[7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm15[0],xmm13[1],xmm15[2,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm11, %xmm8, %xmm8
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm7, %zmm6, %zmm23 {%k1}
+; AVX512DQ-FCP-NEXT: vmovdqa 256(%rdi), %ymm6
+; AVX512DQ-FCP-NEXT: vmovdqa 288(%rdi), %ymm7
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm15 = ymm7[0,1],ymm6[2,3],ymm7[4,5],ymm6[6,7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm15, %xmm8
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2],xmm15[3],xmm8[4],xmm15[5],xmm8[6,7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,u,10,11,8,9,6,7,4,5,u,u]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[0,1,6,7,8,9,14,15,8,9,14,15,4,5,2,3,16,17,22,23,24,25,30,31,24,25,30,31,20,21,18,19]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm2[0,1,2],xmm8[3,4,5,6],xmm2[7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm2[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} ymm0 = [16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm9, %ymm9
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm31
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm9[6,7]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm24
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm7[2,3,4,5,10,11,16,17],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6,7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm7, %xmm8
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0],xmm8[1],xmm7[2,3,4,5],xmm8[6],xmm7[7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[4,5,2,3,0,1,14,15,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm7, %ymm8
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm10[0,1,2],ymm12[3],ymm10[4,5],ymm12[6],ymm10[7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm7
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0],xmm0[1],xmm7[2,3,4,5],xmm0[6],xmm7[7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [0,1,2,3,0,1,14,15,12,13,10,11,8,9,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm7, %xmm7
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm4[0,1,2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm8, %xmm9
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm9[0],xmm8[1],xmm9[2,3,4,5],xmm8[6],xmm9[7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [2,3,0,1,14,15,12,13,10,11,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm8, %ymm8
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[0,1,6,7,8,9,18,19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-FCP-NEXT: vporq %ymm3, %ymm8, %ymm22
+; AVX512DQ-FCP-NEXT: vmovdqa 160(%rdi), %ymm3
+; AVX512DQ-FCP-NEXT: vmovdqa 128(%rdi), %ymm9
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1],ymm3[2],ymm9[3,4,5],ymm3[6],ymm9[7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm8, %xmm15
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm15[4],xmm8[5],xmm15[6],xmm8[7]
+; AVX512DQ-FCP-NEXT: vmovdqa 240(%rdi), %xmm15
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[0,1,0,1,14,15,12,13,10,11,8,9,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,28,29]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5,6],ymm12[7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm15[0],xmm13[1],xmm15[2,3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm12, %xmm10
+; AVX512DQ-FCP-NEXT: vpermd %zmm29, %zmm16, %zmm12
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm8, %zmm26
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm7[0,1,2],ymm6[3],ymm7[4,5],ymm6[6],ymm7[7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm8, %xmm10
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm10[0,1,2,3],xmm8[4],xmm10[5],xmm8[6],xmm10[7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,u,12,13,10,11,8,9,6,7,u,u]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm12[2,3,4,5,10,11,12,13,0,1,0,1,0,1,0,1,18,19,20,21,26,27,28,29,16,17,16,17,16,17,16,17]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm10[0,1,2],xmm8[3,4,5,6],xmm10[7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27,28,29]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm10[6,7]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm28
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm11[2,3,4,5,10,11,16,17],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6,7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm10, %xmm11
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0],xmm11[1],xmm10[2,3,4,5],xmm11[6],xmm10[7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm10[4,5,2,3,0,1,14,15,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpor %ymm8, %ymm10, %ymm8
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm9[0,1,2],ymm3[3],ymm9[4,5],ymm3[6],ymm9[7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm10, %xmm11
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm11[0],xmm10[1],xmm11[2,3,4,5],xmm10[6],xmm11[7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [0,1,2,3,0,1,14,15,12,13,10,11,8,9,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm10, %xmm10
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} ymm11 = [2,5,2,5,2,5,2,5]
-; AVX512DQ-FCP-NEXT: vpermd %ymm25, %ymm11, %ymm11
+; AVX512DQ-FCP-NEXT: vpermd %ymm27, %ymm11, %ymm11
; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm7[0,1,2,3,4,5,6],ymm11[7]
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm13[0],xmm15[0],xmm13[1],xmm15[1],xmm13[2],xmm15[2],xmm13[3],xmm15[3]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm14, %xmm7, %xmm2
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm14, %xmm31
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm11, %zmm16
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0,1,2],ymm5[3],ymm6[4,5],ymm5[6],ymm6[7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm11
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm11[0,1,2,3],xmm2[4],xmm11[5],xmm2[6],xmm11[7]
-; AVX512DQ-FCP-NEXT: vpermd %zmm26, %zmm27, %zmm11
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,12,13,10,11,8,9,6,7,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[2,3,4,5,10,11,12,13,0,1,0,1,0,1,0,1,18,19,20,21,26,27,28,29,16,17,16,17,16,17,16,17]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm11[0,1,2],xmm2[3,4,5,6],xmm11[7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm11[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpermd %zmm20, %zmm17, %zmm11
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27,28,29]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm11[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm2, %ymm29
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0,1],ymm6[2],ymm5[3,4,5],ymm6[6],ymm5[7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm14
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm14[4],xmm2[5],xmm14[6],xmm2[7]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm14 = [10,3,6,15,12,13,6,15]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm27 = [1,0,0,0,5,8,12,15]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm2, %ymm0
-; AVX512DQ-FCP-NEXT: vpermd %zmm26, %zmm27, %zmm2
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[2,3,16,17,22,23,24,25,30,31,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpor %ymm2, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vpermd %zmm20, %zmm19, %zmm2
-; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm2, %ymm2
-; AVX512DQ-FCP-NEXT: vpermd %zmm1, %zmm14, %zmm9
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5,6],ymm11[7]
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm11 = xmm13[0],xmm15[0],xmm13[1],xmm15[1],xmm13[2],xmm15[2],xmm13[3],xmm15[3]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm11, %xmm2
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm0, %xmm30
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm10, %zmm16
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0,1],ymm7[2],ymm6[3,4,5],ymm7[6],ymm6[7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm10
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm10[4],xmm2[5],xmm10[6],xmm2[7]
+; AVX512DQ-FCP-NEXT: vpermd %zmm19, %zmm21, %zmm10
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm19 = [1,0,0,0,5,8,12,15]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm2, %ymm2
+; AVX512DQ-FCP-NEXT: vpermd %zmm29, %zmm19, %zmm12
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm19 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm8, %zmm19, %zmm16
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm2[6,7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm12[2,3,16,17,22,23,24,25,30,31,u,u,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpor %ymm2, %ymm8, %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm31, %ymm0
+; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm10, %ymm8
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm8[6,7]
; AVX512DQ-FCP-NEXT: movw $-512, %ax # imm = 0xFE00
; AVX512DQ-FCP-NEXT: kmovw %eax, %k1
-; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm16 {%k1}
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm9[4,5,10,11,0,1,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0],ymm3[1],ymm4[2,3,4],ymm3[5],ymm4[6,7]
+; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm2, %zmm0, %zmm16 {%k1}
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[4,5,10,11,0,1,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm5[0],ymm4[1],ymm5[2,3,4],ymm4[5],ymm5[6,7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm8, %xmm10
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm10[0],xmm8[1],xmm10[2],xmm8[3],xmm10[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[6,7,4,5,2,3,0,1,14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpor %ymm2, %ymm8, %ymm10
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm9[1],ymm3[2,3],ymm9[4],ymm3[5,6,7]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm8
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm8[0],xmm2[1],xmm8[2],xmm2[3],xmm8[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[6,7,4,5,2,3,0,1,14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm2, %ymm8
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm12[0],ymm10[1],ymm12[2,3],ymm10[4],ymm12[5,6,7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3,4,5],xmm2[6],xmm0[7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,2,3,0,1,14,15,12,13,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm2
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm25[0,1,1,3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,24,25]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm9[7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[12,13,10,11,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm2, %zmm20
-; AVX512DQ-FCP-NEXT: vmovdqa 416(%rdi), %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqa 384(%rdi), %ymm9
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm9[0,1],ymm2[2],ymm9[3,4,5],ymm2[6],ymm9[7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm7, %xmm14
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm14[4],xmm7[5],xmm14[6],xmm7[7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm5[0,1,2],ymm6[3],ymm5[4,5],ymm6[6],ymm5[7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm14, %xmm11
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm11[0],xmm14[1],xmm11[2,3,4,5],xmm14[6],xmm11[7]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm14 = [2,11,2,11,12,5,8,9]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,0,1,14,15,12,13,10,11,8,9]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX512DQ-FCP-NEXT: vpermd %zmm26, %zmm14, %zmm14
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm14 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm14[0,1,22,23,28,29,18,19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm14[0,1,2],ymm7[3,4,5,6,7],ymm14[8,9,10],ymm7[11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[0,1,2,3,2,3,0,1,14,15,12,13,10,11],zero,zero
-; AVX512DQ-FCP-NEXT: vpor %ymm14, %ymm11, %ymm11
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm11[0,1,2,3],ymm7[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm8, %zmm19, %zmm20
-; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm7, %zmm0, %zmm20 {%k1}
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm8, %xmm11
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm8, %xmm8
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm8 = xmm8[3,1,2,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm11 = xmm11[0],xmm8[0],xmm11[1],xmm8[1],xmm11[2],xmm8[2],xmm11[3],xmm8[3]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [0,3,7,10,14,0,0,0]
-; AVX512DQ-FCP-NEXT: vpermd %zmm1, %zmm8, %zmm14
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm14, %ymm14
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1],ymm14[2,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm12[0],ymm10[1],ymm12[2,3,4],ymm10[5],ymm12[6,7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm10, %xmm12
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm12[0],xmm10[1],xmm12[2],xmm10[3],xmm12[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm30, %ymm12
-; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[0,1,6,7,4,5,2,3,0,1,14,15,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm8[1],xmm2[2,3,4,5],xmm8[6],xmm2[7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[0,1,4,5,2,3,0,1,14,15,12,13,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm27[0,1,1,3]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,24,25]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm8[7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm11[12,13,10,11,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm2, %zmm21
+; AVX512DQ-FCP-NEXT: vmovdqa 416(%rdi), %ymm8
+; AVX512DQ-FCP-NEXT: vmovdqa 384(%rdi), %ymm2
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm2[0,1],ymm8[2],ymm2[3,4,5],ymm8[6],ymm2[7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm11, %xmm14
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm11[0,1,2,3],xmm14[4],xmm11[5],xmm14[6],xmm11[7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm6[0,1,2],ymm7[3],ymm6[4,5],ymm7[6],ymm6[7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm14, %xmm0
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm14[1],xmm0[2,3,4,5],xmm14[6],xmm0[7]
+; AVX512DQ-FCP-NEXT: vpermd %zmm29, %zmm20, %zmm14
+; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm10, %zmm19, %zmm21
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm11[u,u,u,u,u,u,0,1,14,15,12,13,10,11,8,9]
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3,4,5,6],ymm0[7]
-; AVX512DQ-FCP-NEXT: vpbroadcastw 232(%rdi), %xmm10
-; AVX512DQ-FCP-NEXT: vpsrlq $48, %xmm15, %xmm12
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm10 = xmm10[0],xmm12[0],xmm10[1],xmm12[1],xmm10[2],xmm12[2],xmm10[3],xmm12[3]
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm10, %zmm0, %zmm10
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm11, %zmm19, %zmm10
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm9[0,1,2],ymm2[3],ymm9[4,5],ymm2[6],ymm9[7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm11
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm11[0],xmm0[1],xmm11[2,3,4,5],xmm0[6],xmm11[7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm6[0],ymm5[1],ymm6[2,3],ymm5[4],ymm6[5,6,7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm11, %xmm12
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm11[0],xmm12[1],xmm11[2,3,4,5],xmm12[6],xmm11[7]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm12 = [2,0,0,0,6,9,13,0]
-; AVX512DQ-FCP-NEXT: vpermd %zmm26, %zmm12, %zmm12
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm14[0,1,22,23,28,29,18,19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm10 = ymm11[0,1,2],ymm10[3,4,5,6,7],ymm11[8,9,10],ymm10[11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,2,3,0,1,14,15,12,13,10,11],zero,zero
+; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm11, %ymm0
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm10[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm21 {%k1}
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm11, %xmm0, %xmm10
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm11, %xmm31
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm10, %ymm10
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm11, %ymm20
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm10[2,3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0],ymm9[1],ymm3[2,3,4],ymm9[5],ymm3[6,7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm3, %xmm9
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm9[0],xmm3[1],xmm9[2],xmm3[3],xmm9[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[0,1,6,7,4,5,2,3,0,1,14,15,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm9[7]
+; AVX512DQ-FCP-NEXT: vpbroadcastw 232(%rdi), %xmm9
+; AVX512DQ-FCP-NEXT: vpsrlq $48, %xmm15, %xmm10
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3]
+; AVX512DQ-FCP-NEXT: vpermd %zmm25, %zmm18, %zmm12
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm10 = [0,4,7,11,14,0,0,0]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm14 = [2,0,0,0,6,9,13,0]
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm9, %zmm3, %zmm3
+; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm0, %zmm19, %zmm3
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm8[3],ymm2[4,5],ymm8[6],ymm2[7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm9
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm9[0],xmm0[1],xmm9[2,3,4,5],xmm0[6],xmm9[7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm7[0],ymm6[1],ymm7[2,3],ymm6[4],ymm7[5,6,7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm9, %xmm11
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0],xmm11[1],xmm9[2,3,4,5],xmm11[6],xmm9[7]
+; AVX512DQ-FCP-NEXT: vpermd %zmm29, %zmm14, %zmm11
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,2,3,0,1,14,15,12,13,10,11]
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm12 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm12[2,3,16,17,22,23,24,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm12[0,1,2],ymm0[3,4,5,6,7],ymm12[8,9,10],ymm0[11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[0,1,2,3,4,5,2,3,0,1,14,15,12,13],zero,zero
-; AVX512DQ-FCP-NEXT: vpor %ymm12, %ymm11, %ymm11
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpermd %zmm23, %zmm17, %zmm0
-; AVX512DQ-FCP-NEXT: vpermd %zmm23, %zmm18, %zmm12
-; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm11, %zmm0, %zmm10 {%k1}
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm11 = [0,3,3,0,0,3,7,0]
-; AVX512DQ-FCP-NEXT: vpermd %ymm25, %ymm11, %ymm11
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u,u,u,u,u,u,0,1,6,7,8,9,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm12[0,1,0,1,6,7,8,9,14,15,u,u,u,u,u,u,16,17,16,17,22,23,24,25,30,31,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm11 = ymm12[0,1,2,3,4],ymm11[5,6,7],ymm12[8,9,10,11,12],ymm11[13,14,15]
-; AVX512DQ-FCP-NEXT: vpsrld $16, %xmm13, %xmm12
-; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm12 = xmm12[4],xmm15[4],xmm12[5],xmm15[5],xmm12[6],xmm15[6],xmm12[7],xmm15[7]
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm12, %zmm11, %zmm11
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm4[0,1],ymm3[2,3],ymm4[4,5],ymm3[6,7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm12, %xmm14
-; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm14, %xmm7
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm14 = [0,4,7,11,14,0,0,0]
-; AVX512DQ-FCP-NEXT: vpermd %zmm1, %zmm14, %zmm14
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[10,11,6,7,4,5,6,7,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm12[0],xmm7[0],xmm12[1],xmm7[1],xmm12[2],xmm7[2],xmm12[3],xmm7[3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm14[u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm7[0,1],ymm12[2,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm2[0],ymm9[1],ymm2[2,3],ymm9[4],ymm2[5,6,7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm7, %xmm14
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0],xmm14[1],xmm7[2,3,4,5],xmm14[6],xmm7[7]
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm14 = [0,4,7,0,0,4,7,0]
-; AVX512DQ-FCP-NEXT: # ymm14 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vpermd %ymm25, %ymm14, %ymm14
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm14 = ymm14[u,u,u,u,u,u,u,u,u,u,2,3,4,5,10,11,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[4,5,2,3,4,5,10,11,12,13,u,u,u,u,u,u,20,21,18,19,20,21,26,27,28,29,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm14[5,6,7],ymm0[8,9,10,11,12],ymm14[13,14,15]
-; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm13 = xmm13[4],xmm15[4],xmm13[5],xmm15[5],xmm13[6],xmm15[6],xmm13[7],xmm15[7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm11[2,3,16,17,22,23,24,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm11[0,1,2],ymm0[3,4,5,6,7],ymm11[8,9,10],ymm0[11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5,2,3,0,1,14,15,12,13],zero,zero
+; AVX512DQ-FCP-NEXT: vpor %ymm11, %ymm9, %ymm9
+; AVX512DQ-FCP-NEXT: vpermd %zmm25, %zmm17, %zmm11
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm3 {%k1}
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm0 = [0,3,3,0,0,3,7,0]
+; AVX512DQ-FCP-NEXT: vpermd %ymm27, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [0,1,0,1,6,7,8,9,14,15,0,1,6,7,8,9,16,17,16,17,22,23,24,25,30,31,16,17,22,23,24,25]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm11, %ymm9
+; AVX512DQ-FCP-NEXT: vpermd %zmm1, %zmm10, %zmm10
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm9[0,1,2,3,4],ymm0[5,6,7],ymm9[8,9,10,11,12],ymm0[13,14,15]
+; AVX512DQ-FCP-NEXT: vpsrld $16, %xmm13, %xmm9
+; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm9 = xmm9[4],xmm15[4],xmm9[5],xmm15[5],xmm9[6],xmm15[6],xmm9[7],xmm15[7]
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm9, %zmm0, %zmm0
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm9, %xmm11
; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm31, %xmm14
-; AVX512DQ-FCP-NEXT: vpshufb %xmm14, %xmm13, %xmm13
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm0, %zmm13
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0],ymm5[1],ymm6[2,3,4],ymm5[5],ymm6[6,7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm14
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm14[0],xmm0[1],xmm14[2],xmm0[3],xmm14[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm14 = [3,0,0,0,6,10,13,0]
-; AVX512DQ-FCP-NEXT: vpermd %zmm26, %zmm14, %zmm14
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,4,5,2,3,0,1,14,15,12,13]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm14 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm14[0,1,18,19,20,21,26,27],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm14[0,1,2],ymm7[3,4,5,6,7],ymm14[8,9,10],ymm7[11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,6,7,4,5,2,3,0,1,14,15],zero,zero
-; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm14, %ymm0
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm7[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm11, %zmm19, %zmm12
-; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm12 {%k1}
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm0 = [1,4,8,11,15,0,0,0]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm28, %zmm0, %zmm7
-; AVX512DQ-FCP-NEXT: vpermd %zmm1, %zmm0, %zmm0
-; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1,2],ymm3[3],ymm4[4,5],ymm3[6],ymm4[7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u,u,10,11,6,7,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm14, %xmm11, %xmm11
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[10,11,6,7,4,5,6,7,u,u,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm9 = xmm9[0],xmm11[0],xmm9[1],xmm11[1],xmm9[2],xmm11[2],xmm9[3],xmm11[3]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1],ymm10[2,3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm8[0],ymm2[1],ymm8[2,3],ymm2[4],ymm8[5,6,7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm10, %xmm11
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm10[0],xmm11[1],xmm10[2,3,4,5],xmm11[6],xmm10[7]
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [0,4,7,0,0,4,7,0]
+; AVX512DQ-FCP-NEXT: # ymm10 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpermd %ymm27, %ymm10, %ymm10
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm14 = [4,5,2,3,4,5,10,11,12,13,2,3,4,5,10,11,20,21,18,19,20,21,26,27,28,29,18,19,20,21,26,27]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm14, %ymm10, %ymm10
+; AVX512DQ-FCP-NEXT: vpshufb %ymm14, %ymm12, %ymm12
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm10 = ymm12[0,1,2,3,4],ymm10[5,6,7],ymm12[8,9,10,11,12],ymm10[13,14,15]
+; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm12 = xmm13[4],xmm15[4],xmm13[5],xmm15[5],xmm13[6],xmm15[6],xmm13[7],xmm15[7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm30, %xmm13
+; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm12, %xmm12
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm13 = [3,0,0,0,6,10,13,0]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm10, %zmm10
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm7[0],ymm6[1],ymm7[2,3,4],ymm6[5],ymm7[6,7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm12, %xmm14
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm14[0],xmm12[1],xmm14[2],xmm12[3],xmm14[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpermd %zmm29, %zmm13, %zmm13
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u,u,4,5,2,3,0,1,14,15,12,13]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm11, %ymm0, %ymm11
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm13 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm13[0,1,18,19,20,21,26,27],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm11 = ymm13[0,1,2],ymm11[3,4,5,6,7],ymm13[8,9,10],ymm11[11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[0,1,2,3,6,7,4,5,2,3,0,1,14,15],zero,zero
+; AVX512DQ-FCP-NEXT: vpor %ymm13, %ymm12, %ymm12
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm12[0,1,2,3],ymm11[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm12 = [1,4,8,11,15,0,0,0]
+; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm0, %zmm19, %zmm9
+; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm11, %zmm0, %zmm9 {%k1}
+; AVX512DQ-FCP-NEXT: vpermd %zmm1, %zmm12, %zmm0
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm20, %ymm1
+; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0,1,2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm4
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,u,u,10,11,6,7,4,5,6,7]
; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,4,6,7]
-; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm9[1],ymm2[2,3,4],ymm9[5],ymm2[6,7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3],xmm2[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0,1],ymm5[2],ymm6[3,4],ymm5[5],ymm6[6,7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[0,3,1,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[8,9,8,9,4,5,6,7,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,1,10,3,14,7,10,3]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm29, %zmm0, %zmm4
-; AVX512DQ-FCP-NEXT: vpermd %zmm26, %zmm3, %zmm3
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[4,5,10,11,u,u,u,u,u,u,u,u,0,1,14,15,20,21,26,27,16,17,26,27,16,17,20,21,16,17,30,31]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1,2],ymm3[3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,6,7,4,5,2,3,0,1,14,15]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm13, %zmm19, %zmm0
-; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm0 {%k1}
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm22, %zmm1, %zmm7
-; AVX512DQ-FCP-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm21, %zmm24
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm24, %zmm1, %zmm4
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm7, (%rsi)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, (%rdx)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm16, (%rcx)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm20, (%r8)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm10, (%r9)
+; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm0[2,3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0],ymm2[1],ymm8[2,3,4],ymm2[5],ymm8[6,7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3],xmm2[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,1,10,3,14,7,10,3]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm7[0,1],ymm6[2],ymm7[3,4],ymm6[5],ymm7[6,7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm4, %xmm5
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm5[0,3,1,3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[8,9,8,9,4,5,6,7,u,u,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; AVX512DQ-FCP-NEXT: vpermd %zmm29, %zmm2, %zmm2
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[4,5,10,11,u,u,u,u,u,u,u,u,0,1,14,15,20,21,26,27,16,17,26,27,16,17,20,21,16,17,30,31]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm4[1,2],ymm2[3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,6,7,4,5,2,3,0,1,14,15]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7],ymm2[8,9,10],ymm0[11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm10, %zmm19, %zmm1
+; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm1 {%k1}
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm12, (%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm23, %zmm0, %zmm24
+; AVX512DQ-FCP-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm22, %zmm26
+; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm26, %zmm0, %zmm28
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm24, (%rsi)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm28, (%rdx)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm16, (%rcx)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm21, (%r8)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm3, (%r9)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm9, (%rax)
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, (%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm1, (%rax)
+; AVX512DQ-FCP-NEXT: popq %rax
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
@@ -13230,88 +13249,85 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP: # %bb.0:
; AVX2-FCP-NEXT: subq $1544, %rsp # imm = 0x608
; AVX2-FCP-NEXT: vmovdqa 256(%rdi), %ymm6
+; AVX2-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 224(%rdi), %ymm7
-; AVX2-FCP-NEXT: vmovdqa 512(%rdi), %ymm14
+; AVX2-FCP-NEXT: vmovdqu %ymm7, (%rsp) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqa 512(%rdi), %ymm11
+; AVX2-FCP-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 544(%rdi), %ymm15
-; AVX2-FCP-NEXT: vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 480(%rdi), %ymm12
-; AVX2-FCP-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 448(%rdi), %ymm13
-; AVX2-FCP-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa (%rdi), %ymm2
-; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %ymm9
-; AVX2-FCP-NEXT: vmovdqa 64(%rdi), %ymm1
-; AVX2-FCP-NEXT: vmovdqa 96(%rdi), %ymm0
+; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %ymm3
+; AVX2-FCP-NEXT: vmovdqa 64(%rdi), %ymm0
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
-; AVX2-FCP-NEXT: vmovdqa %ymm1, %ymm11
+; AVX2-FCP-NEXT: vmovdqa 96(%rdi), %ymm1
+; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,14,15,12,13,14,15,2,3,6,7,12,13,2,3,16,17,30,31,28,29,30,31,18,19,22,23,28,29,18,19]
; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm0, %ymm0
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm9[2],ymm2[3,4,5],ymm9[6],ymm2[7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm3[2],ymm2[3,4,5],ymm3[6],ymm2[7]
+; AVX2-FCP-NEXT: vmovdqa %ymm3, %ymm8
; AVX2-FCP-NEXT: vmovdqa %ymm2, %ymm10
; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [0,1,14,15,12,13,10,11,8,9,8,9,8,9,8,9]
; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm1, %xmm2
-; AVX2-FCP-NEXT: vpmovsxbw {{.*#+}} xmm8 = [65535,65535,65535,65535,65535,0,0,0]
-; AVX2-FCP-NEXT: vpblendvb %ymm8, %ymm2, %ymm0, %ymm0
+; AVX2-FCP-NEXT: vpmovsxbw {{.*#+}} xmm1 = [65535,65535,65535,65535,65535,0,0,0]
+; AVX2-FCP-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX2-FCP-NEXT: vmovdqa %ymm1, %ymm9
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm13[0,1],ymm12[2],ymm13[3,4,5],ymm12[6],ymm13[7]
+; AVX2-FCP-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5],xmm3[6],xmm2[7]
; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm2, %xmm2
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm15[0],ymm14[1],ymm15[2,3,4],ymm14[5],ymm15[6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm15[0],ymm11[1],ymm15[2,3,4],ymm11[5],ymm15[6,7]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm3
-; AVX2-FCP-NEXT: vpblendvb %ymm8, %ymm2, %ymm3, %ymm0
-; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendvb %ymm1, %ymm2, %ymm3, %ymm1
+; AVX2-FCP-NEXT: vmovdqa %ymm9, %ymm11
+; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm7[0,1],ymm6[2],ymm7[3,4,5],ymm6[6],ymm7[7]
-; AVX2-FCP-NEXT: vmovdqa %ymm7, %ymm15
-; AVX2-FCP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa %ymm6, %ymm13
-; AVX2-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vextracti128 $1, %ymm3, %xmm6
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm6[4],xmm3[5],xmm6[6],xmm3[7]
; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm3, %xmm3
-; AVX2-FCP-NEXT: vmovdqa 288(%rdi), %ymm1
-; AVX2-FCP-NEXT: vmovdqa 320(%rdi), %ymm12
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm12[0],ymm1[1],ymm12[2,3,4],ymm1[5],ymm12[6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqa 288(%rdi), %ymm2
+; AVX2-FCP-NEXT: vmovdqa 320(%rdi), %ymm14
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm14[0],ymm2[1],ymm14[2,3,4],ymm2[5],ymm14[6,7]
+; AVX2-FCP-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,2,2,3]
; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm6, %ymm6
-; AVX2-FCP-NEXT: vpblendvb %ymm8, %ymm3, %ymm6, %ymm0
-; AVX2-FCP-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 704(%rdi), %ymm2
-; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 672(%rdi), %ymm0
+; AVX2-FCP-NEXT: vpblendvb %ymm9, %ymm3, %ymm6, %ymm1
+; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqa 704(%rdi), %ymm0
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm0[0,1],ymm2[2],ymm0[3,4,5],ymm2[6],ymm0[7]
+; AVX2-FCP-NEXT: vmovdqa 672(%rdi), %ymm1
+; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm1[0,1],ymm0[2],ymm1[3,4,5],ymm0[6],ymm1[7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm6, %xmm7
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm7[4],xmm6[5],xmm7[6],xmm6[7]
; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm6, %xmm5
-; AVX2-FCP-NEXT: vmovdqa 736(%rdi), %ymm2
-; AVX2-FCP-NEXT: vmovdqa 768(%rdi), %ymm0
+; AVX2-FCP-NEXT: vmovdqa 736(%rdi), %ymm0
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm0[0],ymm2[1],ymm0[2,3,4],ymm2[5],ymm0[6,7]
-; AVX2-FCP-NEXT: vmovdqa %ymm2, %ymm3
-; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqa 768(%rdi), %ymm9
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm9[0],ymm0[1],ymm9[2,3,4],ymm0[5],ymm9[6,7]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,2,2,3]
; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm6, %ymm4
-; AVX2-FCP-NEXT: vmovdqa %ymm8, %ymm0
-; AVX2-FCP-NEXT: vpblendvb %ymm8, %ymm5, %ymm4, %ymm2
-; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa %ymm9, %ymm2
-; AVX2-FCP-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendvb %ymm11, %ymm5, %ymm4, %ymm3
+; AVX2-FCP-NEXT: vmovdqa %ymm11, %ymm0
+; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqa %ymm8, %ymm3
+; AVX2-FCP-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm10[0,1,2],ymm9[3],ymm10[4,5],ymm9[6],ymm10[7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm10[0,1,2],ymm8[3],ymm10[4,5],ymm8[6],ymm10[7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm4, %xmm5
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm5[0],xmm4[1],xmm5[2,3,4,5],xmm4[6],xmm5[7]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm9[0,1],ymm11[2],ymm9[3,4],ymm11[5],ymm9[6,7]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm5 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm5 = ymm11[0,1],mem[2],ymm11[3,4],mem[5],ymm11[6,7]
; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [2,5,1,0,4,0,0,0]
; AVX2-FCP-NEXT: vpermd %ymm5, %ymm4, %ymm6
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [2,3,2,3,2,3,2,3,8,9,0,1,6,7,8,9,18,19,18,19,18,19,18,19,24,25,16,17,22,23,24,25]
@@ -13320,43 +13336,49 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm7, %xmm7
; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm7, %ymm8, %ymm7
; AVX2-FCP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $72, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm7 = ymm7[0,1,2],mem[3],ymm7[4,5],mem[6],ymm7[7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm13[0,1,2],ymm12[3],ymm13[4,5],ymm12[6],ymm13[7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm7, %xmm8
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm8[0],xmm7[1],xmm8[2,3,4,5],xmm7[6],xmm8[7]
; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm7, %xmm7
-; AVX2-FCP-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm8 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm8 = mem[0,1],ymm14[2],mem[3,4],ymm14[5],mem[6,7]
+; AVX2-FCP-NEXT: vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm15[0,1],ymm1[2],ymm15[3,4],ymm1[5],ymm15[6,7]
; AVX2-FCP-NEXT: vpermd %ymm8, %ymm4, %ymm8
; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm8, %ymm8
; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm7, %ymm8, %ymm7
-; AVX2-FCP-NEXT: vmovdqa %ymm0, %ymm14
+; AVX2-FCP-NEXT: vmovdqa %ymm0, %ymm12
; AVX2-FCP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm15[0,1,2],ymm13[3],ymm15[4,5],ymm13[6],ymm15[7]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd $183, (%rsp), %ymm0, %ymm7 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm7 = mem[0,1,2],ymm0[3],mem[4,5],ymm0[6],mem[7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm7, %xmm8
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm8[0],xmm7[1],xmm8[2,3,4,5],xmm7[6],xmm8[7]
; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm7, %xmm7
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm12[0,1],ymm1[2],ymm12[3,4],ymm1[5],ymm12[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm14[0,1],ymm2[2],ymm14[3,4],ymm2[5],ymm14[6,7]
; AVX2-FCP-NEXT: vpermd %ymm8, %ymm4, %ymm8
; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm8, %ymm8
-; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm7, %ymm8, %ymm7
+; AVX2-FCP-NEXT: vmovdqa %ymm12, %ymm0
+; AVX2-FCP-NEXT: vpblendvb %ymm12, %ymm7, %ymm8, %ymm7
; AVX2-FCP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm0[0,1,2],ymm12[3],ymm0[4,5],ymm12[6],ymm0[7]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd $72, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm7 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm7 = ymm14[0,1,2],mem[3],ymm14[4,5],mem[6],ymm14[7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm7, %xmm8
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm8[0],xmm7[1],xmm8[2,3,4,5],xmm7[6],xmm8[7]
; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm7, %xmm6
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm15[0,1],ymm3[2],ymm15[3,4],ymm3[5],ymm15[6,7]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm9[0,1],ymm2[2],ymm9[3,4],ymm2[5],ymm9[6,7]
+; AVX2-FCP-NEXT: vmovdqa %ymm9, %ymm12
+; AVX2-FCP-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpermd %ymm7, %ymm4, %ymm4
; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm4, %ymm4
-; AVX2-FCP-NEXT: vpblendvb %ymm14, %ymm6, %ymm4, %ymm3
-; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm2[0],ymm10[1],ymm2[2,3],ymm10[4],ymm2[5,6,7]
+; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm6, %ymm4, %ymm4
+; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm3[0],ymm10[1],ymm3[2,3],ymm10[4],ymm3[5,6,7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm4, %xmm5
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm4[0],xmm5[1],xmm4[2,3,4,5],xmm5[6],xmm4[7]
+; AVX2-FCP-NEXT: vmovdqa %ymm11, %ymm9
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm9[0,1],ymm11[2,3],ymm9[4,5],ymm11[6,7]
; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [2,6,1,0,5,0,0,0]
; AVX2-FCP-NEXT: vpermd %ymm5, %ymm4, %ymm6
@@ -13364,36 +13386,34 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm6, %ymm8
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [4,5,2,3,0,1,14,15,12,13,12,13,12,13,12,13]
; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm7, %xmm7
-; AVX2-FCP-NEXT: vpblendvb %ymm14, %ymm7, %ymm8, %ymm2
-; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm3[0],ymm13[1],ymm3[2,3],ymm13[4],ymm3[5,6,7]
+; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm7, %ymm8, %ymm3
+; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm10[0],ymm13[1],ymm10[2,3],ymm13[4],ymm10[5,6,7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm7, %xmm8
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0],xmm8[1],xmm7[2,3,4,5],xmm8[6],xmm7[7]
; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm7, %xmm7
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1],ymm10[2,3],ymm9[4,5],ymm10[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm15[0,1],ymm1[2,3],ymm15[4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vpermd %ymm8, %ymm4, %ymm8
; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm8, %ymm8
-; AVX2-FCP-NEXT: vpblendvb %ymm14, %ymm7, %ymm8, %ymm1
-; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa %ymm12, %ymm1
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm12[0],ymm0[1],ymm12[2,3],ymm0[4],ymm12[5,6,7]
+; AVX2-FCP-NEXT: vmovdqa %ymm0, %ymm3
+; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm7, %ymm8, %ymm0
+; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm0[0],ymm14[1],ymm0[2,3],ymm14[4],ymm0[5,6,7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm7, %xmm8
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0],xmm8[1],xmm7[2,3,4,5],xmm8[6],xmm7[7]
; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm7, %xmm7
-; AVX2-FCP-NEXT: vmovdqa %ymm15, %ymm11
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm15[0,1],ymm0[2,3],ymm15[4,5],ymm0[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm12[0,1],ymm2[2,3],ymm12[4,5],ymm2[6,7]
; AVX2-FCP-NEXT: vpermd %ymm8, %ymm4, %ymm8
; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm8, %ymm8
-; AVX2-FCP-NEXT: vpblendvb %ymm14, %ymm7, %ymm8, %ymm7
-; AVX2-FCP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm2[0],ymm12[1],ymm2[2,3],ymm12[4],ymm2[5,6,7]
+; AVX2-FCP-NEXT: vpblendvb %ymm3, %ymm7, %ymm8, %ymm1
+; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqa %ymm3, %ymm1
+; AVX2-FCP-NEXT: vmovdqu (%rsp), %ymm12 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm3[0],ymm12[1],ymm3[2,3],ymm12[4],ymm3[5,6,7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm7, %xmm8
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0],xmm8[1],xmm7[2,3,4,5],xmm8[6],xmm7[7]
; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm7, %xmm6
@@ -13402,98 +13422,96 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm14[0,1],ymm15[2,3],ymm14[4,5],ymm15[6,7]
; AVX2-FCP-NEXT: vpermd %ymm7, %ymm4, %ymm4
; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm4, %ymm4
-; AVX2-FCP-NEXT: vpmovsxbw {{.*#+}} xmm8 = [65535,65535,65535,65535,65535,0,0,0]
-; AVX2-FCP-NEXT: vpblendvb %ymm8, %ymm6, %ymm4, %ymm4
-; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqa %ymm1, %ymm8
+; AVX2-FCP-NEXT: vpblendvb %ymm1, %ymm6, %ymm4, %ymm1
+; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
; AVX2-FCP-NEXT: vpblendd $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm4 = ymm4[0],mem[1],ymm4[2,3,4],mem[5],ymm4[6,7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm4, %xmm5
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm5[0],xmm4[1],xmm5[2],xmm4[3],xmm5[4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $72, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm4 = ymm4[0,1,2],mem[3],ymm4[4,5],mem[6],ymm4[7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm9[0,1,2],ymm11[3],ymm9[4,5],ymm11[6],ymm9[7]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm4[1,3,2,3]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [6,7,6,7,6,7,6,7,8,9,4,5,10,11,0,1,22,23,22,23,22,23,22,23,24,25,20,21,26,27,16,17]
; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm5, %ymm7
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [6,7,4,5,2,3,0,1,14,15,14,15,14,15,14,15]
; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm6, %xmm6
-; AVX2-FCP-NEXT: vpblendvb %ymm8, %ymm6, %ymm7, %ymm6
-; AVX2-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm3[0],ymm13[1],ymm3[2,3,4],ymm13[5],ymm3[6,7]
+; AVX2-FCP-NEXT: vpblendvb %ymm8, %ymm6, %ymm7, %ymm1
+; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm10[0],ymm13[1],ymm10[2,3,4],ymm13[5],ymm10[6,7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm6, %xmm7
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0],xmm6[1],xmm7[2],xmm6[3],xmm7[4,5,6,7]
; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm6, %xmm6
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm9[0,1,2],ymm10[3],ymm9[4,5],ymm10[6],ymm9[7]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd $72, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm7 = ymm7[0,1,2],mem[3],ymm7[4,5],mem[6],ymm7[7]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[1,3,2,3]
; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm7, %ymm7
-; AVX2-FCP-NEXT: vpblendvb %ymm8, %ymm6, %ymm7, %ymm3
-; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm2[0],ymm12[1],ymm2[2,3,4],ymm12[5],ymm2[6,7]
+; AVX2-FCP-NEXT: vpblendvb %ymm8, %ymm6, %ymm7, %ymm1
+; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm3[0],ymm12[1],ymm3[2,3,4],ymm12[5],ymm3[6,7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm6, %xmm7
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0],xmm6[1],xmm7[2],xmm6[3],xmm7[4,5,6,7]
; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm6, %xmm6
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm14[0,1,2],ymm15[3],ymm14[4,5],ymm15[6],ymm14[7]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[1,3,2,3]
; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm7, %ymm7
-; AVX2-FCP-NEXT: vpblendvb %ymm8, %ymm6, %ymm7, %ymm2
-; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm6 = ymm1[0],mem[1],ymm1[2,3,4],mem[5],ymm1[6,7]
+; AVX2-FCP-NEXT: vpblendvb %ymm8, %ymm6, %ymm7, %ymm1
+; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendd $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm6 = ymm0[0],mem[1],ymm0[2,3,4],mem[5],ymm0[6,7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm6, %xmm7
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0],xmm6[1],xmm7[2],xmm6[3],xmm7[4,5,6,7]
; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm6, %xmm5
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm11[0,1,2],ymm0[3],ymm11[4,5],ymm0[6],ymm11[7]
+; AVX2-FCP-NEXT: vpblendd $183, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm6 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm6 = mem[0,1,2],ymm2[3],mem[4,5],ymm2[6],mem[7]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[1,3,2,3]
; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm6, %ymm4
; AVX2-FCP-NEXT: vpblendvb %ymm8, %ymm5, %ymm4, %ymm0
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 128(%rdi), %ymm10
+; AVX2-FCP-NEXT: vmovdqa 128(%rdi), %ymm11
; AVX2-FCP-NEXT: vmovdqa 160(%rdi), %ymm1
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm1[0,1,2],ymm10[3],ymm1[4,5],ymm10[6],ymm1[7]
-; AVX2-FCP-NEXT: vmovdqa %ymm1, %ymm11
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm1[0,1,2],ymm11[3],ymm1[4,5],ymm11[6],ymm1[7]
+; AVX2-FCP-NEXT: vmovdqa %ymm1, %ymm10
; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [3,6,2,5,3,6,2,5]
; AVX2-FCP-NEXT: # ymm5 = mem[0,1,0,1]
; AVX2-FCP-NEXT: vpermd %ymm4, %ymm5, %ymm4
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,2,3,0,1,6,7,8,9,14,15,12,13,14,15,16,17,18,19,16,17,22,23,24,25,30,31,28,29,30,31]
-; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm4, %ymm6
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,2,3,0,1,6,7,8,9,14,15,12,13,14,15,16,17,18,19,16,17,22,23,24,25,30,31,20,21,26,27]
+; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm4, %ymm7
; AVX2-FCP-NEXT: vmovdqa 192(%rdi), %ymm0
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm0[0,1,0,2]
-; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} ymm0 = [20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27]
-; AVX2-FCP-NEXT: vpshufb %ymm0, %ymm4, %ymm7
-; AVX2-FCP-NEXT: vmovdqa %ymm0, %ymm14
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,6],ymm7[7]
+; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm4, %ymm6
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3,4,5,6],ymm6[7]
; AVX2-FCP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm0 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm0 = mem[0,1,2,3,4],ymm6[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 576(%rdi), %ymm12
+; AVX2-FCP-NEXT: vmovdqa 576(%rdi), %ymm1
; AVX2-FCP-NEXT: vmovdqa 608(%rdi), %ymm7
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm7[0,1,2],ymm12[3],ymm7[4,5],ymm12[6],ymm7[7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm7[0,1,2],ymm1[3],ymm7[4,5],ymm1[6],ymm7[7]
; AVX2-FCP-NEXT: vpermd %ymm0, %ymm5, %ymm0
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,2,3,0,1,6,7,8,9,14,15,12,13,14,15,16,17,18,19,16,17,22,23,24,25,30,31,28,29,30,31]
; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm0, %ymm8
; AVX2-FCP-NEXT: vmovdqa 640(%rdi), %ymm0
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm0[0,1,0,2]
-; AVX2-FCP-NEXT: vpshufb %ymm14, %ymm1, %ymm2
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1,2,3,4,5,6],ymm2[7]
-; AVX2-FCP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm0 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm0 = mem[0,1,2,3,4],ymm2[5,6,7]
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm0[0,1,0,2]
+; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} ymm13 = [20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27]
+; AVX2-FCP-NEXT: vpshufb %ymm13, %ymm12, %ymm15
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5,6],ymm15[7]
+; AVX2-FCP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm0 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm0 = mem[0,1,2,3,4],ymm8[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 352(%rdi), %ymm0
+; AVX2-FCP-NEXT: vmovdqa 352(%rdi), %ymm15
; AVX2-FCP-NEXT: vmovdqa 384(%rdi), %ymm6
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0,1,2],ymm0[3],ymm6[4,5],ymm0[6],ymm6[7]
-; AVX2-FCP-NEXT: vmovdqa %ymm0, %ymm15
-; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0,1,2],ymm15[3],ymm6[4,5],ymm15[6],ymm6[7]
; AVX2-FCP-NEXT: vpermd %ymm2, %ymm5, %ymm2
; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm8
; AVX2-FCP-NEXT: vmovdqa 416(%rdi), %ymm0
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm0[0,1,0,2]
-; AVX2-FCP-NEXT: vpshufb %ymm14, %ymm2, %ymm9
-; AVX2-FCP-NEXT: vmovdqa %ymm14, %ymm13
+; AVX2-FCP-NEXT: vpshufb %ymm13, %ymm2, %ymm9
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5,6],ymm9[7]
-; AVX2-FCP-NEXT: vpblendd $31, (%rsp), %ymm8, %ymm0 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm0 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm0 = mem[0,1,2,3,4],ymm8[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 800(%rdi), %ymm0
@@ -13510,8 +13528,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm0 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm0 = mem[0,1,2,3,4],ymm5[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa %ymm11, %ymm13
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm10[0,1],ymm11[2],ymm10[3,4,5],ymm11[6],ymm10[7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm11[0,1],ymm10[2],ymm11[3,4,5],ymm10[6],ymm11[7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm5, %xmm14
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm14[4],xmm5[5],xmm14[6],xmm5[7]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [0,1,0,1,14,15,12,13,10,11,8,9,12,13,14,15]
@@ -13524,12 +13541,13 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm5[0],ymm4[1,2,3,4,5,6,7],ymm5[8],ymm4[9,10,11,12,13,14,15]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm12[0,1],ymm7[2],ymm12[3,4,5],ymm7[6],ymm12[7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm1[0,1],ymm7[2],ymm1[3,4,5],ymm7[6],ymm1[7]
+; AVX2-FCP-NEXT: vmovdqa %ymm1, %ymm13
; AVX2-FCP-NEXT: vextracti128 $1, %ymm4, %xmm5
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm5[4],xmm4[5],xmm5[6],xmm4[7]
; AVX2-FCP-NEXT: vpshufb %xmm14, %xmm4, %xmm4
; AVX2-FCP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX2-FCP-NEXT: vpshufb %ymm0, %ymm1, %ymm1
+; AVX2-FCP-NEXT: vpshufb %ymm0, %ymm12, %ymm1
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3,4,5,6],ymm1[7]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm4[0],ymm1[1,2,3,4,5,6,7],ymm4[8],ymm1[9,10,11,12,13,14,15]
@@ -13546,7 +13564,6 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3,4,5,6,7],ymm2[8],ymm1[9,10,11,12,13,14,15]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa %ymm9, %ymm15
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm9[0,1],ymm8[2],ymm9[3,4,5],ymm8[6],ymm9[7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
@@ -13558,18 +13575,18 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7],ymm1[8],ymm0[9,10,11,12,13,14,15]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm10[0,1,2],ymm11[3],ymm10[4,5],ymm11[6],ymm10[7]
-; AVX2-FCP-NEXT: vmovdqa %ymm10, %ymm14
-; AVX2-FCP-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm11[0,1,2],ymm10[3],ymm11[4,5],ymm10[6],ymm11[7]
; AVX2-FCP-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqa %ymm10, %ymm4
+; AVX2-FCP-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3,4,5],xmm0[6],xmm1[7]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,0,1,14,15,12,13,10,11,12,13,14,15]
; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX2-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm2 = [2,5,2,5,2,5,2,5]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-FCP-NEXT: vpermd %ymm4, %ymm2, %ymm3
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX2-FCP-NEXT: vpermd %ymm12, %ymm2, %ymm3
; AVX2-FCP-NEXT: vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm3[7]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
@@ -13577,14 +13594,13 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa %ymm7, %ymm5
-; AVX2-FCP-NEXT: vmovdqa %ymm12, %ymm10
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm12[0,1,2],ymm7[3],ymm12[4,5],ymm7[6],ymm12[7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm13[0,1,2],ymm7[3],ymm13[4,5],ymm7[6],ymm13[7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm3
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0],xmm0[1],xmm3[2,3,4,5],xmm0[6],xmm3[7]
; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX2-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX2-FCP-NEXT: vpermd %ymm11, %ymm2, %ymm3
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX2-FCP-NEXT: vpermd %ymm10, %ymm2, %ymm3
; AVX2-FCP-NEXT: vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm3[7]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
@@ -13592,60 +13608,60 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm9[0,1,2],ymm8[3],ymm9[4,5],ymm8[6],ymm9[7]
+; AVX2-FCP-NEXT: vmovdqa %ymm9, %ymm14
; AVX2-FCP-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqu %ymm8, (%rsp) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqa %ymm8, %ymm9
+; AVX2-FCP-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm3
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0],xmm0[1],xmm3[2,3,4,5],xmm0[6],xmm3[7]
; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX2-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX2-FCP-NEXT: vpermd %ymm9, %ymm2, %ymm3
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX2-FCP-NEXT: vpermd %ymm8, %ymm2, %ymm3
; AVX2-FCP-NEXT: vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm3[7]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm3[0],ymm0[1,2,3,4,5,6,7],ymm3[8],ymm0[9,10,11,12,13,14,15]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm7[0,1,2],ymm6[3],ymm7[4,5],ymm6[6],ymm7[7]
+; AVX2-FCP-NEXT: vmovdqa %ymm15, %ymm7
+; AVX2-FCP-NEXT: vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm15[0,1,2],ymm6[3],ymm15[4,5],ymm6[6],ymm15[7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm3
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0],xmm0[1],xmm3[2,3,4,5],xmm0[6],xmm3[7]
; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX2-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX2-FCP-NEXT: vpermd %ymm12, %ymm2, %ymm1
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX2-FCP-NEXT: vpermd %ymm15, %ymm2, %ymm1
; AVX2-FCP-NEXT: vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7],ymm1[8],ymm0[9,10,11,12,13,14,15]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm13[0],ymm14[1],ymm13[2,3],ymm14[4],ymm13[5,6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0],ymm11[1],ymm4[2,3],ymm11[4],ymm4[5,6,7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5],xmm1[6],xmm0[7]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,4,5,2,3,0,1,14,15,12,13,12,13,14,15]
; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm0, %xmm0
; AVX2-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm1
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm4[0,1,1,3]
-; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,1,1,3]
; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} ymm4 = [18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25]
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm0, %ymm2
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm12, %ymm2
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3,4,5,6,7],ymm2[8],ymm1[9,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0],ymm10[1],ymm5[2,3],ymm10[4],ymm5[5,6,7]
-; AVX2-FCP-NEXT: vmovdqa %ymm10, %ymm14
-; AVX2-FCP-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0],ymm13[1],ymm5[2,3],ymm13[4],ymm5[5,6,7]
+; AVX2-FCP-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa %ymm5, %ymm0
; AVX2-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3,4,5],xmm2[6],xmm1[7]
; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm1, %xmm1
; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm11[0,1,1,3]
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm10[0,1,1,3]
; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm5
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm5[7]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
@@ -13653,24 +13669,24 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm6[0],ymm7[1],ymm6[2,3],ymm7[4],ymm6[5,6,7]
+; AVX2-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm5
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm5[1],xmm1[2,3,4,5],xmm5[6],xmm1[7]
; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm1, %xmm1
; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm12[0,1,1,3]
-; AVX2-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm5, %ymm5
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm15[0,1,1,3]
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm15, %ymm5
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm5[7]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm5[0],ymm1[1,2,3,4,5,6,7],ymm5[8],ymm1[9,10,11,12,13,14,15]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0],ymm15[1],ymm8[2,3],ymm15[4],ymm8[5,6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm9[0],ymm14[1],ymm9[2,3],ymm14[4],ymm9[5,6,7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm5
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm5[1],xmm1[2,3,4,5],xmm5[6],xmm1[7]
; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm1, %xmm1
; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm9[0,1,1,3]
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm8[0,1,1,3]
; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm3
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm3[7]
@@ -13679,86 +13695,82 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
+; AVX2-FCP-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm1 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
; AVX2-FCP-NEXT: vmovd {{.*#+}} xmm4 = [8,9,4,5,0,0,0,0,0,0,0,0,0,0,0,0]
; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm1, %xmm3
-; AVX2-FCP-NEXT: vmovdqa %xmm4, %xmm10
+; AVX2-FCP-NEXT: vmovdqa %xmm4, %xmm9
; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm1
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
; AVX2-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0],ymm14[1],ymm0[2,3,4],ymm14[5],ymm0[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0],ymm13[1],ymm0[2,3,4],ymm13[5],ymm0[6,7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm4
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm1[1],xmm4[2],xmm1[3],xmm4[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [0,1,6,7,4,5,2,3,0,1,14,15,12,13,14,15]
; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm4, %xmm4
-; AVX2-FCP-NEXT: vmovdqa %xmm0, %xmm1
; AVX2-FCP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} ymm11 = [20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27]
-; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} ymm8 = [20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27]
+; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm2, %ymm2
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm2[7]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm5 = ymm0[0,1],mem[2],ymm0[3,4,5],mem[6],ymm0[7]
-; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm12 = [0,3,7,2,6,0,0,0]
-; AVX2-FCP-NEXT: vpermd %ymm5, %ymm12, %ymm5
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
-; AVX2-FCP-NEXT: vpshufb %ymm2, %ymm5, %ymm5
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd $187, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm5 = mem[0,1],ymm2[2],mem[3,4,5],ymm2[6],mem[7]
+; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm13 = [0,3,7,2,6,0,0,0]
+; AVX2-FCP-NEXT: vpermd %ymm5, %ymm13, %ymm5
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0,1],xmm5[2,3]
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm5[0],ymm4[1,2,3,4,5,6,7],ymm5[8],ymm4[9,10,11,12,13,14,15]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm13[0,1],ymm7[2],ymm13[3,4],ymm7[5],ymm13[6,7]
-; AVX2-FCP-NEXT: vpshufb %xmm10, %xmm3, %xmm4
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm11[0,1],ymm7[2],ymm11[3,4],ymm7[5],ymm11[6,7]
+; AVX2-FCP-NEXT: vpshufb %xmm9, %xmm3, %xmm4
+; AVX2-FCP-NEXT: vmovdqa %xmm9, %xmm1
; AVX2-FCP-NEXT: vextracti128 $1, %ymm3, %xmm3
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
; AVX2-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm14[0],ymm15[1],ymm14[2,3,4],ymm15[5],ymm14[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm14[0],ymm10[1],ymm14[2,3,4],ymm10[5],ymm14[6,7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm4, %xmm5
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0],xmm4[1],xmm5[2],xmm4[3],xmm5[4,5,6,7]
-; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm4, %xmm4
-; AVX2-FCP-NEXT: vmovdqa %xmm1, %xmm10
+; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm4, %xmm4
+; AVX2-FCP-NEXT: vmovdqa %xmm0, %xmm2
; AVX2-FCP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm0, %ymm0
+; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm12, %ymm0
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4,5,6],ymm0[7]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm6[0,1],ymm5[2],ymm6[3,4,5],ymm5[6],ymm6[7]
-; AVX2-FCP-NEXT: vpermd %ymm4, %ymm12, %ymm4
-; AVX2-FCP-NEXT: vpshufb %ymm2, %ymm4, %ymm4
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm9[0,1],ymm5[2],ymm9[3,4,5],ymm5[6],ymm9[7]
+; AVX2-FCP-NEXT: vpermd %ymm4, %ymm13, %ymm4
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
+; AVX2-FCP-NEXT: vpshufb %ymm12, %ymm4, %ymm4
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3]
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm4[0],ymm0[1,2,3,4,5,6,7],ymm4[8],ymm0[9,10,11,12,13,14,15]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
-; AVX2-FCP-NEXT: vmovd {{.*#+}} xmm1 = [8,9,4,5,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX2-FCP-NEXT: vpblendd $36, (%rsp), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm3
; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
; AVX2-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm3 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm3 = ymm8[0],mem[1],ymm8[2,3,4],mem[5],ymm8[6,7]
+; AVX2-FCP-NEXT: vpblendd $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm3 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm3 = ymm6[0],mem[1],ymm6[2,3,4],mem[5],ymm6[6,7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm3, %xmm4
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1],xmm4[2],xmm3[3],xmm4[4,5,6,7]
-; AVX2-FCP-NEXT: vpshufb %xmm10, %xmm3, %xmm3
+; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm3, %xmm3
; AVX2-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm4, %ymm4
+; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm15, %ymm4
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm4[7]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm4 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm4 = ymm9[0,1],mem[2],ymm9[3,4,5],mem[6],ymm9[7]
-; AVX2-FCP-NEXT: vpermd %ymm4, %ymm12, %ymm4
-; AVX2-FCP-NEXT: vpshufb %ymm2, %ymm4, %ymm4
-; AVX2-FCP-NEXT: vmovdqa %ymm2, %ymm10
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm15[0,1],ymm8[2],ymm15[3,4,5],ymm8[6],ymm15[7]
+; AVX2-FCP-NEXT: vpermd %ymm4, %ymm13, %ymm4
+; AVX2-FCP-NEXT: vpshufb %ymm12, %ymm4, %ymm4
+; AVX2-FCP-NEXT: vmovdqa %ymm12, %ymm6
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3]
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1,2,3,4,5,6,7],ymm4[8],ymm3[9,10,11,12,13,14,15]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
@@ -13767,122 +13779,127 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm3
+; AVX2-FCP-NEXT: vmovdqa %xmm1, %xmm12
; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
; AVX2-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX2-FCP-NEXT: vpblendd $187, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm3 = mem[0,1],ymm3[2],mem[3,4,5],ymm3[6],mem[7]
-; AVX2-FCP-NEXT: vpermd %ymm3, %ymm12, %ymm2
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $221, (%rsp), %ymm3, %ymm3 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm3 = mem[0],ymm3[1],mem[2,3,4],ymm3[5],mem[6,7]
+; AVX2-FCP-NEXT: vpermd %ymm3, %ymm13, %ymm2
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm3 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm3 = ymm1[0],mem[1],ymm1[2,3,4],mem[5],ymm1[6,7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm3, %xmm4
; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1],xmm4[2],xmm3[3],xmm4[4,5,6,7]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm3[0,1,6,7,4,5,2,3,0,1,14,15,12,13,14,15]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm3
+; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm3[7]
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm2, %ymm2
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3]
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3,4,5,6,7],ymm2[8],ymm1[9,10,11,12,13,14,15]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm11 = [0,3,3,3,0,3,7,7]
-; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm0 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm14[0,1],ymm15[2],ymm14[3,4],ymm15[5],ymm14[6,7]
+; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,3,3,3,0,3,7,7]
+; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: vmovdqa %ymm1, %ymm6
+; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm2 = [0,0,16,17,22,23,24,25,0,0,16,17,22,23,24,25,0,0,16,17,22,23,24,25,0,0,16,17,22,23,24,25]
+; AVX2-FCP-NEXT: vpshufb %ymm2, %ymm0, %ymm1
+; AVX2-FCP-NEXT: vmovdqa %ymm2, %ymm4
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm14[0,1],ymm10[2],ymm14[3,4],ymm10[5],ymm14[6,7]
; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [2,5,1,4,2,5,1,4]
; AVX2-FCP-NEXT: # ymm3 = mem[0,1,0,1]
; AVX2-FCP-NEXT: vpermd %ymm2, %ymm3, %ymm2
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25,30,31,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm12 = ymm2[0,1,2,3,4],ymm1[5,6,7],ymm2[8,9,10,11,12],ymm1[13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm13[0,1],ymm7[2,3],ymm13[4,5],ymm7[6,7]
+; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm14 = [30,31,16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31,16,17,22,23,24,25]
+; AVX2-FCP-NEXT: vpshufb %ymm14, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm13 = ymm2[0,1,2,3,4],ymm1[5,6,7],ymm2[8,9,10,11,12],ymm1[13,14,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm11[0,1],ymm7[2,3],ymm11[4,5],ymm7[6,7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-FCP-NEXT: vmovd {{.*#+}} xmm4 = [8,9,4,5,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm2, %xmm2
+; AVX2-FCP-NEXT: vmovdqa %xmm12, %xmm7
+; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm2, %xmm11
; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[10,11,6,7,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vpunpcklwd {{.*#+}} xmm13 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm6[0,1,2],ymm5[3],ymm6[4,5],ymm5[6],ymm6[7]
-; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,4,7,3,6,0,0,0]
-; AVX2-FCP-NEXT: vpermd %ymm1, %ymm2, %ymm0
-; AVX2-FCP-NEXT: vmovdqa %ymm2, %ymm5
+; AVX2-FCP-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm1[0],xmm11[0],xmm1[1],xmm11[1],xmm1[2],xmm11[2],xmm1[3],xmm11[3]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm9[0,1,2],ymm5[3],ymm9[4,5],ymm5[6],ymm9[7]
+; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm0 = [0,4,7,3,6,0,0,0]
+; AVX2-FCP-NEXT: vpermd %ymm1, %ymm0, %ymm0
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm12 = ymm0[0],ymm12[1,2,3,4,5,6,7],ymm0[8],ymm12[9,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm13[0,1],xmm0[2,3]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm12[4,5,6,7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm13 = ymm0[0],ymm13[1,2,3,4,5,6,7],ymm0[8],ymm13[9,10,11,12,13,14,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm13[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm0 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm15 = [0,0,16,17,22,23,24,25,0,0,16,17,22,23,24,25,0,0,16,17,22,23,24,25,0,0,16,17,22,23,24,25]
-; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm0, %ymm0
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm12 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm12 = mem[0,1],ymm14[2],mem[3,4],ymm14[5],mem[6,7]
-; AVX2-FCP-NEXT: vpermd %ymm12, %ymm3, %ymm12
-; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm1 = [30,31,16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31,16,17,22,23,24,25]
-; AVX2-FCP-NEXT: vpshufb %ymm1, %ymm12, %ymm12
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm12[0,1,2,3,4],ymm0[5,6,7],ymm12[8,9,10,11,12],ymm0[13,14,15]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $204, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm12 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm12 = ymm7[0,1],mem[2,3],ymm7[4,5],mem[6,7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm12, %xmm13
-; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm13, %xmm13
-; AVX2-FCP-NEXT: vmovdqa %xmm4, %xmm6
-; AVX2-FCP-NEXT: vmovd {{.*#+}} xmm2 = [10,11,6,7,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm12, %xmm12
-; AVX2-FCP-NEXT: vpunpcklwd {{.*#+}} xmm12 = xmm12[0],xmm13[0],xmm12[1],xmm13[1],xmm12[2],xmm13[2],xmm12[3],xmm13[3]
+; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm0 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: vmovdqa %ymm6, %ymm12
+; AVX2-FCP-NEXT: vmovdqa %ymm4, %ymm1
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm0, %ymm0
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0,1],ymm5[2],ymm6[3,4],ymm5[5],ymm6[6,7]
+; AVX2-FCP-NEXT: vpermd %ymm2, %ymm3, %ymm2
+; AVX2-FCP-NEXT: vpshufb %ymm14, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm0[5,6,7],ymm2[8,9,10,11,12],ymm0[13,14,15]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd $204, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm2 = ymm2[0,1],mem[2,3],ymm2[4,5],mem[6,7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm2, %xmm13
+; AVX2-FCP-NEXT: vmovdqa %xmm7, %xmm9
+; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm13, %xmm13
+; AVX2-FCP-NEXT: vmovd {{.*#+}} xmm4 = [10,11,6,7,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm2, %xmm2
+; AVX2-FCP-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm13[0],xmm2[1],xmm13[1],xmm2[2],xmm13[2],xmm2[3],xmm13[3]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm7[0,1,2],ymm10[3],ymm7[4,5],ymm10[6],ymm7[7]
-; AVX2-FCP-NEXT: vmovdqa %ymm5, %ymm4
-; AVX2-FCP-NEXT: vpermd %ymm13, %ymm5, %ymm13
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29]
-; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm13, %ymm13
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm10[0,1,2],ymm7[3],ymm10[4,5],ymm7[6],ymm10[7]
+; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm11 = [0,4,7,3,6,0,0,0]
+; AVX2-FCP-NEXT: vpermd %ymm13, %ymm11, %ymm13
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29]
+; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm13, %ymm13
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm13[0],ymm0[1,2,3,4,5,6,7],ymm13[8],ymm0[9,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm12 = xmm12[0,1],xmm13[2,3]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm12[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm13[2,3]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm0 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm0, %ymm0
-; AVX2-FCP-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm12 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm12 = ymm8[0,1],mem[2],ymm8[3,4],mem[5],ymm8[6,7]
-; AVX2-FCP-NEXT: vpermd %ymm12, %ymm3, %ymm12
-; AVX2-FCP-NEXT: vpshufb %ymm1, %ymm12, %ymm12
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm12[0,1,2,3,4],ymm0[5,6,7],ymm12[8,9,10,11,12],ymm0[13,14,15]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm12 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm12 = mem[0,1],ymm8[2,3],mem[4,5],ymm8[6,7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm12, %xmm13
-; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm13, %xmm13
-; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm12, %xmm12
-; AVX2-FCP-NEXT: vpunpcklwd {{.*#+}} xmm12 = xmm12[0],xmm13[0],xmm12[1],xmm13[1],xmm12[2],xmm13[2],xmm12[3],xmm13[3]
-; AVX2-FCP-NEXT: vpblendd $72, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm13 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm13 = ymm9[0,1,2],mem[3],ymm9[4,5],mem[6],ymm9[7]
+; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm0 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm0
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm2 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
+; AVX2-FCP-NEXT: vpermd %ymm2, %ymm3, %ymm2
+; AVX2-FCP-NEXT: vpshufb %ymm14, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm0[5,6,7],ymm2[8,9,10,11,12],ymm0[13,14,15]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd $204, (%rsp), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm2 = ymm2[0,1],mem[2,3],ymm2[4,5],mem[6,7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm2, %xmm13
+; AVX2-FCP-NEXT: vpshufb %xmm9, %xmm13, %xmm13
+; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm2, %xmm2
+; AVX2-FCP-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm13[0],xmm2[1],xmm13[1],xmm2[2],xmm13[2],xmm2[3],xmm13[3]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm15[0,1,2],ymm8[3],ymm15[4,5],ymm8[6],ymm15[7]
+; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,4,7,3,6,0,0,0]
; AVX2-FCP-NEXT: vpermd %ymm13, %ymm4, %ymm13
-; AVX2-FCP-NEXT: vmovdqa %ymm4, %ymm2
-; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm13, %ymm13
+; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm13, %ymm13
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm13[0],ymm0[1,2,3,4,5,6,7],ymm13[8],ymm0[9,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm12 = xmm12[0,1],xmm13[2,3]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm12[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm13[2,3]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX2-FCP-NEXT: vpermd %ymm15, %ymm11, %ymm0
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovdqu (%rsp), %ymm4 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm4[0,1],ymm9[2],ymm4[3,4],ymm9[5],ymm4[6,7]
-; AVX2-FCP-NEXT: vpermd %ymm6, %ymm3, %ymm3
-; AVX2-FCP-NEXT: vpshufb %ymm1, %ymm3, %ymm3
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm0[5,6,7],ymm3[8,9,10,11,12],ymm0[13,14,15]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm12[0,1,2],ymm8[3],ymm12[4,5],ymm8[6],ymm12[7]
-; AVX2-FCP-NEXT: vpermd %ymm3, %ymm2, %ymm2
-; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm2, %ymm1
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX2-FCP-NEXT: vpermd %ymm13, %ymm12, %ymm0
+; AVX2-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm0
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1],ymm4[2],ymm8[3,4],ymm4[5],ymm8[6,7]
+; AVX2-FCP-NEXT: vpermd %ymm2, %ymm3, %ymm2
+; AVX2-FCP-NEXT: vpshufb %ymm14, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm0[5,6,7],ymm2[8,9,10,11,12],ymm0[13,14,15]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm9[0,1,2],ymm12[3],ymm9[4,5],ymm12[6],ymm9[7]
+; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,4,7,3,6,0,0,0]
+; AVX2-FCP-NEXT: vpermd %ymm2, %ymm1, %ymm2
+; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm2, %ymm1
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm11[0,1],ymm13[2,3],ymm11[4,5],ymm13[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm15[0,1],ymm11[2,3],ymm15[4,5],ymm11[6,7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[8,9,4,5,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[10,11,6,7,u,u,u,u,u,u,u,u,u,u,u,u]
@@ -13891,8 +13908,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendd $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm0 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm0 = mem[0,1],ymm14[2,3],mem[4,5],ymm14[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0,1],ymm5[2,3],ymm6[4,5],ymm5[6,7]
; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,4,7,0,0,4,7,0]
; AVX2-FCP-NEXT: # ymm1 = mem[0,1,0,1]
; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm3 # 32-byte Folded Reload
@@ -13902,48 +13918,50 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: # ymm3 = mem[0,1,0,1]
; AVX2-FCP-NEXT: vpermd %ymm0, %ymm3, %ymm0
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27,28,29,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm5[5,6,7],ymm0[8,9,10,11,12],ymm5[13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm10[0],ymm7[1],ymm10[2,3],ymm7[4],ymm10[5,6,7]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $72, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm7 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm7 = ymm5[0,1,2],mem[3],ymm5[4,5],mem[6],ymm5[7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm7, %xmm10
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm0[0,1,2,3,4],ymm5[5,6,7],ymm0[8,9,10,11,12],ymm5[13,14,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0],ymm10[1],ymm7[2,3],ymm10[4],ymm7[5,6,7]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd $72, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm0 = ymm0[0,1,2],mem[3],ymm0[4,5],mem[6],ymm0[7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm10
; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} xmm5 = [10,11,6,7,10,11,6,7,10,11,6,7,10,11,6,7]
; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm10, %xmm10
-; AVX2-FCP-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,6,4,6,7]
-; AVX2-FCP-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7]
-; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm10 = [1,4,0,3,7,0,0,0]
-; AVX2-FCP-NEXT: vpermd %ymm6, %ymm10, %ymm6
+; AVX2-FCP-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,4,6,7]
+; AVX2-FCP-NEXT: vpunpckhwd {{.*#+}} xmm10 = xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
+; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm0 = [1,4,0,3,7,0,0,0]
+; AVX2-FCP-NEXT: vpermd %ymm7, %ymm0, %ymm7
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm14 = [u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
-; AVX2-FCP-NEXT: vpshufb %ymm14, %ymm6, %ymm6
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm6[0],ymm0[1,2,3,4,5,6,7],ymm6[8],ymm0[9,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm6 = xmm7[0,1],xmm6[2,3]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm4[0,1],ymm9[2,3],ymm4[4,5],ymm9[6,7]
-; AVX2-FCP-NEXT: vpermd %ymm15, %ymm1, %ymm7
+; AVX2-FCP-NEXT: vpshufb %ymm14, %ymm7, %ymm7
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm7[0],ymm6[1,2,3,4,5,6,7],ymm7[8],ymm6[9,10,11,12,13,14,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm7 = xmm10[0,1],xmm7[2,3]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm8[0,1],ymm4[2,3],ymm8[4,5],ymm4[6,7]
+; AVX2-FCP-NEXT: vpermd %ymm13, %ymm1, %ymm7
; AVX2-FCP-NEXT: vpshufb %ymm2, %ymm7, %ymm7
; AVX2-FCP-NEXT: vpermd %ymm6, %ymm3, %ymm6
-; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm15 = [28,29,18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29,18,19,20,21,26,27]
-; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm6, %ymm6
+; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm10 = [28,29,18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29,18,19,20,21,26,27]
+; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm6, %ymm6
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm6[0,1,2,3,4],ymm7[5,6,7],ymm6[8,9,10,11,12],ymm7[13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0],ymm12[1],ymm8[2,3],ymm12[4],ymm8[5,6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm11[0,1,2],ymm13[3],ymm11[4,5],ymm13[6],ymm11[7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm12[0],ymm9[1],ymm12[2,3],ymm9[4],ymm12[5,6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm15[0,1,2],ymm11[3],ymm15[4,5],ymm11[6],ymm15[7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm8, %xmm9
; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm9, %xmm9
; AVX2-FCP-NEXT: vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,6,4,6,7]
; AVX2-FCP-NEXT: vpunpckhwd {{.*#+}} xmm8 = xmm8[4],xmm9[4],xmm8[5],xmm9[5],xmm8[6],xmm9[6],xmm8[7],xmm9[7]
-; AVX2-FCP-NEXT: vpermd %ymm7, %ymm10, %ymm7
+; AVX2-FCP-NEXT: vpermd %ymm7, %ymm0, %ymm7
; AVX2-FCP-NEXT: vpshufb %ymm14, %ymm7, %ymm7
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm7[0],ymm6[1,2,3,4,5,6,7],ymm7[8],ymm6[9,10,11,12,13,14,15]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm7 = xmm8[0,1],xmm7[2,3]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $204, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm7 = ymm7[0,1],mem[2,3],ymm7[4,5],mem[6,7]
+; AVX2-FCP-NEXT: vpblendd $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm7 = mem[0,1],ymm7[2,3],mem[4,5],ymm7[6,7]
; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm8 # 32-byte Folded Reload
; AVX2-FCP-NEXT: vpshufb %ymm2, %ymm8, %ymm8
; AVX2-FCP-NEXT: vpermd %ymm7, %ymm3, %ymm7
-; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm7, %ymm7
+; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm7, %ymm7
+; AVX2-FCP-NEXT: vmovdqa %ymm10, %ymm11
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm7[0,1,2,3,4],ymm8[5,6,7],ymm7[8,9,10,11,12],ymm8[13,14,15]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
; AVX2-FCP-NEXT: vpblendd $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
@@ -13951,11 +13969,11 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
; AVX2-FCP-NEXT: vpblendd $183, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm9 = mem[0,1,2],ymm9[3],mem[4,5],ymm9[6],mem[7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm9, %xmm11
-; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm11, %xmm11
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm9, %xmm10
+; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm10, %xmm10
; AVX2-FCP-NEXT: vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,6,4,6,7]
-; AVX2-FCP-NEXT: vpunpckhwd {{.*#+}} xmm9 = xmm9[4],xmm11[4],xmm9[5],xmm11[5],xmm9[6],xmm11[6],xmm9[7],xmm11[7]
-; AVX2-FCP-NEXT: vpermd %ymm8, %ymm10, %ymm8
+; AVX2-FCP-NEXT: vpunpckhwd {{.*#+}} xmm9 = xmm9[4],xmm10[4],xmm9[5],xmm10[5],xmm9[6],xmm10[6],xmm9[7],xmm10[7]
+; AVX2-FCP-NEXT: vpermd %ymm8, %ymm0, %ymm8
; AVX2-FCP-NEXT: vpshufb %ymm14, %ymm8, %ymm8
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm8[0],ymm7[1,2,3,4,5,6,7],ymm8[8],ymm7[9,10,11,12,13,14,15]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm8 = xmm9[0,1],xmm8[2,3]
@@ -13963,80 +13981,81 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
; AVX2-FCP-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $204, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm2 = ymm2[0,1],mem[2,3],ymm2[4,5],mem[6,7]
+; AVX2-FCP-NEXT: vpblendd $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm2 = mem[0,1],ymm2[2,3],mem[4,5],ymm2[6,7]
; AVX2-FCP-NEXT: vpermd %ymm2, %ymm3, %ymm2
-; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm2, %ymm2
; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7],ymm2[8,9,10,11,12],ymm1[13,14,15]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX2-FCP-NEXT: vpblendd $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm2 = mem[0],ymm2[1],mem[2,3],ymm2[4],mem[5,6,7]
-; AVX2-FCP-NEXT: vpermd %ymm2, %ymm10, %ymm2
-; AVX2-FCP-NEXT: vpshufb %ymm14, %ymm2, %ymm2
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-FCP-NEXT: vpblendd $72, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: # ymm3 = ymm3[0,1,2],mem[3],ymm3[4,5],mem[6],ymm3[7]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm3, %xmm4
-; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm4, %xmm4
-; AVX2-FCP-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,4,6,7]
-; AVX2-FCP-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3,4,5,6,7],ymm2[8],ymm1[9,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm2, 96(%rsi)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm2, 32(%rsi)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm2, 64(%rsi)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm2, (%rsi)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm2, 96(%rdx)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm2, 32(%rdx)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm2, 64(%rdx)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm2, (%rdx)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm2, 32(%rcx)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm2, 96(%rcx)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm2, 64(%rcx)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm2, (%rcx)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm2, 96(%r8)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm2, 32(%r8)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm2, 64(%r8)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm2, (%r8)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm2, 96(%r9)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm2, 32(%r9)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm2, (%r9)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm2, 64(%r9)
+; AVX2-FCP-NEXT: vpermd %ymm2, %ymm0, %ymm0
+; AVX2-FCP-NEXT: vpshufb %ymm14, %ymm0, %ymm0
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX2-FCP-NEXT: vpblendd $72, (%rsp), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: # ymm2 = ymm2[0,1,2],mem[3],ymm2[4,5],mem[6],ymm2[7]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
+; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm3, %xmm3
+; AVX2-FCP-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,4,6,7]
+; AVX2-FCP-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm1, 96(%rsi)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm1, 32(%rsi)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm1, 64(%rsi)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm1, (%rsi)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm1, 96(%rdx)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm1, 32(%rdx)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm1, 64(%rdx)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm1, (%rdx)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm1, 32(%rcx)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm1, 96(%rcx)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm1, 64(%rcx)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm1, (%rcx)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm1, 96(%r8)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm1, 32(%r8)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm1, 64(%r8)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm1, (%r8)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm1, 96(%r9)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm1, 32(%r9)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm1, (%r9)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm1, 64(%r9)
; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm2, 96(%rax)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm2, 32(%rax)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm2, 64(%rax)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm2, (%rax)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm1, 96(%rax)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm1, 32(%rax)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm1, 64(%rax)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm1, (%rax)
; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX2-FCP-NEXT: vmovdqa %ymm1, 32(%rax)
+; AVX2-FCP-NEXT: vmovdqa %ymm0, 32(%rax)
; AVX2-FCP-NEXT: vmovdqa %ymm7, (%rax)
; AVX2-FCP-NEXT: vmovdqa %ymm6, 96(%rax)
-; AVX2-FCP-NEXT: vmovdqa %ymm0, 64(%rax)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm0, 64(%rax)
; AVX2-FCP-NEXT: addq $1544, %rsp # imm = 0x608
; AVX2-FCP-NEXT: vzeroupper
; AVX2-FCP-NEXT: retq
@@ -14938,7 +14957,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
;
; AVX512-FCP-LABEL: load_i16_stride7_vf64:
; AVX512-FCP: # %bb.0:
-; AVX512-FCP-NEXT: subq $1800, %rsp # imm = 0x708
+; AVX512-FCP-NEXT: subq $1736, %rsp # imm = 0x6C8
; AVX512-FCP-NEXT: vmovdqa64 512(%rdi), %zmm5
; AVX512-FCP-NEXT: vmovdqa64 64(%rdi), %zmm4
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [2,5,9,0,12,0,0,0]
@@ -14947,28 +14966,28 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vpermd %zmm4, %zmm3, %zmm2
; AVX512-FCP-NEXT: vmovdqa64 %zmm4, %zmm26
; AVX512-FCP-NEXT: vpermd %zmm5, %zmm3, %zmm4
-; AVX512-FCP-NEXT: vmovdqa64 %zmm5, %zmm22
+; AVX512-FCP-NEXT: vmovdqa64 %zmm5, %zmm20
; AVX512-FCP-NEXT: vmovdqa 480(%rdi), %ymm5
; AVX512-FCP-NEXT: vmovdqa 448(%rdi), %ymm6
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm6[0,1],ymm5[2],ymm6[3,4,5],ymm5[6],ymm6[7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm6, %ymm21
-; AVX512-FCP-NEXT: vmovdqa64 %ymm5, %ymm23
+; AVX512-FCP-NEXT: vmovdqa64 %ymm6, %ymm23
+; AVX512-FCP-NEXT: vmovdqa64 %ymm5, %ymm25
; AVX512-FCP-NEXT: vextracti128 $1, %ymm3, %xmm5
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4],xmm3[5],xmm5[6],xmm3[7]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,14,15,12,13,10,11,8,9,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm5, %ymm6
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,128,128,128,128,6,7,12,13,2,3,16,17,30,31,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm4, %ymm4
-; AVX512-FCP-NEXT: vporq %ymm4, %ymm6, %ymm17
+; AVX512-FCP-NEXT: vporq %ymm4, %ymm6, %ymm16
; AVX512-FCP-NEXT: vmovdqa 672(%rdi), %xmm7
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,2,3,4,5,6,7,0,1,14,15,12,13,14,15]
; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm7, %xmm6
-; AVX512-FCP-NEXT: vmovdqa64 %xmm7, %xmm20
+; AVX512-FCP-NEXT: vmovdqa64 %xmm7, %xmm21
; AVX512-FCP-NEXT: vpbroadcastw 700(%rdi), %xmm7
; AVX512-FCP-NEXT: vpunpckhdq {{.*#+}} xmm28 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; AVX512-FCP-NEXT: vmovdqa64 192(%rdi), %ymm31
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm31[0,1,0,2]
-; AVX512-FCP-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vmovdqa64 192(%rdi), %ymm24
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm24[0,1,0,2]
+; AVX512-FCP-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm2, %ymm2
; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm6
; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm7
@@ -14977,400 +14996,403 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vextracti128 $1, %ymm5, %xmm6
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm6[4],xmm5[5],xmm6[6],xmm5[7]
; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm5, %ymm3
-; AVX512-FCP-NEXT: vporq %ymm2, %ymm3, %ymm16
+; AVX512-FCP-NEXT: vporq %ymm2, %ymm3, %ymm17
; AVX512-FCP-NEXT: vmovdqa 224(%rdi), %xmm14
; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm14, %xmm2
; AVX512-FCP-NEXT: vpbroadcastw 252(%rdi), %xmm3
; AVX512-FCP-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
; AVX512-FCP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-FCP-NEXT: vmovdqa 240(%rdi), %xmm15
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,128,128,128,128,0,1,6,7,8,9,18,19,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm0, %ymm0
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,128,128,128,128,128,0,1,6,7,8,9,18,19,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm0, %ymm0
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm8[0,1,2],ymm7[3],ymm8[4,5],ymm7[6],ymm8[7]
; AVX512-FCP-NEXT: vmovdqa64 %ymm8, %ymm18
; AVX512-FCP-NEXT: vmovdqa64 %ymm7, %ymm19
; AVX512-FCP-NEXT: vextracti128 $1, %ymm3, %xmm4
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1],xmm4[2,3,4,5],xmm3[6],xmm4[7]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [2,3,0,1,14,15,12,13,10,11,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm3, %ymm3
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [2,3,0,1,14,15,12,13,10,11,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm3, %ymm3
; AVX512-FCP-NEXT: vpor %ymm0, %ymm3, %ymm0
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa 160(%rdi), %ymm2
-; AVX512-FCP-NEXT: vmovdqa 128(%rdi), %ymm12
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm12[0,1],ymm2[2],ymm12[3,4,5],ymm2[6],ymm12[7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm4, %xmm6
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm4[0,1,2,3],xmm6[4],xmm4[5],xmm6[6],xmm4[7]
+; AVX512-FCP-NEXT: vmovdqa 160(%rdi), %ymm13
+; AVX512-FCP-NEXT: vmovdqa 128(%rdi), %ymm2
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm2[0,1],ymm13[2],ymm2[3,4,5],ymm13[6],ymm2[7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm4, %xmm7
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm4[0,1,2,3],xmm7[4],xmm4[5],xmm7[6],xmm4[7]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,0,1,14,15,12,13,10,11,8,9,12,13,14,15]
-; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm6, %xmm6
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm7
-; AVX512-FCP-NEXT: vpbroadcastd {{.*#+}} ymm6 = [22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29]
-; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm9, %ymm9
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm7[0,1,2,3,4,5,6],ymm9[7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm15[0],xmm14[1],xmm15[2,3,4,5,6,7]
-; AVX512-FCP-NEXT: vmovdqa %xmm15, %xmm7
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [2,3,0,1,14,15,14,15,8,9,10,11,12,13,14,15]
-; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm10, %xmm10
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
-; AVX512-FCP-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa64 %ymm21, %ymm13
-; AVX512-FCP-NEXT: vmovdqa64 %ymm23, %ymm15
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm13[0,1,2],ymm15[3],ymm13[4,5],ymm15[6],ymm13[7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm9, %xmm10
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm10[0],xmm9[1],xmm10[2,3,4,5],xmm9[6],xmm10[7]
-; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm9, %ymm8
-; AVX512-FCP-NEXT: vpermd %zmm22, %zmm1, %zmm1
-; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm1, %ymm1
-; AVX512-FCP-NEXT: vpor %ymm1, %ymm8, %ymm1
+; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm7, %xmm7
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm8
+; AVX512-FCP-NEXT: vpbroadcastd {{.*#+}} ymm7 = [22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29]
+; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm10, %ymm10
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm8[0,1,2,3,4,5,6],ymm10[7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm15[0],xmm14[1],xmm15[2,3,4,5,6,7]
+; AVX512-FCP-NEXT: vmovdqa %xmm15, %xmm4
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [2,3,0,1,14,15,14,15,8,9,10,11,12,13,14,15]
+; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm11, %xmm11
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm6
+; AVX512-FCP-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vmovdqa64 %ymm23, %ymm6
+; AVX512-FCP-NEXT: vmovdqa64 %ymm25, %ymm15
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm6[0,1,2],ymm15[3],ymm6[4,5],ymm15[6],ymm6[7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm10, %xmm11
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm11[0],xmm10[1],xmm11[2,3,4,5],xmm10[6],xmm11[7]
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm10, %ymm9
+; AVX512-FCP-NEXT: vpermd %zmm20, %zmm1, %zmm1
+; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm1, %ymm1
+; AVX512-FCP-NEXT: vpor %ymm1, %ymm9, %ymm1
; AVX512-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa 608(%rdi), %ymm4
+; AVX512-FCP-NEXT: vmovdqa 608(%rdi), %ymm0
; AVX512-FCP-NEXT: vmovdqa 576(%rdi), %ymm5
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm5[0,1],ymm4[2],ymm5[3,4,5],ymm4[6],ymm5[7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm8, %xmm9
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm9[4],xmm8[5],xmm9[6],xmm8[7]
-; AVX512-FCP-NEXT: vmovdqa64 640(%rdi), %ymm25
-; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm8, %xmm8
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm25[0,1,0,2]
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm6
-; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm21
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm8[0,1,2,3,4,5,6],ymm6[7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm5[0,1],ymm0[2],ymm5[3,4,5],ymm0[6],ymm5[7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm23
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm9, %xmm10
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3],xmm10[4],xmm9[5],xmm10[6],xmm9[7]
+; AVX512-FCP-NEXT: vmovdqa64 640(%rdi), %ymm31
+; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm9, %xmm9
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm31[0,1,0,2]
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm0, %ymm7
+; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm22
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5,6],ymm7[7]
; AVX512-FCP-NEXT: vmovdqa 688(%rdi), %xmm3
-; AVX512-FCP-NEXT: vmovdqa64 %xmm20, %xmm1
-; AVX512-FCP-NEXT: vmovdqa64 %xmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm3[0],xmm1[1],xmm3[2,3,4,5,6,7]
-; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm8, %xmm8
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm23 = [2,6,9,0,13,0,0,0]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm6, %zmm0
+; AVX512-FCP-NEXT: vmovdqa64 %xmm21, %xmm1
+; AVX512-FCP-NEXT: vmovdqa64 %xmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm3[0],xmm1[1],xmm3[2,3,4,5,6,7]
+; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm7, %xmm8
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [2,6,9,0,13,0,0,0]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm9, %zmm0
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa64 %ymm18, %ymm11
+; AVX512-FCP-NEXT: vmovdqa64 %ymm18, %ymm12
; AVX512-FCP-NEXT: vmovdqu64 %ymm18, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-FCP-NEXT: vmovdqa64 %ymm19, %ymm0
; AVX512-FCP-NEXT: vmovdqu64 %ymm19, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm0[0],ymm11[1],ymm0[2,3],ymm11[4],ymm0[5,6,7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm6, %xmm8
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm6[0],xmm8[1],xmm6[2,3,4,5],xmm8[6],xmm6[7]
-; AVX512-FCP-NEXT: vmovdqa64 %zmm26, %zmm29
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm0[0],ymm12[1],ymm0[2,3],ymm12[4],ymm0[5,6,7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm8, %xmm9
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0],xmm9[1],xmm8[2,3,4,5],xmm9[6],xmm8[7]
+; AVX512-FCP-NEXT: vmovdqa64 %zmm26, %zmm25
; AVX512-FCP-NEXT: vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vpermd %zmm26, %zmm23, %zmm9
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [128,128,128,128,128,128,128,128,128,128,2,3,4,5,10,11,16,17,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm9, %ymm9
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [4,5,2,3,0,1,14,15,12,13,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm8, %ymm8
-; AVX512-FCP-NEXT: vpor %ymm9, %ymm8, %ymm8
+; AVX512-FCP-NEXT: vpermd %zmm26, %zmm7, %zmm10
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [128,128,128,128,128,128,128,128,128,128,2,3,4,5,10,11,16,17,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm10, %ymm10
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [4,5,2,3,0,1,14,15,12,13,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm8, %ymm8
+; AVX512-FCP-NEXT: vpor %ymm10, %ymm8, %ymm8
; AVX512-FCP-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm12[0,1,2],ymm2[3],ymm12[4,5],ymm2[6],ymm12[7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm2, %ymm27
-; AVX512-FCP-NEXT: vmovdqa64 %ymm12, %ymm26
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm8, %xmm9
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm9[0],xmm8[1],xmm9[2,3,4,5],xmm8[6],xmm9[7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm2[0,1,2],ymm13[3],ymm2[4,5],ymm13[6],ymm2[7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm13, %ymm27
+; AVX512-FCP-NEXT: vmovdqa64 %ymm2, %ymm26
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm8, %xmm10
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm10[0],xmm8[1],xmm10[2,3,4,5],xmm8[6],xmm10[7]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,0,1,14,15,12,13,10,11,12,13,14,15]
; AVX512-FCP-NEXT: vpshufb %xmm2, %xmm8, %xmm8
-; AVX512-FCP-NEXT: vmovdqa64 %xmm2, %xmm24
+; AVX512-FCP-NEXT: vmovdqa64 %xmm2, %xmm29
; AVX512-FCP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} ymm18 = [2,5,2,5,2,5,2,5]
-; AVX512-FCP-NEXT: vpermd %ymm31, %ymm18, %ymm12
-; AVX512-FCP-NEXT: vpshufhw {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm8[0,1,2,3,4,5,6],ymm12[7]
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm14[0],xmm7[0],xmm14[1],xmm7[1],xmm14[2],xmm7[2],xmm14[3],xmm7[3]
-; AVX512-FCP-NEXT: vmovdqa64 %xmm7, %xmm20
+; AVX512-FCP-NEXT: vpermd %ymm24, %ymm18, %ymm13
+; AVX512-FCP-NEXT: vpshufhw {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm8[0,1,2,3,4,5,6],ymm13[7]
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm14[0],xmm4[0],xmm14[1],xmm4[1],xmm14[2],xmm4[2],xmm14[3],xmm4[3]
+; AVX512-FCP-NEXT: vmovdqa64 %xmm4, %xmm21
; AVX512-FCP-NEXT: vmovdqa64 %xmm14, %xmm19
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm8, %xmm14
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm14, %zmm12, %zmm2
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm8, %xmm14
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm14, %zmm13, %zmm2
; AVX512-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm15[0],ymm13[1],ymm15[2,3],ymm13[4],ymm15[5,6,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm15[0],ymm6[1],ymm15[2,3],ymm6[4],ymm15[5,6,7]
; AVX512-FCP-NEXT: vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vmovdqa %ymm13, %ymm2
-; AVX512-FCP-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm12, %xmm14
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm12[0],xmm14[1],xmm12[2,3,4,5],xmm14[6],xmm12[7]
-; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm12, %ymm10
-; AVX512-FCP-NEXT: vpermd %zmm22, %zmm23, %zmm7
-; AVX512-FCP-NEXT: vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm7, %ymm6
-; AVX512-FCP-NEXT: vpor %ymm6, %ymm10, %ymm6
+; AVX512-FCP-NEXT: vmovdqa %ymm6, %ymm2
+; AVX512-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm13, %xmm14
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm13 = xmm13[0],xmm14[1],xmm13[2,3,4,5],xmm14[6],xmm13[7]
+; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm13, %ymm11
+; AVX512-FCP-NEXT: vpermd %zmm20, %zmm7, %zmm7
+; AVX512-FCP-NEXT: vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm7, %ymm7
+; AVX512-FCP-NEXT: vpor %ymm7, %ymm11, %ymm6
; AVX512-FCP-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm5[0,1,2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm4, %ymm23
-; AVX512-FCP-NEXT: vmovdqa %ymm5, %ymm4
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm6, %xmm7
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0],xmm6[1],xmm7[2,3,4,5],xmm6[6],xmm7[7]
-; AVX512-FCP-NEXT: vmovdqa64 %xmm24, %xmm5
-; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm6, %xmm6
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512-FCP-NEXT: vpermd %ymm25, %ymm18, %ymm7
-; AVX512-FCP-NEXT: vpshufhw {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,6],ymm7[7]
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm23, %ymm4
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm5[0,1,2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm5, %ymm23
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm7, %xmm9
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm9[0],xmm7[1],xmm9[2,3,4,5],xmm7[6],xmm9[7]
+; AVX512-FCP-NEXT: vmovdqa64 %xmm29, %xmm5
+; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm7, %xmm7
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512-FCP-NEXT: vpermd %ymm31, %ymm18, %ymm9
+; AVX512-FCP-NEXT: vpshufhw {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,6],ymm9[7]
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm9 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; AVX512-FCP-NEXT: vmovdqa64 %xmm3, %xmm18
-; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm7, %xmm9
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm9, %zmm6, %zmm1
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm9, %xmm10
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm7, %zmm1
; AVX512-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm0[0],ymm11[1],ymm0[2,3,4],ymm11[5],ymm0[6,7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm6, %xmm9
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0],xmm6[1],xmm9[2],xmm6[3],xmm9[4,5,6,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm0[0],ymm12[1],ymm0[2,3,4],ymm12[5],ymm0[6,7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm7, %xmm10
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm10[0],xmm7[1],xmm10[2],xmm7[3],xmm10[4,5,6,7]
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [10,3,6,15,12,13,6,15]
-; AVX512-FCP-NEXT: vpermd %zmm29, %zmm1, %zmm10
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [128,128,128,128,128,128,128,128,128,128,4,5,10,11,0,1,22,23,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm10, %ymm10
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [6,7,4,5,2,3,0,1,14,15,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm9, %ymm9
-; AVX512-FCP-NEXT: vpor %ymm10, %ymm9, %ymm0
+; AVX512-FCP-NEXT: vpermd %zmm25, %zmm1, %zmm11
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [128,128,128,128,128,128,128,128,128,128,4,5,10,11,0,1,22,23,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm11, %ymm11
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm13 = [6,7,4,5,2,3,0,1,14,15,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512-FCP-NEXT: vpshufb %ymm13, %ymm7, %ymm7
+; AVX512-FCP-NEXT: vpor %ymm7, %ymm11, %ymm0
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT: vmovdqa64 %ymm27, %ymm3
; AVX512-FCP-NEXT: vmovdqa64 %ymm26, %ymm5
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm3[0],ymm5[1],ymm3[2,3],ymm5[4],ymm3[5,6,7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm9, %xmm10
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0],xmm10[1],xmm9[2,3,4,5],xmm10[6],xmm9[7]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [0,1,4,5,2,3,0,1,14,15,12,13,12,13,14,15]
-; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm9, %xmm9
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm14
-; AVX512-FCP-NEXT: vpbroadcastd {{.*#+}} ymm11 = [18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm31[0,1,1,3]
-; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm9, %ymm13
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm14[0,1,2,3,4,5,6],ymm13[7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm3[0],ymm5[1],ymm3[2,3],ymm5[4],ymm3[5,6,7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm7, %xmm11
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0],xmm11[1],xmm7[2,3,4,5],xmm11[6],xmm7[7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [0,1,4,5,2,3,0,1,14,15,12,13,12,13,14,15]
+; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm7, %xmm7
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm14
+; AVX512-FCP-NEXT: vpbroadcastd {{.*#+}} ymm12 = [18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm24[0,1,1,3]
+; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm7, %ymm6
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm14[0,1,2,3,4,5,6],ymm6[7]
; AVX512-FCP-NEXT: vpbroadcastd {{.*#+}} xmm14 = [12,13,10,11,12,13,10,11,12,13,10,11,12,13,10,11]
; AVX512-FCP-NEXT: vpshufb %xmm14, %xmm8, %xmm8
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm13, %zmm0
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm6, %zmm0
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm15[0],ymm2[1],ymm15[2,3,4],ymm2[5],ymm15[6,7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm8, %xmm13
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm13[0],xmm8[1],xmm13[2],xmm8[3],xmm13[4,5,6,7]
-; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm8, %ymm8
-; AVX512-FCP-NEXT: vpermd %zmm22, %zmm1, %zmm12
-; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm12, %ymm6
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm15[0],ymm2[1],ymm15[2,3,4],ymm2[5],ymm15[6,7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm6, %xmm8
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm8[0],xmm6[1],xmm8[2],xmm6[3],xmm8[4,5,6,7]
+; AVX512-FCP-NEXT: vpshufb %ymm13, %ymm6, %ymm6
+; AVX512-FCP-NEXT: vpermd %zmm20, %zmm1, %zmm8
+; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm8, %ymm8
; AVX512-FCP-NEXT: vpor %ymm6, %ymm8, %ymm0
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa %ymm4, %ymm1
-; AVX512-FCP-NEXT: vmovdqa64 %ymm23, %ymm2
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm2[0],ymm4[1],ymm2[2,3],ymm4[4],ymm2[5,6,7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm23, %ymm1
+; AVX512-FCP-NEXT: vmovdqa %ymm4, %ymm2
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm4[0],ymm1[1],ymm4[2,3],ymm1[4],ymm4[5,6,7]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm6, %xmm8
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0],xmm8[1],xmm6[2,3,4,5],xmm8[6],xmm6[7]
-; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm6, %xmm6
+; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm6, %xmm6
; AVX512-FCP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm25[0,1,1,3]
-; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm8, %ymm10
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm31[0,1,1,3]
+; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm8, %ymm10
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,6],ymm10[7]
-; AVX512-FCP-NEXT: vpshufb %xmm14, %xmm7, %xmm7
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm0
+; AVX512-FCP-NEXT: vpshufb %xmm14, %xmm9, %xmm9
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm9, %zmm6, %zmm0
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0],ymm5[1],ymm3[2,3,4],ymm5[5],ymm3[6,7]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm0[1],xmm3[2],xmm0[3],xmm3[4,5,6,7]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [0,1,6,7,4,5,2,3,0,1,14,15,12,13,14,15]
-; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm3, %xmm3
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512-FCP-NEXT: vpbroadcastd {{.*#+}} ymm7 = [20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27]
-; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm9, %ymm6
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm6[7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0],xmm0[1],xmm3[2],xmm0[3],xmm3[4,5,6,7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,6,7,4,5,2,3,0,1,14,15,12,13,14,15]
+; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512-FCP-NEXT: vpbroadcastd {{.*#+}} ymm9 = [20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27]
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm7, %ymm6
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm6[7]
; AVX512-FCP-NEXT: vpbroadcastw 232(%rdi), %xmm6
-; AVX512-FCP-NEXT: vmovdqa64 %xmm20, %xmm5
-; AVX512-FCP-NEXT: vpsrlq $48, %xmm20, %xmm9
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm9[0],xmm6[1],xmm9[1],xmm6[2],xmm9[2],xmm6[3],xmm9[3]
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm6, %zmm3, %zmm27
-; AVX512-FCP-NEXT: vmovdqa64 %ymm21, %ymm3
-; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm3, %ymm4
+; AVX512-FCP-NEXT: vmovdqa64 %xmm21, %xmm5
+; AVX512-FCP-NEXT: vpsrlq $48, %xmm21, %xmm7
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm6, %zmm0, %zmm27
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [0,1,2,3,0,1,6,7,8,9,14,15,12,13,14,15,16,17,18,19,16,17,22,23,24,25,30,31,20,21,26,27]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm22, %ymm0
+; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm0, %ymm4
; AVX512-FCP-NEXT: vmovdqa64 576(%rdi), %zmm21
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm24 = [3,6,10,13,3,6,10,13]
-; AVX512-FCP-NEXT: vpermd %zmm21, %zmm24, %zmm6
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [0,1,2,3,0,1,6,7,8,9,14,15,12,13,14,15,16,17,18,19,16,17,22,23,24,25,30,31,u,u,u,u]
-; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm6, %ymm6
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm25 = [3,6,10,13,3,6,10,13]
+; AVX512-FCP-NEXT: vpermd %zmm21, %zmm25, %zmm6
+; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm6, %ymm6
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm6[0,1,2,3,4,5,6],ymm4[7]
; AVX512-FCP-NEXT: movw $992, %ax # imm = 0x3E0
; AVX512-FCP-NEXT: kmovw %eax, %k1
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm28, %zmm4, %zmm17 {%k1}
-; AVX512-FCP-NEXT: vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm28, %zmm4, %zmm16 {%k1}
+; AVX512-FCP-NEXT: vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3],xmm2[4,5,6,7]
-; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm1, %xmm0
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm8, %ymm1
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
-; AVX512-FCP-NEXT: vpbroadcastw 680(%rdi), %xmm1
+; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm8, %ymm2
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
+; AVX512-FCP-NEXT: vpbroadcastw 680(%rdi), %xmm2
; AVX512-FCP-NEXT: vmovdqa64 %xmm18, %xmm9
-; AVX512-FCP-NEXT: vpsrlq $48, %xmm18, %xmm2
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm0, %zmm26
+; AVX512-FCP-NEXT: vpsrlq $48, %xmm18, %xmm3
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm2, %zmm1, %zmm26
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,3,3,3,0,3,7,7]
-; AVX512-FCP-NEXT: vpermd %ymm31, %ymm2, %ymm0
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [0,1,6,7,4,5,6,7,8,9,0,1,6,7,8,9,16,17,22,23,20,21,22,23,24,25,16,17,22,23,24,25]
-; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm0, %ymm6
+; AVX512-FCP-NEXT: vpermd %ymm24, %ymm2, %ymm1
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [0,1,6,7,4,5,6,7,8,9,0,1,6,7,8,9,16,17,22,23,20,21,22,23,24,25,16,17,22,23,24,25]
+; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm1, %ymm3
; AVX512-FCP-NEXT: vmovdqa64 128(%rdi), %zmm4
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [2,5,9,12,2,5,9,12]
-; AVX512-FCP-NEXT: vpermd %zmm4, %zmm1, %zmm8
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [0,1,0,1,6,7,8,9,14,15,14,15,14,15,14,15,16,17,16,17,22,23,24,25,30,31,30,31,30,31,30,31]
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm8, %ymm8
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm8[0,1,2,3,4],ymm6[5,6,7],ymm8[8,9,10,11,12],ymm6[13,14,15]
+; AVX512-FCP-NEXT: vpermd %zmm4, %zmm1, %zmm6
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [0,1,0,1,6,7,8,9,14,15,14,15,14,15,14,15,16,17,16,17,22,23,24,25,30,31,30,31,30,31,30,31]
+; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm6, %ymm6
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm6[0,1,2,3,4],ymm3[5,6,7],ymm6[8,9,10,11,12],ymm3[13,14,15]
; AVX512-FCP-NEXT: vmovdqa64 %xmm19, %xmm15
-; AVX512-FCP-NEXT: vpsrld $16, %xmm19, %xmm8
-; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm8 = xmm8[4],xmm5[4],xmm8[5],xmm5[5],xmm8[6],xmm5[6],xmm8[7],xmm5[7]
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm8, %zmm6, %zmm3
-; AVX512-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa 704(%rdi), %ymm3
-; AVX512-FCP-NEXT: vmovdqa 736(%rdi), %ymm8
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm8[0,1],ymm3[2,3],ymm8[4,5],ymm3[6,7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm8, %ymm18
-; AVX512-FCP-NEXT: vmovdqa64 %ymm3, %ymm20
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm6, %xmm8
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm8[0,1,2],xmm6[3],xmm8[4],xmm6[5],xmm8[6,7]
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} xmm3 = [8,9,6,7,4,5,10,11,8,9,6,7,4,5,10,11]
-; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm6, %xmm8
+; AVX512-FCP-NEXT: vpsrld $16, %xmm19, %xmm6
+; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
+; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm6, %zmm3, %zmm0
+; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vmovdqa 704(%rdi), %ymm0
+; AVX512-FCP-NEXT: vmovdqa 736(%rdi), %ymm6
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm6[0,1],ymm0[2,3],ymm6[4,5],ymm0[6,7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm6, %ymm18
+; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm20
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm3, %xmm6
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm6[0,1,2],xmm3[3],xmm6[4],xmm3[5],xmm6[6,7]
+; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} xmm0 = [8,9,6,7,4,5,10,11,8,9,6,7,4,5,10,11]
+; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm3, %xmm6
; AVX512-FCP-NEXT: vmovdqa64 768(%rdi), %zmm30
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [0,0,0,0,4,7,11,14]
-; AVX512-FCP-NEXT: vpermd %zmm30, %zmm6, %zmm13
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,0,0,0,4,7,11,14]
+; AVX512-FCP-NEXT: vpermd %zmm30, %zmm3, %zmm8
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [0,1,6,7,8,9,14,15,8,9,14,15,4,5,2,3,16,17,22,23,24,25,30,31,24,25,30,31,20,21,18,19]
-; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm13, %ymm13
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm13[0,1,2],xmm8[3,4,5,6],xmm13[7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm13[4,5,6,7]
-; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm8, %ymm8
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm8[0,1,2],xmm6[3,4,5,6],xmm8[7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm6[0,1,2,3],ymm8[4,5,6,7]
+; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm6, %ymm6
+; AVX512-FCP-NEXT: vpermd %zmm4, %zmm25, %zmm13
; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm13, %ymm7
-; AVX512-FCP-NEXT: vpermd %zmm4, %zmm24, %zmm13
-; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm13, %ymm11
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm11[0,1,2,3,4,5,6],ymm7[7]
-; AVX512-FCP-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm16 {%k1} # 16-byte Folded Reload
-; AVX512-FCP-NEXT: vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vpermd %ymm25, %ymm2, %ymm2
-; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm2, %ymm2
-; AVX512-FCP-NEXT: vpermd %zmm21, %zmm1, %zmm7
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm7, %ymm0
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7],ymm0[8,9,10,11,12],ymm2[13,14,15]
-; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; AVX512-FCP-NEXT: vpsrld $16, %xmm14, %xmm2
-; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm9[4],xmm2[5],xmm9[5],xmm2[6],xmm9[6],xmm2[7],xmm9[7]
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm2, %zmm0, %zmm0
-; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3,4,5,6],ymm6[7]
+; AVX512-FCP-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm17 {%k1} # 16-byte Folded Reload
+; AVX512-FCP-NEXT: vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vpermd %ymm31, %ymm2, %ymm2
+; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm2, %ymm2
+; AVX512-FCP-NEXT: vpermd %zmm21, %zmm1, %zmm6
+; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm6, %ymm6
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm6[0,1,2,3,4],ymm2[5,6,7],ymm6[8,9,10,11,12],ymm2[13,14,15]
+; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; AVX512-FCP-NEXT: vpsrld $16, %xmm8, %xmm6
+; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm6[4],xmm9[4],xmm6[5],xmm9[5],xmm6[6],xmm9[6],xmm6[7],xmm9[7]
+; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm6, %zmm2, %zmm2
+; AVX512-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm12 = [0,4,7,0,0,4,7,0]
; AVX512-FCP-NEXT: # ymm12 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vpermd %ymm31, %ymm12, %ymm0
+; AVX512-FCP-NEXT: vpermd %ymm24, %ymm12, %ymm6
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [4,5,2,3,4,5,6,7,8,9,2,3,4,5,10,11,20,21,18,19,20,21,22,23,24,25,18,19,20,21,26,27]
-; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vmovdqa64 %ymm2, %ymm23
+; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm6, %ymm6
+; AVX512-FCP-NEXT: vmovdqa64 %ymm2, %ymm24
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm17 = [2,6,9,13,2,6,9,13]
; AVX512-FCP-NEXT: vpermd %zmm4, %zmm17, %zmm7
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [4,5,2,3,4,5,10,11,12,13,12,13,12,13,12,13,20,21,18,19,20,21,26,27,28,29,28,29,28,29,28,29]
; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm7, %ymm7
-; AVX512-FCP-NEXT: vmovdqa64 %ymm2, %ymm19
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm7[0,1,2,3,4],ymm0[5,6,7],ymm7[8,9,10,11,12],ymm0[13,14,15]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm2, %ymm16
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6,7],ymm7[8,9,10,11,12],ymm6[13,14,15]
; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm15[4],xmm5[4],xmm15[5],xmm5[5],xmm15[6],xmm5[6],xmm15[7],xmm5[7]
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} xmm22 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512-FCP-NEXT: vmovdqa64 %xmm22, %xmm2
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} xmm23 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512-FCP-NEXT: vmovdqa64 %xmm23, %xmm2
; AVX512-FCP-NEXT: vpshufb %xmm2, %xmm5, %xmm5
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm0, %zmm0
-; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa64 832(%rdi), %zmm0
-; AVX512-FCP-NEXT: vpermd %zmm0, %zmm1, %zmm7
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} ymm5 = [16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31]
-; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm7, %ymm7
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1,2,3,4,5],ymm7[6,7]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm6, %zmm2
+; AVX512-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vmovdqa64 832(%rdi), %zmm5
+; AVX512-FCP-NEXT: vpermd %zmm5, %zmm1, %zmm7
+; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} ymm6 = [16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31]
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm7, %ymm7
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm14[0,1,2,3,4,5],ymm7[6,7]
; AVX512-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-FCP-NEXT: vmovdqa 256(%rdi), %ymm2
; AVX512-FCP-NEXT: vmovdqa 288(%rdi), %ymm4
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm4[0,1],ymm2[2,3],ymm4[4,5],ymm2[6,7]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm13, %xmm11
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm11[0,1,2],xmm13[3],xmm11[4],xmm13[5],xmm11[6,7]
-; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm11, %xmm11
-; AVX512-FCP-NEXT: vmovdqa64 320(%rdi), %zmm16
-; AVX512-FCP-NEXT: vpermd %zmm16, %zmm6, %zmm6
-; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm6, %ymm6
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm6[0,1,2],xmm11[3,4,5,6],xmm6[7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5,6,7]
+; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm11, %xmm11
+; AVX512-FCP-NEXT: vmovdqa64 320(%rdi), %zmm22
+; AVX512-FCP-NEXT: vpermd %zmm22, %zmm3, %zmm3
+; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm3, %ymm3
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm3[0,1,2],xmm11[3,4,5,6],xmm3[7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm10[0,1,2,3],ymm3[4,5,6,7]
; AVX512-FCP-NEXT: vmovdqa64 384(%rdi), %zmm10
; AVX512-FCP-NEXT: vpermd %zmm10, %zmm1, %zmm1
-; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm1, %ymm1
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm6[0,1,2,3,4,5],ymm1[6,7]
-; AVX512-FCP-NEXT: vmovdqu %ymm1, (%rsp) # 32-byte Spill
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm1, %ymm1
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm1[6,7]
+; AVX512-FCP-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1,2],ymm2[3],ymm4[4,5],ymm2[6],ymm4[7]
; AVX512-FCP-NEXT: vmovdqa %ymm4, %ymm15
-; AVX512-FCP-NEXT: vmovdqa %ymm2, %ymm4
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm6
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm6[0,1,2,3],xmm1[4],xmm6[5],xmm1[6],xmm6[7]
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} xmm6 = [10,11,8,9,6,7,12,13,10,11,8,9,6,7,12,13]
-; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm1, %xmm1
+; AVX512-FCP-NEXT: vmovdqa %ymm2, %ymm14
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4],xmm3[5],xmm1[6],xmm3[7]
+; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} xmm3 = [10,11,8,9,6,7,12,13,10,11,8,9,6,7,12,13]
+; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm1, %xmm1
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm11 = [1,0,0,0,4,8,11,15]
-; AVX512-FCP-NEXT: vpermd %zmm16, %zmm11, %zmm13
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm3 = [2,3,4,5,10,11,12,13,0,1,0,1,0,1,0,1,18,19,20,21,26,27,28,29,16,17,16,17,16,17,16,17]
-; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm13, %ymm13
+; AVX512-FCP-NEXT: vpermd %zmm22, %zmm11, %zmm13
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [2,3,4,5,10,11,12,13,0,1,0,1,0,1,0,1,18,19,20,21,26,27,28,29,16,17,16,17,16,17,16,17]
+; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm13, %ymm13
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm13[0,1,2],xmm1[3,4,5,6],xmm13[7]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm13[4,5,6,7]
; AVX512-FCP-NEXT: vpermd %zmm10, %zmm17, %zmm13
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} ymm2 = [18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29]
-; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm13, %ymm13
+; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} ymm0 = [18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29]
+; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm13, %ymm13
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm13[6,7]
; AVX512-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-FCP-NEXT: vmovdqa64 %ymm18, %ymm7
-; AVX512-FCP-NEXT: vmovdqa64 %ymm20, %ymm8
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm7[0,1,2],ymm8[3],ymm7[4,5],ymm8[6],ymm7[7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm20, %ymm4
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm7[0,1,2],ymm4[3],ymm7[4,5],ymm4[6],ymm7[7]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm13
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm13[0,1,2,3],xmm1[4],xmm13[5],xmm1[6],xmm13[7]
-; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm1, %xmm1
-; AVX512-FCP-NEXT: vpermd %zmm30, %zmm11, %zmm6
-; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm6, %ymm3
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3,4,5,6],xmm3[7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
-; AVX512-FCP-NEXT: vpermd %zmm0, %zmm17, %zmm3
+; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX512-FCP-NEXT: vpermd %zmm30, %zmm11, %zmm3
; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm3, %ymm2
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
-; AVX512-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1],ymm15[2],ymm4[3,4,5],ymm15[6],ymm4[7]
-; AVX512-FCP-NEXT: vmovdqa %ymm15, %ymm13
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,0,1,14,15,12,13,10,11,8,9,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm1, %ymm1
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [1,0,0,0,5,8,12,15]
-; AVX512-FCP-NEXT: vpermd %zmm16, %zmm3, %zmm6
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,2,3,16,17,22,23,24,25,30,31,128,128,128,128,128,128,128,128]
-; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm6, %ymm6
-; AVX512-FCP-NEXT: vpor %ymm6, %ymm1, %ymm1
-; AVX512-FCP-NEXT: vpermd %zmm10, %zmm24, %zmm6
-; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm6, %ymm6
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm6[6,7]
-; AVX512-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0,1],ymm7[2],ymm8[3,4,5],ymm7[6],ymm8[7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm18, %ymm10
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm6
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm6[4],xmm1[5],xmm6[6],xmm1[7]
-; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm1, %ymm1
-; AVX512-FCP-NEXT: vpermd %zmm30, %zmm3, %zmm2
-; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm2, %ymm2
-; AVX512-FCP-NEXT: vpor %ymm2, %ymm1, %ymm1
-; AVX512-FCP-NEXT: vpermd %zmm0, %zmm24, %zmm0
-; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm0, %ymm0
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3,4,5,6],xmm2[7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVX512-FCP-NEXT: vpermd %zmm5, %zmm17, %zmm2
+; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm2, %ymm0
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpermd %ymm25, %ymm12, %ymm0
-; AVX512-FCP-NEXT: vmovdqa64 %ymm23, %ymm1
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm14[0,1],ymm15[2],ymm14[3,4,5],ymm15[6],ymm14[7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm15, %ymm19
+; AVX512-FCP-NEXT: vmovdqa %ymm14, %ymm13
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,0,1,14,15,12,13,10,11,8,9,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u]
+; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm0
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [1,0,0,0,5,8,12,15]
+; AVX512-FCP-NEXT: vpermd %zmm22, %zmm2, %zmm3
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,2,3,16,17,22,23,24,25,30,31,128,128,128,128,128,128,128,128]
+; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm3
+; AVX512-FCP-NEXT: vpor %ymm3, %ymm0, %ymm0
+; AVX512-FCP-NEXT: vpermd %zmm10, %zmm25, %zmm3
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm3, %ymm3
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm3[6,7]
+; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm7[2],ymm4[3,4,5],ymm7[6],ymm4[7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm3
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4],xmm0[5],xmm3[6],xmm0[7]
+; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm0
+; AVX512-FCP-NEXT: vpermd %zmm30, %zmm2, %zmm1
+; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm1, %ymm1
+; AVX512-FCP-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512-FCP-NEXT: vpermd %zmm5, %zmm25, %zmm1
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm1, %ymm1
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vpermd %ymm31, %ymm12, %ymm0
+; AVX512-FCP-NEXT: vmovdqa64 %ymm24, %ymm1
; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm0
; AVX512-FCP-NEXT: vpermd %zmm21, %zmm17, %zmm1
-; AVX512-FCP-NEXT: vmovdqa64 %ymm19, %ymm2
+; AVX512-FCP-NEXT: vmovdqa64 %ymm16, %ymm2
; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7],ymm1[8,9,10,11,12],ymm0[13,14,15]
-; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm14[4],xmm9[4],xmm14[5],xmm9[5],xmm14[6],xmm9[6],xmm14[7],xmm9[7]
-; AVX512-FCP-NEXT: vmovdqa64 %xmm22, %xmm2
+; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm8[4],xmm9[4],xmm8[5],xmm9[5],xmm8[6],xmm9[6],xmm8[7],xmm9[7]
+; AVX512-FCP-NEXT: vmovdqa64 %xmm23, %xmm2
; AVX512-FCP-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT: vmovdqa 416(%rdi), %ymm1
-; AVX512-FCP-NEXT: vmovdqa 384(%rdi), %ymm15
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm15[0,1],ymm1[2],ymm15[3,4,5],ymm1[6],ymm15[7]
-; AVX512-FCP-NEXT: vmovdqa %ymm1, %ymm7
+; AVX512-FCP-NEXT: vmovdqa 384(%rdi), %ymm2
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm1[2],ymm2[3,4,5],ymm1[6],ymm2[7]
+; AVX512-FCP-NEXT: vmovdqa %ymm2, %ymm15
+; AVX512-FCP-NEXT: vmovdqa %ymm1, %ymm14
; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
; AVX512-FCP-NEXT: vmovdqa %ymm13, %ymm11
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1,2],ymm13[3],ymm4[4,5],ymm13[6],ymm4[7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm19, %ymm13
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm11[0,1,2],ymm13[3],ymm11[4,5],ymm13[6],ymm11[7]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2,3,4,5],xmm1[6],xmm2[7]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,0,1,0,1,0,1,14,15,12,13,10,11,8,9]
; AVX512-FCP-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [2,11,2,11,12,5,8,9]
-; AVX512-FCP-NEXT: vpermd %zmm16, %zmm3, %zmm6
+; AVX512-FCP-NEXT: vpermd %zmm22, %zmm3, %zmm6
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,0,1,22,23,28,29,18,19,128,128,128,128,128,128,128,128,128,128]
; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm6, %ymm6
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm6[0,1,2],ymm0[3,4,5,6,7],ymm6[8,9,10],ymm0[11,12,13,14,15]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm13 = [0,1,2,3,2,3,0,1,14,15,12,13,10,11,128,128]
-; AVX512-FCP-NEXT: vpshufb %xmm13, %xmm1, %xmm1
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [0,1,2,3,2,3,0,1,14,15,12,13,10,11,128,128]
+; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm1, %xmm1
; AVX512-FCP-NEXT: vpor %ymm6, %ymm1, %ymm1
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -15378,17 +15400,17 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vmovdqa 832(%rdi), %ymm5
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm5[0,1],ymm1[2],ymm5[3,4,5],ymm1[6],ymm5[7]
; AVX512-FCP-NEXT: vmovdqa64 %ymm5, %ymm21
-; AVX512-FCP-NEXT: vmovdqa64 %ymm1, %ymm19
+; AVX512-FCP-NEXT: vmovdqa64 %ymm1, %ymm23
; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
; AVX512-FCP-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX512-FCP-NEXT: vpermd %zmm30, %zmm3, %zmm1
; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm1, %ymm1
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1,2],ymm10[3],ymm8[4,5],ymm10[6],ymm8[7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm20, %ymm23
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1,2],ymm7[3],ymm4[4,5],ymm7[6],ymm4[7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm20, %ymm25
; AVX512-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2,3,4,5],xmm2[6],xmm3[7]
-; AVX512-FCP-NEXT: vpshufb %xmm13, %xmm2, %xmm2
+; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm2, %xmm2
; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
; AVX512-FCP-NEXT: vpor %ymm1, %ymm2, %ymm1
@@ -15397,146 +15419,146 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm9[0,1],ymm8[2],ymm9[3,4],ymm8[5],ymm9[6,7]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm1, %xmm2
-; AVX512-FCP-NEXT: vmovdqa64 %xmm0, %xmm25
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm1, %xmm2
; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm1
; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,3,7,10,14,0,0,0]
-; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
-; AVX512-FCP-NEXT: vpermd %zmm24, %zmm5, %zmm3
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm14 = [u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
-; AVX512-FCP-NEXT: vpshufb %ymm14, %ymm3, %ymm3
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm19 = [0,3,7,10,14,0,0,0]
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512-FCP-NEXT: vpermd %zmm18, %zmm19, %zmm3
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
+; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm3, %ymm3
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3,4,5,6,7]
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm29 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm1, %zmm29, %zmm27
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm28 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512-FCP-NEXT: vpternlogq $184, %zmm1, %zmm28, %zmm27
; AVX512-FCP-NEXT: vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm15[0,1,2],ymm7[3],ymm15[4,5],ymm7[6],ymm15[7]
-; AVX512-FCP-NEXT: vmovdqa %ymm7, %ymm12
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm15[0,1,2],ymm14[3],ymm15[4,5],ymm14[6],ymm15[7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm15, %ymm17
+; AVX512-FCP-NEXT: vmovdqa %ymm14, %ymm15
; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm3
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0],xmm1[1],xmm3[2,3,4,5],xmm1[6],xmm3[7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm11[0],ymm4[1],ymm11[2,3],ymm4[4],ymm11[5,6,7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm11, %ymm22
-; AVX512-FCP-NEXT: vmovdqa64 %ymm4, %ymm17
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm13[0],ymm11[1],ymm13[2,3],ymm11[4],ymm13[5,6,7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm13, %ymm16
+; AVX512-FCP-NEXT: vmovdqa %ymm11, %ymm13
; AVX512-FCP-NEXT: vextracti128 $1, %ymm3, %xmm6
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm6[1],xmm3[2,3,4,5],xmm6[6],xmm3[7]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [2,3,2,3,2,3,2,3,0,1,14,15,12,13,10,11]
; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm1, %xmm1
; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm20 = [2,0,0,0,6,9,13,0]
-; AVX512-FCP-NEXT: vpermd %zmm16, %zmm20, %zmm13
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,2,3,16,17,22,23,24,25,128,128,128,128,128,128,128,128,128,128]
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm13, %ymm13
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm13[0,1,2],ymm1[3,4,5,6,7],ymm13[8,9,10],ymm1[11,12,13,14,15]
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm24 = [2,0,0,0,6,9,13,0]
+; AVX512-FCP-NEXT: vpermd %zmm22, %zmm24, %zmm0
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,2,3,16,17,22,23,24,25,128,128,128,128,128,128,128,128,128,128]
+; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm0, %ymm0
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,2,3,0,1,14,15,12,13,128,128]
; AVX512-FCP-NEXT: vpshufb %xmm2, %xmm3, %xmm3
-; AVX512-FCP-NEXT: vpor %ymm3, %ymm13, %ymm3
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
-; AVX512-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX512-FCP-NEXT: vpor %ymm0, %ymm3, %ymm0
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm11[0,1],ymm7[2],ymm11[3,4],ymm7[5],ymm11[6,7]
-; AVX512-FCP-NEXT: vmovdqa64 %xmm25, %xmm13
-; AVX512-FCP-NEXT: vpshufb %xmm13, %xmm1, %xmm3
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm1
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512-FCP-NEXT: vpermd %zmm25, %zmm5, %zmm3
-; AVX512-FCP-NEXT: vpshufb %ymm14, %ymm3, %ymm3
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3,4,5,6,7]
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm1, %zmm29, %zmm26
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm11[2],ymm4[3,4],ymm11[5],ymm4[6,7]
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm0, %xmm1
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
+; AVX512-FCP-NEXT: vpermd %zmm20, %zmm19, %zmm1
+; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm1, %ymm1
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
+; AVX512-FCP-NEXT: vpternlogq $184, %zmm0, %zmm28, %zmm26
; AVX512-FCP-NEXT: vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT: vmovdqa64 %ymm21, %ymm5
-; AVX512-FCP-NEXT: vmovdqa64 %ymm19, %ymm4
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0,1,2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0],xmm1[1],xmm3[2,3,4,5],xmm1[6],xmm3[7]
-; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm1, %xmm1
-; AVX512-FCP-NEXT: vpermd %zmm30, %zmm20, %zmm3
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm3, %ymm0
; AVX512-FCP-NEXT: vmovdqa64 %ymm23, %ymm14
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm10[0],ymm14[1],ymm10[2,3],ymm14[4],ymm10[5,6,7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm18, %ymm20
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm5[0,1,2],ymm14[3],ymm5[4,5],ymm14[6],ymm5[7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3,4,5],xmm0[6],xmm1[7]
+; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm0, %xmm0
+; AVX512-FCP-NEXT: vpermd %zmm30, %zmm24, %zmm1
+; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm1, %ymm1
+; AVX512-FCP-NEXT: vmovdqa64 %ymm25, %ymm12
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm7[0],ymm12[1],ymm7[2,3],ymm12[4],ymm7[5,6,7]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm3, %xmm6
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm6[1],xmm3[2,3,4,5],xmm6[6],xmm3[7]
; AVX512-FCP-NEXT: vpshufb %xmm2, %xmm3, %xmm2
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
-; AVX512-FCP-NEXT: vpor %ymm0, %ymm2, %ymm0
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
+; AVX512-FCP-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm9[0,1],ymm8[2,3],ymm9[4,5],ymm8[6,7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm8, %ymm23
+; AVX512-FCP-NEXT: vmovdqa64 %ymm8, %ymm25
; AVX512-FCP-NEXT: vmovdqa64 %ymm9, %ymm26
; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-FCP-NEXT: vpshufb %xmm13, %xmm1, %xmm1
-; AVX512-FCP-NEXT: vmovdqa64 %xmm13, %xmm27
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm1, %xmm1
+; AVX512-FCP-NEXT: vmovdqa64 %xmm10, %xmm27
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [10,11,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX512-FCP-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX512-FCP-NEXT: vmovdqa64 %xmm2, %xmm28
+; AVX512-FCP-NEXT: vmovdqa64 %xmm2, %xmm29
; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm21 = [0,4,7,11,14,0,0,0]
-; AVX512-FCP-NEXT: vpermd %zmm24, %zmm21, %zmm2
+; AVX512-FCP-NEXT: vpermd %zmm18, %zmm21, %zmm2
+; AVX512-FCP-NEXT: vmovdqa64 %zmm18, %zmm24
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29]
; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm2, %ymm2
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3,4,5,6,7]
-; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm12[0],ymm15[1],ymm12[2,3],ymm15[4],ymm12[5,6,7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm12, %ymm19
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm0[0,1],ymm2[2,3,4,5,6,7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm17, %ymm9
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm15[0],ymm9[1],ymm15[2,3],ymm9[4],ymm15[5,6,7]
+; AVX512-FCP-NEXT: vmovdqa %ymm15, %ymm10
; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm6
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm6[1],xmm0[2,3,4,5],xmm6[6],xmm0[7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm22, %ymm9
-; AVX512-FCP-NEXT: vmovdqa64 %ymm17, %ymm2
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm9[0],ymm2[1],ymm9[2,3,4],ymm2[5],ymm9[6,7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm16, %ymm1
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm1[0],ymm13[1],ymm1[2,3,4],ymm13[5],ymm1[6,7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm13, %ymm19
; AVX512-FCP-NEXT: vextracti128 $1, %ymm6, %xmm13
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm13[0],xmm6[1],xmm13[2],xmm6[3],xmm13[4,5,6,7]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm13 = [4,5,4,5,4,5,4,5,2,3,0,1,14,15,12,13]
; AVX512-FCP-NEXT: vpshufb %xmm13, %xmm0, %xmm0
; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm18 = [3,0,0,0,6,10,13,0]
-; AVX512-FCP-NEXT: vpermd %zmm16, %zmm18, %zmm1
+; AVX512-FCP-NEXT: vpermd %zmm22, %zmm18, %zmm1
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm3 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,0,1,18,19,20,21,26,27,128,128,128,128,128,128,128,128,128,128]
; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm1, %ymm1
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [0,1,2,3,6,7,4,5,2,3,0,1,14,15,128,128]
-; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm6, %xmm6
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [0,1,2,3,6,7,4,5,2,3,0,1,14,15,128,128]
+; AVX512-FCP-NEXT: vpshufb %xmm15, %xmm6, %xmm6
; AVX512-FCP-NEXT: vpor %ymm1, %ymm6, %ymm1
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm31
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm11[0,1],ymm7[2,3],ymm11[4,5],ymm7[6,7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm7, %ymm22
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm11[2,3],ymm4[4,5],ymm11[6,7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm4, %ymm23
; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-FCP-NEXT: vmovdqa64 %xmm27, %xmm6
-; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm1, %xmm1
-; AVX512-FCP-NEXT: vmovdqa64 %xmm28, %xmm6
-; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm0, %xmm0
+; AVX512-FCP-NEXT: vmovdqa64 %xmm27, %xmm4
+; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm1, %xmm1
+; AVX512-FCP-NEXT: vmovdqa64 %xmm29, %xmm4
+; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm0, %xmm0
; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm17 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: vpermd %zmm25, %zmm21, %zmm1
+; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm6 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: vpermd %zmm20, %zmm21, %zmm1
; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm1, %ymm1
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1],ymm1[2,3,4,5,6,7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0],ymm5[1],ymm4[2,3],ymm5[4],ymm4[5,6,7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm4, %ymm28
-; AVX512-FCP-NEXT: vmovdqa64 %ymm5, %ymm27
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm14[0],ymm5[1],ymm14[2,3],ymm5[4],ymm14[5,6,7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm14, %ymm27
+; AVX512-FCP-NEXT: vmovdqa64 %ymm5, %ymm21
; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm8
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm8[1],xmm0[2,3,4,5],xmm8[6],xmm0[7]
-; AVX512-FCP-NEXT: vinserti64x4 $1, (%rsp), %zmm0, %zmm21 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: vinserti64x4 $1, (%rsp), %zmm0, %zmm17 # 32-byte Folded Reload
; AVX512-FCP-NEXT: vpshufb %xmm13, %xmm0, %xmm0
; AVX512-FCP-NEXT: vpermd %zmm30, %zmm18, %zmm8
; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm8, %ymm3
-; AVX512-FCP-NEXT: vmovdqa64 %ymm20, %ymm6
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm6[0],ymm14[1],ymm6[2,3,4],ymm14[5],ymm6[6,7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm8, %xmm12
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm12[0],xmm8[1],xmm12[2],xmm8[3],xmm12[4,5,6,7]
-; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm8, %xmm4
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm7[0],ymm12[1],ymm7[2,3,4],ymm12[5],ymm7[6,7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm7, %ymm18
+; AVX512-FCP-NEXT: vmovdqa %ymm12, %ymm14
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm8, %xmm13
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm13[0],xmm8[1],xmm13[2],xmm8[3],xmm13[4,5,6,7]
+; AVX512-FCP-NEXT: vpshufb %xmm15, %xmm8, %xmm4
; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3,4,5,6,7],ymm3[8,9,10],ymm0[11,12,13,14,15]
; AVX512-FCP-NEXT: vpor %ymm3, %ymm4, %ymm3
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm18
-; AVX512-FCP-NEXT: vmovdqa64 %ymm23, %ymm0
+; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm29
+; AVX512-FCP-NEXT: vmovdqa64 %ymm25, %ymm0
; AVX512-FCP-NEXT: vmovdqa64 %ymm26, %ymm3
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3],ymm3[4,5],ymm0[6],ymm3[7]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm3
@@ -15546,121 +15568,122 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [1,4,8,11,15,0,0,0]
; AVX512-FCP-NEXT: vpermd %zmm24, %zmm3, %zmm8
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
-; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm8, %ymm8
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
+; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm8, %ymm8
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm8[2,3,4,5,6,7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm19, %ymm5
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0],ymm15[1],ymm5[2,3,4],ymm15[5],ymm5[6,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm10[0],ymm9[1],ymm10[2,3,4],ymm9[5],ymm10[6,7]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm5, %xmm7
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm7[0],xmm5[1],xmm7[2],xmm5[3],xmm7[4,5,6,7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm9[0,1],ymm2[2],ymm9[3,4],ymm2[5],ymm9[6,7]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [8,9,8,9,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm7, %xmm8
+; AVX512-FCP-NEXT: vmovdqa64 %ymm19, %ymm10
+; AVX512-FCP-NEXT: vmovdqa64 %ymm16, %ymm7
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm10[2],ymm7[3,4],ymm10[5],ymm7[6,7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [8,9,8,9,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm7, %xmm9
; AVX512-FCP-NEXT: vextracti128 $1, %ymm7, %xmm7
; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm7[0,3,1,3,4,5,6,7]
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3]
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [0,1,10,3,14,7,10,3]
-; AVX512-FCP-NEXT: vpermd %zmm16, %zmm9, %zmm15
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [4,5,10,11,0,1,10,11,0,1,4,5,0,1,14,15,20,21,26,27,16,17,26,27,16,17,20,21,16,17,30,31]
-; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm15, %ymm15
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm9[0],xmm7[0],xmm9[1],xmm7[1],xmm9[2],xmm7[2],xmm9[3],xmm7[3]
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm16 = [0,1,10,3,14,7,10,3]
+; AVX512-FCP-NEXT: vpermd %zmm22, %zmm16, %zmm15
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [4,5,10,11,0,1,10,11,0,1,4,5,0,1,14,15,20,21,26,27,16,17,26,27,16,17,20,21,16,17,30,31]
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm15, %ymm15
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm15[0],ymm7[1,2],ymm15[3,4,5,6,7]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [0,1,2,3,12,13,6,7,4,5,2,3,0,1,14,15]
; AVX512-FCP-NEXT: vpshufb %xmm15, %xmm5, %xmm5
; AVX512-FCP-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm7[0,1,2],ymm5[3,4,5,6,7],ymm7[8,9,10],ymm5[11,12,13,14,15]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm5[4,5,6,7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm22, %ymm2
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm11[0,1,2],ymm2[3],ymm11[4,5],ymm2[6],ymm11[7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm23, %ymm5
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2],ymm11[3],ymm5[4,5],ymm11[6],ymm5[7]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm5, %xmm13
; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm13, %xmm4
; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,4,6,7]
; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm5 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: vpermd %zmm25, %zmm3, %zmm3
-; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm3, %ymm3
+; AVX512-FCP-NEXT: vpermd %zmm20, %zmm3, %zmm3
+; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm3, %ymm3
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm3[2,3,4,5,6,7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm28, %ymm2
-; AVX512-FCP-NEXT: vmovdqa64 %ymm27, %ymm3
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm2[0],ymm3[1],ymm2[2,3,4],ymm3[5],ymm2[6,7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm21, %ymm3
+; AVX512-FCP-NEXT: vmovdqa64 %ymm27, %ymm10
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm10[0],ymm3[1],ymm10[2,3,4],ymm3[5],ymm10[6,7]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm3, %xmm10
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm10[0],xmm3[1],xmm10[2],xmm3[3],xmm10[4,5,6,7]
; AVX512-FCP-NEXT: vpshufb %xmm15, %xmm3, %xmm3
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm6[0,1],ymm14[2],ymm6[3,4],ymm14[5],ymm6[6,7]
-; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm10, %xmm11
+; AVX512-FCP-NEXT: vmovdqa64 %ymm18, %ymm10
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1],ymm14[2],ymm10[3,4],ymm14[5],ymm10[6,7]
+; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm10, %xmm8
; AVX512-FCP-NEXT: vextracti128 $1, %ymm10, %xmm10
; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm10 = xmm10[0,3,1,3,4,5,6,7]
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3]
-; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm11 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: vpermd %zmm30, %zmm9, %zmm9
-; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm9, %ymm8
-; AVX512-FCP-NEXT: vpmovsxdq {{.*#+}} zmm9 = [0,0,18446744073709486080,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615]
-; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm12 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm10[0],xmm8[1],xmm10[1],xmm8[2],xmm10[2],xmm8[3],xmm10[3]
+; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm10 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: vpermd %zmm30, %zmm16, %zmm11
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm11, %ymm9
+; AVX512-FCP-NEXT: vpmovsxdq {{.*#+}} zmm11 = [0,0,18446744073709486080,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615]
; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm13 # 64-byte Folded Reload
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm9 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512-FCP-NEXT: vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm17 # 64-byte Folded Reload
-; AVX512-FCP-NEXT: vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm21 # 64-byte Folded Reload
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm12, %zmm9, %zmm5
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm13, %zmm9, %zmm11
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0],ymm10[1,2],ymm8[3,4,5,6,7]
+; AVX512-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm13 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm14 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm11 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512-FCP-NEXT: vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm6 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm17 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: vpternlogq $184, %zmm13, %zmm11, %zmm5
+; AVX512-FCP-NEXT: vpternlogq $184, %zmm14, %zmm11, %zmm10
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0],ymm8[1,2],ymm9[3,4,5,6,7]
; AVX512-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm8[0,1,2],ymm3[3,4,5,6,7],ymm8[8,9,10],ymm3[11,12,13,14,15]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm8[0,1,2,3],ymm3[4,5,6,7]
; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512-FCP-NEXT: vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm19 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm28, %zmm19 # 64-byte Folded Reload
; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm8 # 32-byte Folded Reload
; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm9 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm10 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm12 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm11 # 32-byte Folded Reload
; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm13 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm14 # 32-byte Folded Reload
; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm15 # 32-byte Folded Reload
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm31, %zmm0, %zmm16
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm18, %zmm0, %zmm18
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm29, %zmm0, %zmm18
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm0, %zmm7
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm3
; AVX512-FCP-NEXT: movw $-512, %ax # imm = 0xFE00
; AVX512-FCP-NEXT: kmovw %eax, %k1
; AVX512-FCP-NEXT: vmovdqa32 %zmm8, %zmm19 {%k1}
-; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512-FCP-NEXT: vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm2 # 64-byte Folded Reload
-; AVX512-FCP-NEXT: vmovdqa32 %zmm9, %zmm2 {%k1}
; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512-FCP-NEXT: vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm8 # 64-byte Folded Reload
-; AVX512-FCP-NEXT: vmovdqa32 %zmm10, %zmm8 {%k1}
-; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512-FCP-NEXT: vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm6 # 64-byte Folded Reload
-; AVX512-FCP-NEXT: vmovdqa32 %zmm12, %zmm6 {%k1}
+; AVX512-FCP-NEXT: vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm28, %zmm8 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: vmovdqa32 %zmm9, %zmm8 {%k1}
; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512-FCP-NEXT: vmovdqa32 %zmm13, %zmm9 {%k1}
-; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512-FCP-NEXT: vmovdqa32 %zmm15, %zmm10 {%k1}
-; AVX512-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm1 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm28, %zmm9 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: vmovdqa32 %zmm11, %zmm9 {%k1}
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512-FCP-NEXT: vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm28, %zmm11 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: vmovdqa32 %zmm13, %zmm11 {%k1}
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512-FCP-NEXT: vmovdqa32 %zmm14, %zmm12 {%k1}
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512-FCP-NEXT: vmovdqa32 %zmm15, %zmm13 {%k1}
+; AVX512-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm28, %zmm1 # 64-byte Folded Reload
; AVX512-FCP-NEXT: vmovdqa32 %zmm18, %zmm1 {%k1}
-; AVX512-FCP-NEXT: vmovdqa64 %zmm21, (%rsi)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm17, 64(%rsi)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm11, 64(%rdx)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm17, (%rsi)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm6, 64(%rsi)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm10, 64(%rdx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm5, (%rdx)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm2, 64(%rcx)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm8, 64(%rcx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm19, (%rcx)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm6, 64(%r8)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm8, (%r8)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm10, 64(%r9)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm9, (%r9)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm11, 64(%r8)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm9, (%r8)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm13, 64(%r9)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm12, (%r9)
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-FCP-NEXT: vmovdqa64 %zmm1, 64(%rax)
-; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm1 # 64-byte Folded Reload
-; AVX512-FCP-NEXT: vmovdqa32 %zmm16, %zmm1 {%k1}
-; AVX512-FCP-NEXT: vmovdqa64 %zmm1, (%rax)
-; AVX512-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm4 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm28, %zmm2 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: vmovdqa32 %zmm16, %zmm2 {%k1}
+; AVX512-FCP-NEXT: vmovdqa64 %zmm2, (%rax)
+; AVX512-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm28, %zmm4 # 64-byte Folded Reload
; AVX512-FCP-NEXT: vmovdqa32 %zmm3, %zmm4 {%k1}
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-FCP-NEXT: vmovdqa64 %zmm4, 64(%rax)
-; AVX512-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm0 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm28, %zmm0 # 64-byte Folded Reload
; AVX512-FCP-NEXT: vmovdqa32 %zmm7, %zmm0 {%k1}
; AVX512-FCP-NEXT: vmovdqa64 %zmm0, (%rax)
-; AVX512-FCP-NEXT: addq $1800, %rsp # imm = 0x708
+; AVX512-FCP-NEXT: addq $1736, %rsp # imm = 0x6C8
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
@@ -16537,34 +16560,36 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
;
; AVX512DQ-FCP-LABEL: load_i16_stride7_vf64:
; AVX512DQ-FCP: # %bb.0:
-; AVX512DQ-FCP-NEXT: subq $1240, %rsp # imm = 0x4D8
-; AVX512DQ-FCP-NEXT: vmovdqa64 512(%rdi), %zmm2
-; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rdi), %zmm4
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm17 = [2,5,9,0,12,0,0,0]
-; AVX512DQ-FCP-NEXT: vpermd %zmm4, %zmm17, %zmm0
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [8,1,12,5,12,5,14,15]
-; AVX512DQ-FCP-NEXT: vpermd %zmm4, %zmm3, %zmm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, %zmm11
-; AVX512DQ-FCP-NEXT: vpermd %zmm2, %zmm3, %zmm4
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm2, %zmm18
-; AVX512DQ-FCP-NEXT: vmovdqa 480(%rdi), %ymm12
-; AVX512DQ-FCP-NEXT: vmovdqa 448(%rdi), %ymm14
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm14[0,1],ymm12[2],ymm14[3,4,5],ymm12[6],ymm14[7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm3, %xmm5
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4],xmm3[5],xmm5[6],xmm3[7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,14,15,12,13,10,11,8,9,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm5, %ymm6
+; AVX512DQ-FCP-NEXT: subq $1304, %rsp # imm = 0x518
+; AVX512DQ-FCP-NEXT: vmovdqa64 512(%rdi), %zmm27
+; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rdi), %zmm3
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm16 = [2,5,9,0,12,0,0,0]
+; AVX512DQ-FCP-NEXT: vpermd %zmm3, %zmm16, %zmm0
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [8,1,12,5,12,5,14,15]
+; AVX512DQ-FCP-NEXT: vpermd %zmm3, %zmm2, %zmm1
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm3, %zmm22
+; AVX512DQ-FCP-NEXT: vpermd %zmm27, %zmm2, %zmm4
+; AVX512DQ-FCP-NEXT: vmovdqa 480(%rdi), %ymm5
+; AVX512DQ-FCP-NEXT: vmovdqa 448(%rdi), %ymm6
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0,1],ymm5[2],ymm6[3,4,5],ymm5[6],ymm6[7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm6, %ymm21
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm5, %ymm11
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm5
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm2[0,1,2,3],xmm5[4],xmm2[5],xmm5[6],xmm2[7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,14,15,12,13,10,11,8,9,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm5, %ymm6
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,128,128,128,128,6,7,12,13,2,3,16,17,30,31,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm4, %ymm4
-; AVX512DQ-FCP-NEXT: vporq %ymm4, %ymm6, %ymm20
+; AVX512DQ-FCP-NEXT: vporq %ymm4, %ymm6, %ymm29
; AVX512DQ-FCP-NEXT: vmovdqa 672(%rdi), %xmm7
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,2,3,4,5,6,7,0,1,14,15,12,13,14,15]
; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm7, %xmm6
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm7, %xmm19
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm7, %xmm17
; AVX512DQ-FCP-NEXT: vpbroadcastw 700(%rdi), %xmm7
-; AVX512DQ-FCP-NEXT: vpunpckhdq {{.*#+}} xmm26 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; AVX512DQ-FCP-NEXT: vmovdqa64 192(%rdi), %ymm24
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm24[0,1,0,2]
+; AVX512DQ-FCP-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa64 192(%rdi), %ymm20
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm20[0,1,0,2]
; AVX512DQ-FCP-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm1, %ymm1
; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm6
@@ -16573,382 +16598,385 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vmovdqa %ymm6, %ymm8
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm5, %xmm6
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm6[4],xmm5[5],xmm6[6],xmm5[7]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm5, %ymm3
-; AVX512DQ-FCP-NEXT: vporq %ymm1, %ymm3, %ymm31
-; AVX512DQ-FCP-NEXT: vmovdqa 224(%rdi), %xmm5
-; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm5, %xmm1
-; AVX512DQ-FCP-NEXT: vpbroadcastw 252(%rdi), %xmm3
-; AVX512DQ-FCP-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm5, %ymm2
+; AVX512DQ-FCP-NEXT: vporq %ymm1, %ymm2, %ymm19
+; AVX512DQ-FCP-NEXT: vmovdqa 224(%rdi), %xmm12
+; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm12, %xmm1
+; AVX512DQ-FCP-NEXT: vpbroadcastw 252(%rdi), %xmm2
+; AVX512DQ-FCP-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; AVX512DQ-FCP-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa 240(%rdi), %xmm13
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm3 = [128,128,128,128,128,128,128,128,128,128,0,1,6,7,8,9,18,19,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqa 240(%rdi), %xmm3
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,0,1,6,7,8,9,18,19,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0,1,2],ymm7[3],ymm8[4,5],ymm7[6],ymm8[7]
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm8, %ymm15
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm7, %ymm16
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm8, %ymm13
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm7, %ymm15
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm4
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm4[0],xmm1[1],xmm4[2,3,4,5],xmm1[6],xmm4[7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [2,3,0,1,14,15,12,13,10,11,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm1, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [2,3,0,1,14,15,12,13,10,11,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm1, %ymm1
; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT: vmovdqa 160(%rdi), %ymm0
; AVX512DQ-FCP-NEXT: vmovdqa 128(%rdi), %ymm1
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm1[0,1],ymm0[2],ymm1[3,4,5],ymm0[6],ymm1[7]
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm1, %ymm25
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm29
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm4, %xmm6
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm4[0,1,2,3],xmm6[4],xmm4[5],xmm6[6],xmm4[7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm26
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm4, %xmm5
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm4[0,1,2,3],xmm5[4],xmm4[5],xmm5[6],xmm4[7]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,0,1,14,15,12,13,10,11,8,9,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm6, %xmm6
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm7
-; AVX512DQ-FCP-NEXT: vpbroadcastd {{.*#+}} ymm6 = [22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm9, %ymm9
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm7[0,1,2,3,4,5,6],ymm9[7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm13[0],xmm5[1],xmm13[2,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm13, %xmm27
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [2,3,0,1,14,15,14,15,8,9,10,11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm10, %xmm10
+; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm5, %xmm5
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm8
+; AVX512DQ-FCP-NEXT: vpbroadcastd {{.*#+}} ymm5 = [22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm9, %ymm9
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm8[0,1,2,3,4,5,6],ymm9[7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm3[0],xmm12[1],xmm3[2,3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm3, %xmm18
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [2,3,0,1,14,15,14,15,8,9,10,11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm10, %xmm10
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm14[0,1,2],ymm12[3],ymm14[4,5],ymm12[6],ymm14[7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm14, %ymm23
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm11, %ymm28
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm21, %ymm0
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm0[0,1,2],ymm11[3],ymm0[4,5],ymm11[6],ymm0[7]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm9, %xmm10
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm10[0],xmm9[1],xmm10[2,3,4,5],xmm9[6],xmm10[7]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm9, %ymm8
-; AVX512DQ-FCP-NEXT: vpermd %zmm2, %zmm17, %zmm2
-; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm2
-; AVX512DQ-FCP-NEXT: vpor %ymm2, %ymm8, %ymm2
+; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm9, %ymm7
+; AVX512DQ-FCP-NEXT: vpermd %zmm27, %zmm16, %zmm3
+; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX512DQ-FCP-NEXT: vpor %ymm2, %ymm7, %ymm2
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa 608(%rdi), %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqa 608(%rdi), %ymm14
; AVX512DQ-FCP-NEXT: vmovdqa 576(%rdi), %ymm1
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm1[0,1],ymm0[2],ymm1[3,4,5],ymm0[6],ymm1[7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm1, %ymm21
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm30
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm3, %xmm8
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm8[4],xmm3[5],xmm8[6],xmm3[7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 640(%rdi), %ymm22
-; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm3, %xmm4
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm22[0,1,0,2]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm6
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm17
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm4[0,1,2,3,4,5,6],ymm6[7]
-; AVX512DQ-FCP-NEXT: vmovdqa 688(%rdi), %xmm14
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm19, %xmm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm14[0],xmm1[1],xmm14[2,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm4, %xmm7
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [2,6,9,0,13,0,0,0]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm0
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0,1],ymm14[2],ymm1[3,4,5],ymm14[6],ymm1[7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm1, %ymm23
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5],xmm3[6],xmm2[7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 640(%rdi), %ymm24
+; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm2, %xmm2
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm24[0,1,0,2]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm0, %ymm4
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm31
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm2[0,1,2,3,4,5,6],ymm4[7]
+; AVX512DQ-FCP-NEXT: vmovdqa 688(%rdi), %xmm11
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm17, %xmm1
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm11[0],xmm1[1],xmm11[2,3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm2, %xmm5
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [2,6,9,0,13,0,0,0]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm4, %zmm0
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm16, %ymm10
-; AVX512DQ-FCP-NEXT: vmovdqu64 %ymm16, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm10[0],ymm15[1],ymm10[2,3],ymm15[4],ymm10[5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm15, %ymm2
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm15[0],ymm13[1],ymm15[2,3],ymm13[4],ymm15[5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm15, %ymm6
; AVX512DQ-FCP-NEXT: vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm6, %xmm7
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0],xmm7[1],xmm6[2,3,4,5],xmm7[6],xmm6[7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm11, %zmm28
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vpermd %zmm11, %zmm4, %zmm8
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [128,128,128,128,128,128,128,128,128,128,2,3,4,5,10,11,16,17,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm8, %ymm8
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm13, %ymm3
+; AVX512DQ-FCP-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm4, %xmm5
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm4[0],xmm5[1],xmm4[2,3,4,5],xmm5[6],xmm4[7]
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vpermd %zmm22, %zmm2, %zmm8
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,2,3,4,5,10,11,16,17,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm8, %ymm8
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [4,5,2,3,0,1,14,15,12,13,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm6, %ymm6
-; AVX512DQ-FCP-NEXT: vpor %ymm6, %ymm8, %ymm0
+; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm5, %ymm5
+; AVX512DQ-FCP-NEXT: vpor %ymm5, %ymm8, %ymm0
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm25, %ymm3
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm29, %ymm11
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm3[0,1,2],ymm11[3],ymm3[4,5],ymm11[6],ymm3[7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm6, %xmm8
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm8[0],xmm6[1],xmm8[2,3,4,5],xmm6[6],xmm8[7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm25, %ymm7
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm26, %ymm10
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm7[0,1,2],ymm10[3],ymm7[4,5],ymm10[6],ymm7[7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm5, %xmm8
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm8[0],xmm5[1],xmm8[2,3,4,5],xmm5[6],xmm8[7]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [0,1,2,3,0,1,14,15,12,13,10,11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm6, %xmm6
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm0, %xmm25
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm5, %xmm5
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm0, %xmm17
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} ymm16 = [2,5,2,5,2,5,2,5]
-; AVX512DQ-FCP-NEXT: vpermd %ymm24, %ymm16, %ymm13
+; AVX512DQ-FCP-NEXT: vpermd %ymm20, %ymm16, %ymm13
; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm6[0,1,2,3,4,5,6],ymm13[7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm27, %xmm0
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm5, %xmm19
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm5[0,1,2,3,4,5,6],ymm13[7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm18, %xmm0
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm12, %xmm26
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm6, %xmm15
+; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm5, %xmm15
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm15, %zmm13, %zmm0
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm23, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqu64 %ymm23, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm12[0],ymm0[1],ymm12[2,3],ymm0[4],ymm12[5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm21, %ymm12
+; AVX512DQ-FCP-NEXT: vmovdqu64 %ymm21, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm28, %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqu64 %ymm28, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm0[0],ymm12[1],ymm0[2,3],ymm12[4],ymm0[5,6,7]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm13, %xmm15
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm13 = xmm13[0],xmm15[1],xmm13[2,3,4,5],xmm15[6],xmm13[7]
; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm13, %ymm9
-; AVX512DQ-FCP-NEXT: vpermd %zmm18, %zmm4, %zmm4
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm4, %ymm4
-; AVX512DQ-FCP-NEXT: vpor %ymm4, %ymm9, %ymm4
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm21, %ymm5
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm30, %ymm4
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm30, %ymm21
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm4, %xmm7
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm7[0],xmm4[1],xmm7[2,3,4,5],xmm4[6],xmm7[7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm25, %xmm7
-; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm4, %xmm4
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512DQ-FCP-NEXT: vpermd %ymm22, %ymm16, %ymm7
-; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm7[7]
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm14, %xmm30
-; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm7, %xmm8
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm4, %zmm1
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vpermd %zmm27, %zmm2, %zmm2
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm2
+; AVX512DQ-FCP-NEXT: vpor %ymm2, %ymm9, %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm23, %ymm2
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm14[3],ymm2[4,5],ymm14[6],ymm2[7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm14, %ymm21
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm4
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0],xmm2[1],xmm4[2,3,4,5],xmm2[6],xmm4[7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm17, %xmm4
+; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm2, %xmm2
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-FCP-NEXT: vpermd %ymm24, %ymm16, %ymm4
+; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm4[7]
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm1[0],xmm11[0],xmm1[1],xmm11[1],xmm1[2],xmm11[2],xmm1[3],xmm11[3]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm4, %xmm8
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm2, %zmm1
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm10[0],ymm2[1],ymm10[2,3,4],ymm2[5],ymm10[6,7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm4, %xmm8
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm8[0],xmm4[1],xmm8[2],xmm4[3],xmm8[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0],ymm3[1],ymm6[2,3,4],ymm3[5],ymm6[6,7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm8
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm8[0],xmm2[1],xmm8[2],xmm2[3],xmm8[4,5,6,7]
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm16 = [10,3,6,15,12,13,6,15]
-; AVX512DQ-FCP-NEXT: vpermd %zmm28, %zmm16, %zmm9
+; AVX512DQ-FCP-NEXT: vpermd %zmm22, %zmm16, %zmm9
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,128,128,128,128,128,128,128,128,4,5,10,11,0,1,22,23,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm9, %ymm9
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm13 = [6,7,4,5,2,3,0,1,14,15,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm13, %ymm4, %ymm4
-; AVX512DQ-FCP-NEXT: vpor %ymm4, %ymm9, %ymm1
+; AVX512DQ-FCP-NEXT: vpshufb %ymm13, %ymm2, %ymm2
+; AVX512DQ-FCP-NEXT: vpor %ymm2, %ymm9, %ymm1
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm11[0],ymm3[1],ymm11[2,3],ymm3[4],ymm11[5,6,7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm4, %xmm9
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm9[1],xmm4[2,3,4,5],xmm9[6],xmm4[7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm25, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm10, %ymm3
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm10[0],ymm7[1],ymm10[2,3],ymm7[4],ymm10[5,6,7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm9
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm9[1],xmm2[2,3,4,5],xmm9[6],xmm2[7]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [0,1,4,5,2,3,0,1,14,15,12,13,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm4, %xmm4
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm15
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm2, %xmm2
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm15
; AVX512DQ-FCP-NEXT: vpbroadcastd {{.*#+}} ymm10 = [18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm24[0,1,1,3]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm4, %ymm14
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm20[0,1,1,3]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm2, %ymm14
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm15[0,1,2,3,4,5,6],ymm14[7]
; AVX512DQ-FCP-NEXT: vpbroadcastd {{.*#+}} xmm15 = [12,13,10,11,12,13,10,11,12,13,10,11,12,13,10,11]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm15, %xmm6, %xmm6
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm14, %zmm23
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm12[0],ymm0[1],ymm12[2,3,4],ymm0[5],ymm12[6,7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm6, %xmm14
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm14[0],xmm6[1],xmm14[2],xmm6[3],xmm14[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm13, %ymm6, %ymm6
-; AVX512DQ-FCP-NEXT: vpermd %zmm18, %zmm16, %zmm13
+; AVX512DQ-FCP-NEXT: vpshufb %xmm15, %xmm5, %xmm5
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm14, %zmm5
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm0[0],ymm12[1],ymm0[2,3,4],ymm12[5],ymm0[6,7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm5, %xmm14
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm14[0],xmm5[1],xmm14[2],xmm5[3],xmm14[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm13, %ymm5, %ymm5
+; AVX512DQ-FCP-NEXT: vpermd %zmm27, %zmm16, %zmm13
; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm13, %ymm8
-; AVX512DQ-FCP-NEXT: vpor %ymm6, %ymm8, %ymm0
+; AVX512DQ-FCP-NEXT: vpor %ymm5, %ymm8, %ymm0
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm23, %ymm7
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm21, %ymm12
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm12[0],ymm5[1],ymm12[2,3],ymm5[4],ymm12[5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm5, %ymm2
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm6, %xmm8
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0],xmm8[1],xmm6[2,3,4,5],xmm8[6],xmm6[7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm6, %xmm6
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm8
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm22[0,1,1,3]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm6, %ymm9
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm12[0],ymm7[1],ymm12[2,3],ymm7[4],ymm12[5,6,7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm5, %xmm8
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0],xmm8[1],xmm5[2,3,4,5],xmm8[6],xmm5[7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm5, %xmm5
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm8
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm24[0,1,1,3]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm5, %ymm9
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5,6],ymm9[7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm15, %xmm7, %xmm7
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm8, %zmm29
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm11[0],ymm3[1],ymm11[2,3,4],ymm3[5],ymm11[6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm15, %xmm4, %xmm4
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm8, %zmm23
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0],ymm1[1],ymm3[2,3,4],ymm1[5],ymm3[6,7]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [0,1,6,7,4,5,2,3,0,1,14,15,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm1, %xmm1
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,6,7,4,5,2,3,0,1,14,15,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512DQ-FCP-NEXT: vpbroadcastd {{.*#+}} ymm8 = [20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm4, %ymm4
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm4[7]
-; AVX512DQ-FCP-NEXT: vpbroadcastw 232(%rdi), %xmm4
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm27, %xmm5
-; AVX512DQ-FCP-NEXT: vpsrlq $48, %xmm27, %xmm7
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3]
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm4, %zmm1, %zmm28
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm17, %ymm1
-; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm1, %ymm1
+; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm2, %ymm2
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm2[7]
+; AVX512DQ-FCP-NEXT: vpbroadcastw 232(%rdi), %xmm2
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm18, %xmm6
+; AVX512DQ-FCP-NEXT: vpsrlq $48, %xmm18, %xmm4
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm2, %zmm0, %zmm25
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [0,1,2,3,0,1,6,7,8,9,14,15,12,13,14,15,16,17,18,19,16,17,22,23,24,25,30,31,20,21,26,27]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm31, %ymm2
+; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm2, %ymm2
; AVX512DQ-FCP-NEXT: vmovdqa64 576(%rdi), %zmm21
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm27 = [3,6,10,13,3,6,10,13]
-; AVX512DQ-FCP-NEXT: vpermd %zmm21, %zmm27, %zmm3
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [0,1,2,3,0,1,6,7,8,9,14,15,12,13,14,15,16,17,18,19,16,17,22,23,24,25,30,31,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm3, %ymm3
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5,6],ymm1[7]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm28 = [3,6,10,13,3,6,10,13]
+; AVX512DQ-FCP-NEXT: vpermd %zmm21, %zmm28, %zmm3
+; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm3, %ymm3
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5,6],ymm2[7]
; AVX512DQ-FCP-NEXT: movw $992, %ax # imm = 0x3E0
; AVX512DQ-FCP-NEXT: kmovw %eax, %k1
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm26, %zmm1, %zmm20 {%k1}
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm12[0],ymm2[1],ymm12[2,3,4],ymm2[5],ymm12[6,7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3],xmm2[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm1, %xmm0
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm6, %ymm1
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
-; AVX512DQ-FCP-NEXT: vpbroadcastw 680(%rdi), %xmm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm30, %xmm4
-; AVX512DQ-FCP-NEXT: vpsrlq $48, %xmm30, %xmm2
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm0, %zmm26
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm0 = [0,3,3,3,0,3,7,7]
-; AVX512DQ-FCP-NEXT: vpermd %ymm24, %ymm0, %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [0,1,6,7,4,5,6,7,8,9,0,1,6,7,8,9,16,17,22,23,20,21,22,23,24,25,16,17,22,23,24,25]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm1, %ymm3
-; AVX512DQ-FCP-NEXT: vmovdqa64 128(%rdi), %zmm2
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [2,5,9,12,2,5,9,12]
-; AVX512DQ-FCP-NEXT: vpermd %zmm2, %zmm1, %zmm6
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm29 {%k1} # 16-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm12[0],ymm7[1],ymm12[2,3,4],ymm7[5],ymm12[6,7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3],xmm3[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm2, %xmm1
+; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm5, %ymm2
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
+; AVX512DQ-FCP-NEXT: vpbroadcastw 680(%rdi), %xmm2
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm11, %xmm4
+; AVX512DQ-FCP-NEXT: vpsrlq $48, %xmm11, %xmm3
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm2, %zmm1, %zmm30
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,3,3,3,0,3,7,7]
+; AVX512DQ-FCP-NEXT: vpermd %ymm20, %ymm1, %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm15 = [0,1,6,7,4,5,6,7,8,9,0,1,6,7,8,9,16,17,22,23,20,21,22,23,24,25,16,17,22,23,24,25]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm15, %ymm2, %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqa64 128(%rdi), %zmm3
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm5 = [2,5,9,12,2,5,9,12]
+; AVX512DQ-FCP-NEXT: vpermd %zmm3, %zmm5, %zmm7
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm13 = [0,1,0,1,6,7,8,9,14,15,14,15,14,15,14,15,16,17,16,17,22,23,24,25,30,31,30,31,30,31,30,31]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm13, %ymm6, %ymm6
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm6[0,1,2,3,4],ymm3[5,6,7],ymm6[8,9,10,11,12],ymm3[13,14,15]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm19, %xmm11
-; AVX512DQ-FCP-NEXT: vpsrld $16, %xmm19, %xmm6
-; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm6, %zmm3, %zmm3
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa 704(%rdi), %ymm6
-; AVX512DQ-FCP-NEXT: vmovdqa 736(%rdi), %ymm10
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm10[0,1],ymm6[2,3],ymm10[4,5],ymm6[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm10, %ymm16
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm6, %ymm17
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm3, %xmm6
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm6[0,1,2],xmm3[3],xmm6[4],xmm3[5],xmm6[6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm13, %ymm7, %ymm7
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm7[0,1,2,3,4],ymm2[5,6,7],ymm7[8,9,10,11,12],ymm2[13,14,15]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm26, %xmm11
+; AVX512DQ-FCP-NEXT: vpsrld $16, %xmm26, %xmm7
+; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm7, %zmm2, %zmm2
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa 704(%rdi), %ymm7
+; AVX512DQ-FCP-NEXT: vmovdqa 736(%rdi), %ymm8
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1],ymm7[2,3],ymm8[4,5],ymm7[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm8, %ymm16
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm7, %ymm18
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm8
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm8[0,1,2],xmm2[3],xmm8[4],xmm2[5],xmm8[6,7]
; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} xmm10 = [8,9,6,7,4,5,10,11,8,9,6,7,4,5,10,11]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm3, %xmm3
-; AVX512DQ-FCP-NEXT: vmovdqa64 768(%rdi), %zmm30
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm20 = [0,0,0,0,4,7,11,14]
-; AVX512DQ-FCP-NEXT: vpermd %zmm30, %zmm20, %zmm14
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,6,7,8,9,14,15,8,9,14,15,4,5,2,3,16,17,22,23,24,25,30,31,24,25,30,31,20,21,18,19]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm14, %ymm14
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm14[0,1,2],xmm3[3,4,5,6],xmm14[7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm14[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm12, %ymm8
-; AVX512DQ-FCP-NEXT: vpermd %zmm2, %zmm27, %zmm14
-; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm14, %ymm9
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3,4,5,6],ymm8[7]
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm31 {%k1} # 16-byte Folded Reload
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vpermd %ymm22, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vpermd %zmm21, %zmm1, %zmm8
-; AVX512DQ-FCP-NEXT: vpshufb %ymm13, %ymm8, %ymm8
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm8[0,1,2,3,4],ymm0[5,6,7],ymm8[8,9,10,11,12],ymm0[13,14,15]
-; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; AVX512DQ-FCP-NEXT: vpsrld $16, %xmm9, %xmm8
-; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm8 = xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm4, %xmm31
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm8, %zmm0, %zmm0
+; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm2, %xmm2
+; AVX512DQ-FCP-NEXT: vmovdqa64 768(%rdi), %zmm29
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [0,0,0,0,4,7,11,14]
+; AVX512DQ-FCP-NEXT: vpermd %zmm29, %zmm9, %zmm14
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [0,1,6,7,8,9,14,15,8,9,14,15,4,5,2,3,16,17,22,23,24,25,30,31,24,25,30,31,20,21,18,19]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm14, %ymm14
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm14[0,1,2],xmm2[3,4,5,6],xmm14[7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm14[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm7, %ymm14
+; AVX512DQ-FCP-NEXT: vpermd %zmm3, %zmm28, %zmm12
+; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm12, %ymm0
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm14[7]
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm19 {%k1} # 16-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vpermd %ymm24, %ymm1, %ymm0
+; AVX512DQ-FCP-NEXT: vpshufb %ymm15, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT: vpermd %zmm21, %zmm5, %zmm1
+; AVX512DQ-FCP-NEXT: vpshufb %ymm13, %ymm1, %ymm1
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7],ymm1[8,9,10,11,12],ymm0[13,14,15]
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{[-0-9]+}}(%r{{[sb]}}p), %xmm17 # 16-byte Reload
+; AVX512DQ-FCP-NEXT: vpsrld $16, %xmm17, %xmm1
+; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm4, %xmm22
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm0, %zmm0
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [0,4,7,0,0,4,7,0]
-; AVX512DQ-FCP-NEXT: # ymm7 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vpermd %ymm24, %ymm7, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[4,5,2,3,4,5,6,7,8,9,2,3,4,5,10,11,20,21,18,19,20,21,22,23,24,25,18,19,20,21,26,27]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [2,6,9,13,2,6,9,13]
-; AVX512DQ-FCP-NEXT: vpermd %zmm2, %zmm8, %zmm2
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[4,5,2,3,4,5,10,11,12,13,12,13,12,13,12,13,20,21,18,19,20,21,26,27,28,29,28,29,28,29,28,29]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm0[5,6,7],ymm2[8,9,10,11,12],ymm0[13,14,15]
-; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm11[4],xmm5[4],xmm11[5],xmm5[5],xmm11[6],xmm5[6],xmm11[7],xmm5[7]
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,4,7,0,0,4,7,0]
+; AVX512DQ-FCP-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpermd %ymm20, %ymm1, %ymm0
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm0[4,5,2,3,4,5,6,7,8,9,2,3,4,5,10,11,20,21,18,19,20,21,22,23,24,25,18,19,20,21,26,27]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [2,6,9,13,2,6,9,13]
+; AVX512DQ-FCP-NEXT: vpermd %zmm3, %zmm4, %zmm3
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[4,5,2,3,4,5,10,11,12,13,12,13,12,13,12,13,20,21,18,19,20,21,26,27,28,29,28,29,28,29,28,29]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm12[5,6,7],ymm3[8,9,10,11,12],ymm12[13,14,15]
+; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm11[4],xmm6[4],xmm11[5],xmm6[5],xmm11[6],xmm6[6],xmm11[7],xmm6[7]
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} xmm19 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm19, %xmm4
-; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm2, %xmm2
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm19, %xmm0
+; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm6, %xmm6
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm3, %zmm0
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa64 832(%rdi), %zmm4
-; AVX512DQ-FCP-NEXT: vpermd %zmm4, %zmm1, %zmm2
+; AVX512DQ-FCP-NEXT: vmovdqa64 832(%rdi), %zmm6
+; AVX512DQ-FCP-NEXT: vpermd %zmm6, %zmm5, %zmm3
; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} ymm12 = [16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm2, %ymm2
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm2[6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm3, %ymm3
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm3[6,7]
; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-FCP-NEXT: vmovdqa 256(%rdi), %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqa 288(%rdi), %ymm15
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm15[0,1],ymm0[2,3],ymm15[4,5],ymm0[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa 288(%rdi), %ymm7
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm7[0,1],ymm0[2,3],ymm7[4,5],ymm0[6,7]
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm2
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm5, %xmm14
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm14[0,1,2],xmm5[3],xmm14[4],xmm5[5],xmm14[6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm5, %xmm10
-; AVX512DQ-FCP-NEXT: vmovdqa64 320(%rdi), %zmm18
-; AVX512DQ-FCP-NEXT: vpermd %zmm18, %zmm20, %zmm14
-; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm14, %ymm6
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm6[0,1,2],xmm10[3,4,5,6],xmm6[7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 384(%rdi), %zmm14
-; AVX512DQ-FCP-NEXT: vpermd %zmm14, %zmm1, %zmm1
-; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5],ymm1[6,7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm14, %xmm15
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm14 = xmm15[0,1,2],xmm14[3],xmm15[4],xmm14[5],xmm15[6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm14, %xmm10
+; AVX512DQ-FCP-NEXT: vmovdqa64 320(%rdi), %zmm26
+; AVX512DQ-FCP-NEXT: vpermd %zmm26, %zmm9, %zmm9
+; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm9, %ymm8
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm8[0,1,2],xmm10[3,4,5,6],xmm8[7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 384(%rdi), %zmm9
+; AVX512DQ-FCP-NEXT: vpermd %zmm9, %zmm5, %zmm5
+; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm5, %ymm5
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3,4,5],ymm5[6,7]
; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm15[0,1,2],ymm2[3],ymm15[4,5],ymm2[6],ymm15[7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm7[0,1,2],ymm2[3],ymm7[4,5],ymm2[6],ymm7[7]
; AVX512DQ-FCP-NEXT: vmovdqa %ymm2, %ymm11
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm6
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm6[0,1,2,3],xmm1[4],xmm6[5],xmm1[6],xmm6[7]
-; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} xmm6 = [10,11,8,9,6,7,12,13,10,11,8,9,6,7,12,13]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm1, %xmm1
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm20 = [1,0,0,0,4,8,11,15]
-; AVX512DQ-FCP-NEXT: vpermd %zmm18, %zmm20, %zmm10
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm3 = [2,3,4,5,10,11,12,13,0,1,0,1,0,1,0,1,18,19,20,21,26,27,28,29,16,17,16,17,16,17,16,17]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm10, %ymm10
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm10[0,1,2],xmm1[3,4,5,6],xmm10[7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm10[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpermd %zmm14, %zmm8, %zmm10
-; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} ymm2 = [18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm10, %ymm10
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm10[6,7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm5, %xmm8
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm8[0,1,2,3],xmm5[4],xmm8[5],xmm5[6],xmm8[7]
+; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} xmm8 = [10,11,8,9,6,7,12,13,10,11,8,9,6,7,12,13]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm5, %xmm5
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm14 = [1,0,0,0,4,8,11,15]
+; AVX512DQ-FCP-NEXT: vpermd %zmm26, %zmm14, %zmm10
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm15 = [2,3,4,5,10,11,12,13,0,1,0,1,0,1,0,1,18,19,20,21,26,27,28,29,16,17,16,17,16,17,16,17]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm15, %ymm10, %ymm10
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm10[0,1,2],xmm5[3,4,5,6],xmm10[7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm10[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpermd %zmm9, %zmm4, %zmm10
+; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} ymm0 = [18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm10, %ymm10
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3,4,5],ymm10[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm16, %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm18, %ymm3
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm2[0,1,2],ymm3[3],ymm2[4,5],ymm3[6],ymm2[7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm5, %xmm10
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm10[0,1,2,3],xmm5[4],xmm10[5],xmm5[6],xmm10[7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm5, %xmm5
+; AVX512DQ-FCP-NEXT: vpermd %zmm29, %zmm14, %zmm8
+; AVX512DQ-FCP-NEXT: vpshufb %ymm15, %ymm8, %ymm8
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm8[0,1,2],xmm5[3,4,5,6],xmm8[7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm8[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpermd %zmm6, %zmm4, %zmm8
+; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm8, %ymm0
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3,4,5],ymm0[6,7]
; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm16, %ymm5
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm17, %ymm0
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0,1,2],ymm0[3],ymm5[4,5],ymm0[6],ymm5[7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm10
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm10[0,1,2,3],xmm1[4],xmm10[5],xmm1[6],xmm10[7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm1, %xmm1
-; AVX512DQ-FCP-NEXT: vpermd %zmm30, %zmm20, %zmm6
-; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm6, %ymm3
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3,4,5,6],xmm3[7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpermd %zmm4, %zmm8, %zmm3
-; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm3, %ymm2
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm11[0,1],ymm15[2],ymm11[3,4,5],ymm15[6],ymm11[7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm11[0,1],ymm7[2],ymm11[3,4,5],ymm7[6],ymm11[7]
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm11, %ymm16
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,0,1,14,15,12,13,10,11,8,9,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [1,0,0,0,5,8,12,15]
-; AVX512DQ-FCP-NEXT: vpermd %zmm18, %zmm3, %zmm6
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,2,3,16,17,22,23,24,25,30,31,128,128,128,128,128,128,128,128]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm6, %ymm6
-; AVX512DQ-FCP-NEXT: vpor %ymm6, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT: vpermd %zmm14, %zmm27, %zmm6
-; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm6, %ymm6
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm6[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm25 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm6 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm5
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm5[4],xmm0[5],xmm5[6],xmm0[7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [0,1,2,3,0,1,14,15,12,13,10,11,8,9,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm10 = [1,0,0,0,5,8,12,15]
+; AVX512DQ-FCP-NEXT: vpermd %zmm26, %zmm10, %zmm5
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm14 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,2,3,16,17,22,23,24,25,30,31,128,128,128,128,128,128,128,128]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm14, %ymm5, %ymm5
+; AVX512DQ-FCP-NEXT: vpor %ymm5, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT: vpermd %zmm9, %zmm28, %zmm5
+; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm5, %ymm5
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm5[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm27 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm5 # 64-byte Folded Reload
; AVX512DQ-FCP-NEXT: movw $-512, %ax # imm = 0xFE00
; AVX512DQ-FCP-NEXT: kmovw %eax, %k1
-; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm6 {%k1}
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1],ymm5[2],ymm0[3,4,5],ymm5[6],ymm0[7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm17, %ymm14
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm6
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm6[4],xmm1[5],xmm6[6],xmm1[7]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT: vpermd %zmm30, %zmm3, %zmm2
-; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm2, %ymm2
-; AVX512DQ-FCP-NEXT: vpor %ymm2, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT: vpermd %zmm4, %zmm27, %zmm0
-; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm1 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm1 {%k1}
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vpermd %ymm22, %ymm7, %ymm0
+; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm5 {%k1}
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1],ymm2[2],ymm3[3,4,5],ymm2[6],ymm3[7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm18, %ymm15
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm2, %ymm3
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm9
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm9[4],xmm0[5],xmm9[6],xmm0[7]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT: vpermd %zmm29, %zmm10, %zmm8
+; AVX512DQ-FCP-NEXT: vpshufb %ymm14, %ymm8, %ymm8
+; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm8, %ymm0
+; AVX512DQ-FCP-NEXT: vpermd %zmm6, %zmm28, %zmm6
+; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm6, %ymm6
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm6[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm31 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm31 {%k1}
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vpermd %ymm24, %ymm1, %ymm0
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[4,5,2,3,4,5,6,7,8,9,2,3,4,5,10,11,20,21,18,19,20,21,22,23,24,25,18,19,20,21,26,27]
-; AVX512DQ-FCP-NEXT: vpermd %zmm21, %zmm8, %zmm1
+; AVX512DQ-FCP-NEXT: vpermd %zmm21, %zmm4, %zmm1
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[4,5,2,3,4,5,10,11,12,13,12,13,12,13,12,13,20,21,18,19,20,21,26,27,28,29,28,29,28,29,28,29]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7],ymm1[8,9,10,11,12],ymm0[13,14,15]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm31, %xmm1
-; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm9[4],xmm1[4],xmm9[5],xmm1[5],xmm9[6],xmm1[6],xmm9[7],xmm1[7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm17, %xmm1
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm22, %xmm2
+; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm19, %xmm2
; AVX512DQ-FCP-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
@@ -16956,254 +16984,256 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vmovdqa 416(%rdi), %ymm1
; AVX512DQ-FCP-NEXT: vmovdqa 384(%rdi), %ymm2
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm1[2],ymm2[3,4,5],ymm1[6],ymm2[7]
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm2, %ymm11
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm1, %ymm8
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4],xmm0[5],xmm2[6],xmm0[7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm16, %ymm7
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0,1,2],ymm15[3],ymm7[4,5],ymm15[6],ymm7[7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2,3,4,5],xmm2[6],xmm3[7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,0,1,0,1,0,1,14,15,12,13,10,11,8,9]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm2, %ymm13
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm1, %ymm11
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm6
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm6[4],xmm0[5],xmm6[6],xmm0[7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm16, %ymm5
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm5[0,1,2],ymm7[3],ymm5[4,5],ymm7[6],ymm5[7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm6, %xmm8
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm8[0],xmm6[1],xmm8[2,3,4,5],xmm6[6],xmm8[7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [0,1,0,1,0,1,0,1,14,15,12,13,10,11,8,9]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm0, %xmm0
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [2,11,2,11,12,5,8,9]
-; AVX512DQ-FCP-NEXT: vpermd %zmm18, %zmm9, %zmm6
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,0,1,22,23,28,29,18,19,128,128,128,128,128,128,128,128,128,128]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm6, %ymm6
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm6[0,1,2],ymm0[3,4,5,6,7],ymm6[8,9,10],ymm0[11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [0,1,2,3,2,3,0,1,14,15,12,13,10,11,128,128]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm2, %xmm2
-; AVX512DQ-FCP-NEXT: vpor %ymm6, %ymm2, %ymm2
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm23 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm23 {%k1}
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vpermd %zmm26, %zmm9, %zmm10
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,0,1,22,23,28,29,18,19,128,128,128,128,128,128,128,128,128,128]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm10, %ymm10
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm10[0,1,2],ymm0[3,4,5,6,7],ymm10[8,9,10],ymm0[11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [0,1,2,3,2,3,0,1,14,15,12,13,10,11,128,128]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm14, %xmm6, %xmm6
+; AVX512DQ-FCP-NEXT: vpor %ymm6, %ymm10, %ymm6
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm1 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm1 {%k1}
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT: vmovdqa 864(%rdi), %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa 832(%rdi), %ymm13
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm13[0,1],ymm1[2],ymm13[3,4,5],ymm1[6],ymm13[7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm1, %ymm31
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4],xmm0[5],xmm2[6],xmm0[7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm0, %xmm0
-; AVX512DQ-FCP-NEXT: vpermd %zmm30, %zmm9, %zmm2
-; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm2, %ymm2
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm14[0,1,2],ymm5[3],ymm14[4,5],ymm5[6],ymm14[7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm5, %ymm23
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm17, %ymm22
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm3, %xmm9
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm9[0],xmm3[1],xmm9[2,3,4,5],xmm3[6],xmm9[7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm3, %xmm3
+; AVX512DQ-FCP-NEXT: vmovdqa 832(%rdi), %ymm2
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm1[2],ymm2[3,4,5],ymm1[6],ymm2[7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm2, %ymm19
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm10
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm10[4],xmm0[5],xmm10[6],xmm0[7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm0, %xmm0
+; AVX512DQ-FCP-NEXT: vpermd %zmm29, %zmm9, %zmm8
+; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm8, %ymm8
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm15[0,1,2],ymm3[3],ymm15[4,5],ymm3[6],ymm15[7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm3, %ymm22
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm9, %xmm10
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm10[0],xmm9[1],xmm10[2,3,4,5],xmm9[6],xmm10[7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm14, %xmm9, %xmm9
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7],ymm2[8,9,10],ymm0[11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpor %ymm2, %ymm3, %ymm2
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm29 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm29 {%k1}
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm8[0,1,2],ymm0[3,4,5,6,7],ymm8[8,9,10],ymm0[11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm23 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm23 {%k1}
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0,1],ymm9[2],ymm6[3,4],ymm9[5],ymm6[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm2
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm1, %xmm17
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm6[2],ymm4[3,4],ymm6[5],ymm4[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm2, %xmm0, %xmm8
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm2, %xmm21
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm21 = [0,3,7,10,14,0,0,0]
-; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vpermd %zmm27, %zmm21, %zmm3
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm3, %ymm3
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm1, %ymm5
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm0, %zmm25, %zmm28
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm11[0,1,2],ymm8[3],ymm11[4,5],ymm8[6],ymm11[7]
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm11, %ymm12
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm8, %ymm19
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0],xmm0[1],xmm3[2,3,4,5],xmm0[6],xmm3[7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm15[0],ymm7[1],ymm15[2,3],ymm7[4],ymm15[5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm16, %ymm11
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm3, %xmm10
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm10[1],xmm3[2,3,4,5],xmm10[6],xmm3[7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [2,3,2,3,2,3,2,3,0,1,14,15,12,13,10,11]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm0, %xmm0
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm16 = [0,3,7,10,14,0,0,0]
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vpermd %zmm28, %zmm16, %zmm12
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm12, %ymm12
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm2, %ymm3
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm12[2,3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm25, %zmm31
+; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm0, %zmm27, %zmm31
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm13[0,1,2],ymm11[3],ymm13[4,5],ymm11[6],ymm13[7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm13, %ymm18
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm11, %ymm25
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm12
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm12[0],xmm0[1],xmm12[2,3,4,5],xmm0[6],xmm12[7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm7[0],ymm5[1],ymm7[2,3],ymm5[4],ymm7[5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm5, %ymm17
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm12, %xmm14
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm12[0],xmm14[1],xmm12[2,3,4,5],xmm14[6],xmm12[7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [2,3,2,3,2,3,2,3,0,1,14,15,12,13,10,11]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm14, %xmm0, %xmm0
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm16 = [2,0,0,0,6,9,13,0]
-; AVX512DQ-FCP-NEXT: vpermd %zmm18, %zmm16, %zmm2
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,2,3,16,17,22,23,24,25,128,128,128,128,128,128,128,128,128,128]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm2, %ymm2
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7],ymm2[8,9,10],ymm0[11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [0,1,2,3,4,5,2,3,0,1,14,15,12,13,128,128]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm3, %xmm3
-; AVX512DQ-FCP-NEXT: vpor %ymm2, %ymm3, %ymm2
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm28 {%k1}
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm14[0,1],ymm2[2],ymm14[3,4],ymm2[5],ymm14[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm2, %ymm20
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm17, %xmm8
-; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm0, %xmm2
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm13 = [2,0,0,0,6,9,13,0]
+; AVX512DQ-FCP-NEXT: vpermd %zmm26, %zmm13, %zmm10
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,2,3,16,17,22,23,24,25,128,128,128,128,128,128,128,128,128,128]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm10, %ymm10
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm10[0,1,2],ymm0[3,4,5,6,7],ymm10[8,9,10],ymm0[11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,2,3,0,1,14,15,12,13,128,128]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm2, %xmm12, %xmm12
+; AVX512DQ-FCP-NEXT: vpor %ymm10, %ymm12, %ymm10
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm31 {%k1}
+; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1],ymm9[2],ymm8[3,4],ymm9[5],ymm8[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm9, %ymm24
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm21, %xmm9
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm0, %xmm10
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vpermd %zmm17, %zmm21, %zmm2
-; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm2, %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm5, %ymm29
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm0, %zmm25, %zmm26
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm31, %ymm5
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm13[0,1,2],ymm5[3],ymm13[4,5],ymm5[6],ymm13[7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2,3,4,5],xmm0[6],xmm2[7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm0, %xmm0
-; AVX512DQ-FCP-NEXT: vpermd %zmm30, %zmm16, %zmm2
-; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm2, %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm23, %ymm4
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm22, %ymm7
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0],ymm7[1],ymm4[2,3],ymm7[4],ymm4[5,6,7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3,4,5],xmm3[6],xmm2[7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm2, %xmm2
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3]
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vpermd %zmm21, %zmm16, %zmm10
+; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm10, %ymm10
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm10[2,3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm0, %zmm27, %zmm30
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm19, %ymm3
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2],ymm1[3],ymm3[4,5],ymm1[6],ymm3[7]
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm1, %ymm11
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm10
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm10[0],xmm0[1],xmm10[2,3,4,5],xmm0[6],xmm10[7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm14, %xmm0, %xmm0
+; AVX512DQ-FCP-NEXT: vpermd %zmm29, %zmm13, %zmm10
+; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm10, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm22, %ymm5
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm5[0],ymm15[1],ymm5[2,3],ymm15[4],ymm5[5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm15, %ymm19
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm10, %xmm12
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0],xmm12[1],xmm10[2,3,4,5],xmm12[6],xmm10[7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm2, %xmm10, %xmm2
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
; AVX512DQ-FCP-NEXT: vpor %ymm1, %ymm2, %ymm1
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm26 {%k1}
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0,1],ymm9[2,3],ymm6[4,5],ymm9[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm9, %ymm23
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm6, %ymm31
+; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm30 {%k1}
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm6[2,3],ymm4[4,5],ymm6[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm6, %ymm20
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm4, %ymm23
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm1, %xmm1
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [10,11,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm2, %xmm24
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm1, %xmm1
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [10,11,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm0, %xmm0
; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm22 = [0,4,7,11,14,0,0,0]
-; AVX512DQ-FCP-NEXT: vpermd %zmm27, %zmm22, %zmm3
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm3, %ymm3
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm19, %ymm6
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm6[0],ymm12[1],ymm6[2,3],ymm12[4],ymm6[5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm12, %ymm21
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm3, %xmm10
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm10[1],xmm3[2,3,4,5],xmm10[6],xmm3[7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm15[0],ymm11[1],ymm15[2,3,4],ymm11[5],ymm15[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm15, %ymm19
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm10, %xmm12
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm12[0],xmm10[1],xmm12[2],xmm10[3],xmm12[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [4,5,4,5,4,5,4,5,2,3,0,1,14,15,12,13]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm3, %xmm3
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQ-FCP-NEXT: vpermd %zmm28, %zmm22, %zmm10
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm10, %ymm10
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm10[2,3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm18, %ymm13
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm25, %ymm1
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm1[0],ymm13[1],ymm1[2,3],ymm13[4],ymm1[5,6,7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm10, %xmm14
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0],xmm14[1],xmm10[2,3,4,5],xmm14[6],xmm10[7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm17, %ymm4
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm7[0],ymm4[1],ymm7[2,3,4],ymm4[5],ymm7[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm7, %ymm18
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm14, %xmm15
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm14 = xmm15[0],xmm14[1],xmm15[2],xmm14[3],xmm15[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [4,5,4,5,4,5,4,5,2,3,0,1,14,15,12,13]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm15, %xmm10, %xmm10
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm16 = [3,0,0,0,6,10,13,0]
-; AVX512DQ-FCP-NEXT: vpermd %zmm18, %zmm16, %zmm1
+; AVX512DQ-FCP-NEXT: vpermd %zmm26, %zmm16, %zmm1
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,0,1,18,19,20,21,26,27,128,128,128,128,128,128,128,128,128,128]
; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm1[0,1,2],ymm3[3,4,5,6,7],ymm1[8,9,10],ymm3[11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [0,1,2,3,6,7,4,5,2,3,0,1,14,15,128,128]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm15, %xmm10, %xmm10
-; AVX512DQ-FCP-NEXT: vpor %ymm1, %ymm10, %ymm1
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm0 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm10 = ymm1[0,1,2],ymm10[3,4,5,6,7],ymm1[8,9,10],ymm10[11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [0,1,2,3,6,7,4,5,2,3,0,1,14,15,128,128]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm14, %xmm14
+; AVX512DQ-FCP-NEXT: vpor %ymm1, %ymm14, %ymm1
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm10[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm0 # 64-byte Folded Reload
; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm0 {%k1}
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm20, %ymm10
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm14[0,1],ymm10[2,3],ymm14[4,5],ymm10[6,7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm3, %xmm3
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm24, %xmm8
-; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm1, %xmm1
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; AVX512DQ-FCP-NEXT: vpermd %zmm17, %zmm22, %zmm3
-; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm3, %ymm3
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm1[0,1],ymm3[2,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0],ymm13[1],ymm5[2,3],ymm13[4],ymm5[5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm5, %ymm20
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3,4,5],xmm3[6],xmm1[7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm1, %xmm1
-; AVX512DQ-FCP-NEXT: vpermd %zmm30, %zmm16, %zmm3
-; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm3, %ymm2
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0],ymm7[1],ymm4[2,3,4],ymm7[5],ymm4[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm4, %ymm22
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm3, %xmm5
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0],xmm3[1],xmm5[2],xmm3[3],xmm5[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm15, %xmm3, %xmm3
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm24, %ymm14
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0,1],ymm14[2,3],ymm8[4,5],ymm14[6,7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm10
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm10, %xmm9
+; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm1, %xmm1
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1],xmm1[2],xmm9[2],xmm1[3],xmm9[3]
+; AVX512DQ-FCP-NEXT: vpermd %zmm21, %zmm22, %zmm6
+; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm6, %ymm6
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm1[0,1],ymm6[2,3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm11[0],ymm3[1],ymm11[2,3],ymm3[4],ymm11[5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm3, %ymm17
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm6
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm6[1],xmm1[2,3,4,5],xmm6[6],xmm1[7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm15, %xmm1, %xmm1
+; AVX512DQ-FCP-NEXT: vpermd %zmm29, %zmm16, %zmm6
+; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm6, %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm19, %ymm15
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm5[0],ymm15[1],ymm5[2,3,4],ymm15[5],ymm5[6,7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm6, %xmm10
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm10[0],xmm6[1],xmm10[2],xmm6[3],xmm10[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm6, %xmm3
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
; AVX512DQ-FCP-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm9 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm9 # 64-byte Folded Reload
; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm9 {%k1}
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm20, %ymm0
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm23, %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm31, %ymm2
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5],ymm1[6],ymm2[7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,2,3,4,5,6,7,10,11,6,7,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm2, %xmm2
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,2,3,4,5,6,7,10,11,6,7,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm2, %xmm2
; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,4,6,7]
; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm5 = [1,4,8,11,15,0,0,0]
-; AVX512DQ-FCP-NEXT: vpermd %zmm27, %zmm5, %zmm2
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm29, %ymm15
-; AVX512DQ-FCP-NEXT: vpshufb %ymm15, %ymm2, %ymm2
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm10 = [1,4,8,11,15,0,0,0]
+; AVX512DQ-FCP-NEXT: vpermd %zmm28, %zmm10, %zmm2
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm3 = [u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm1[0,1],ymm2[2,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm21, %ymm8
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm6[0],ymm8[1],ymm6[2,3,4],ymm8[5],ymm6[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm25, %ymm0
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0],ymm13[1],ymm0[2,3,4],ymm13[5],ymm0[6,7]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3],xmm2[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm19, %ymm2
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm11[2],ymm2[3,4],ymm11[5],ymm2[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [8,9,8,9,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm2, %xmm6
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm18, %ymm0
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm0[0,1],ymm4[2],ymm0[3,4],ymm4[5],ymm0[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [8,9,8,9,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm2, %xmm4
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm0, %xmm16
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm2
; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,3,1,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm16 = [0,1,10,3,14,7,10,3]
-; AVX512DQ-FCP-NEXT: vpermd %zmm18, %zmm16, %zmm8
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm13 = [0,1,10,3,14,7,10,3]
+; AVX512DQ-FCP-NEXT: vpermd %zmm26, %zmm13, %zmm7
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [4,5,10,11,0,1,10,11,0,1,4,5,0,1,14,15,20,21,26,27,16,17,26,27,16,17,20,21,16,17,30,31]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm8, %ymm8
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm8[0],ymm6[1,2],ymm8[3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [0,1,2,3,12,13,6,7,4,5,2,3,0,1,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm1, %xmm1
+; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm7, %ymm7
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm7[0],ymm4[1,2],ymm7[3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [0,1,2,3,12,13,6,7,4,5,2,3,0,1,14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm1, %xmm1
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm6[0,1,2],ymm1[3,4,5,6,7],ymm6[8,9,10],ymm1[11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm12 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm4[0,1,2],ymm1[3,4,5,6,7],ymm4[8,9,10],ymm1[11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3],ymm1[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm12 # 64-byte Folded Reload
; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm12 {%k1}
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm14[0,1,2],ymm10[3],ymm14[4,5],ymm10[6],ymm14[7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm6
-; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm6, %xmm4
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0,1,2],ymm14[3],ymm8[4,5],ymm14[6],ymm8[7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm4
+; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm4, %xmm4
; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,4,6,7]
; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; AVX512DQ-FCP-NEXT: vpermd %zmm17, %zmm5, %zmm4
-; AVX512DQ-FCP-NEXT: vpshufb %ymm15, %ymm4, %ymm4
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm5 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vpermd %zmm21, %zmm10, %zmm4
+; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm4, %ymm4
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm6 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm8 # 32-byte Folded Reload
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm10 # 32-byte Folded Reload
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm4[2,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm20, %ymm4
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0],ymm13[1],ymm4[2,3,4],ymm13[5],ymm4[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm17, %ymm0
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm11[0],ymm0[1],ymm11[2,3,4],ymm0[5],ymm11[6,7]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm4, %xmm11
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm11[0],xmm4[1],xmm11[2],xmm4[3],xmm11[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm4, %xmm4
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm22, %ymm8
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm7[2],ymm8[3,4],ymm7[5],ymm8[6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm8, %xmm3
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm8, %xmm8
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm8 = xmm8[0,3,1,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm8 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: vpermd %zmm30, %zmm16, %zmm7
-; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm7, %ymm2
+; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm4, %xmm4
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm5[0,1],ymm15[2],ymm5[3,4],ymm15[5],ymm5[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm16, %xmm0
+; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm7, %xmm3
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm7, %xmm7
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm7[0,3,1,3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1],xmm3[2],xmm7[2],xmm3[3],xmm7[3]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm7 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vpermd %zmm29, %zmm13, %zmm5
+; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm5, %ymm2
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm3[1,2],ymm2[3,4,5,6,7]
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm3
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm2[0,1,2],ymm3[3,4,5,6,7],ymm2[8,9,10],ymm3[11,12,13,14,15]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm1 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm1 # 64-byte Folded Reload
; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm2, %zmm0, %zmm1 {%k1}
; AVX512DQ-FCP-NEXT: vpmovsxdq {{.*#+}} zmm2 = [0,0,18446744073709486080,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615]
; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
@@ -17211,31 +17241,32 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
; AVX512DQ-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm4 # 64-byte Folded Reload
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512DQ-FCP-NEXT: vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm5 # 64-byte Folded Reload
; AVX512DQ-FCP-NEXT: vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm6 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm8 # 64-byte Folded Reload
; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm3, %zmm2, %zmm10
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm4, %zmm2, %zmm8
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm6, (%rsi)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm5, 64(%rsi)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm8, 64(%rdx)
+; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm4, %zmm2, %zmm7
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm8, (%rsi)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm6, 64(%rsi)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm7, 64(%rdx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm10, (%rdx)
-; AVX512DQ-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vmovaps %zmm2, 64(%rcx)
+; AVX512DQ-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vmovaps %zmm0, 64(%rcx)
; AVX512DQ-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; AVX512DQ-FCP-NEXT: vmovaps %zmm2, (%rcx)
-; AVX512DQ-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vmovaps %zmm2, 64(%r8)
-; AVX512DQ-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vmovaps %zmm2, (%r8)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm26, 64(%r9)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm28, (%r9)
+; AVX512DQ-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vmovaps %zmm0, 64(%r8)
+; AVX512DQ-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vmovaps %zmm0, (%r8)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm30, 64(%r9)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm31, (%r9)
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm9, 64(%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, (%rax)
+; AVX512DQ-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vmovaps %zmm0, (%rax)
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm1, 64(%rax)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm12, (%rax)
-; AVX512DQ-FCP-NEXT: addq $1240, %rsp # imm = 0x4D8
+; AVX512DQ-FCP-NEXT: addq $1304, %rsp # imm = 0x518
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-2.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-2.ll
index 0db78440d3aa7..9f69a3cf44189 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-2.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-2.ll
@@ -645,12 +645,14 @@ define void @load_i8_stride2_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nou
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX2-NEXT: vpshufb {{.*#+}} ymm2 = ymm1[u,u,u,u,u,u,u,u,0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm3 = ymm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30,u,u,u,u,u,u,u,u]
-; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14]
+; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm3
+; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm2
+; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5],ymm3[6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31,u,u,u,u,u,u,u,u]
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm3 = [1,3,5,7,9,11,13,15,1,3,5,7,9,11,13,15,1,3,5,7,9,11,13,15,1,3,5,7,9,11,13,15]
+; AVX2-NEXT: vpshufb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpshufb %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vmovdqa %ymm2, (%rsi)
@@ -662,12 +664,14 @@ define void @load_i8_stride2_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nou
; AVX2-FP: # %bb.0:
; AVX2-FP-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-FP-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm2 = ymm1[u,u,u,u,u,u,u,u,0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm3 = ymm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30,u,u,u,u,u,u,u,u]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
+; AVX2-FP-NEXT: vpbroadcastq {{.*#+}} ymm2 = [0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14]
+; AVX2-FP-NEXT: vpshufb %ymm2, %ymm1, %ymm3
+; AVX2-FP-NEXT: vpshufb %ymm2, %ymm0, %ymm2
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5],ymm3[6,7]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31,u,u,u,u,u,u,u,u]
+; AVX2-FP-NEXT: vpbroadcastq {{.*#+}} ymm3 = [1,3,5,7,9,11,13,15,1,3,5,7,9,11,13,15,1,3,5,7,9,11,13,15,1,3,5,7,9,11,13,15]
+; AVX2-FP-NEXT: vpshufb %ymm3, %ymm1, %ymm1
+; AVX2-FP-NEXT: vpshufb %ymm3, %ymm0, %ymm0
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-FP-NEXT: vmovdqa %ymm2, (%rsi)
@@ -679,12 +683,14 @@ define void @load_i8_stride2_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nou
; AVX2-FCP: # %bb.0:
; AVX2-FCP-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm1[u,u,u,u,u,u,u,u,0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30,u,u,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
+; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm2 = [0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14]
+; AVX2-FCP-NEXT: vpshufb %ymm2, %ymm1, %ymm3
+; AVX2-FCP-NEXT: vpshufb %ymm2, %ymm0, %ymm2
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5],ymm3[6,7]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31,u,u,u,u,u,u,u,u]
+; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm3 = [1,3,5,7,9,11,13,15,1,3,5,7,9,11,13,15,1,3,5,7,9,11,13,15,1,3,5,7,9,11,13,15]
+; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm1, %ymm1
+; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm0, %ymm0
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-FCP-NEXT: vmovdqa %ymm2, (%rsi)
@@ -694,18 +700,20 @@ define void @load_i8_stride2_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nou
;
; AVX512-LABEL: load_i8_stride2_vf32:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm1[u,u,u,u,u,u,u,u,0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30,u,u,u,u,u,u,u,u]
-; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
-; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31,u,u,u,u,u,u,u,u]
-; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
+; AVX512-NEXT: vpbroadcastq {{.*#+}} ymm0 = [0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14]
+; AVX512-NEXT: vmovdqa (%rdi), %ymm1
+; AVX512-NEXT: vmovdqa 32(%rdi), %ymm2
+; AVX512-NEXT: vpshufb %ymm0, %ymm2, %ymm3
+; AVX512-NEXT: vpshufb %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3],ymm0[4,5],ymm3[6,7]
; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX512-NEXT: vmovdqa %ymm2, (%rsi)
-; AVX512-NEXT: vmovdqa %ymm0, (%rdx)
+; AVX512-NEXT: vpbroadcastq {{.*#+}} ymm3 = [1,3,5,7,9,11,13,15,1,3,5,7,9,11,13,15,1,3,5,7,9,11,13,15,1,3,5,7,9,11,13,15]
+; AVX512-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX512-NEXT: vpshufb %ymm3, %ymm1, %ymm1
+; AVX512-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5],ymm2[6,7]
+; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX512-NEXT: vmovdqa %ymm0, (%rsi)
+; AVX512-NEXT: vmovdqa %ymm1, (%rdx)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
@@ -727,18 +735,20 @@ define void @load_i8_stride2_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nou
;
; AVX512DQ-LABEL: load_i8_stride2_vf32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm1[u,u,u,u,u,u,u,u,0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30,u,u,u,u,u,u,u,u]
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31,u,u,u,u,u,u,u,u]
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
+; AVX512DQ-NEXT: vpbroadcastq {{.*#+}} ymm0 = [0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14]
+; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm1
+; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm2
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm2, %ymm3
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm1, %ymm0
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3],ymm0[4,5],ymm3[6,7]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX512DQ-NEXT: vmovdqa %ymm2, (%rsi)
-; AVX512DQ-NEXT: vmovdqa %ymm0, (%rdx)
+; AVX512DQ-NEXT: vpbroadcastq {{.*#+}} ymm3 = [1,3,5,7,9,11,13,15,1,3,5,7,9,11,13,15,1,3,5,7,9,11,13,15,1,3,5,7,9,11,13,15]
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5],ymm2[6,7]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX512DQ-NEXT: vmovdqa %ymm0, (%rsi)
+; AVX512DQ-NEXT: vmovdqa %ymm1, (%rdx)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll
index e05b5ab9ebe02..43a45b9fd59a7 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll
@@ -1419,31 +1419,37 @@ define void @load_i8_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX-NEXT: vpshufb %xmm7, %xmm6, %xmm6
; AVX-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm4[2,7,12]
; AVX-NEXT: vpor %xmm6, %xmm8, %xmm6
-; AVX-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm2[1,6,11,u,u,u,u,u,u,u,u,u,u]
-; AVX-NEXT: vpshufb {{.*#+}} xmm9 = xmm3[u,u,u,u,u,u],zero,zero,zero,zero,xmm3[4,9,14,u,u,u]
-; AVX-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2],xmm9[3,4,5,6,7]
-; AVX-NEXT: vpshufb {{.*#+}} xmm9 = xmm0[2,7,12],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u,u,u]
-; AVX-NEXT: vpshufb {{.*#+}} xmm10 = xmm1[u,u,u,u,u,u,0,5,10,15],zero,zero,zero,xmm1[u,u,u]
-; AVX-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2],xmm10[3,4,5,6,7]
+; AVX-NEXT: vmovdqa {{.*#+}} xmm8 = [128,128,128,1,6,11,128,128,128,128,4,9,14,u,u,u]
+; AVX-NEXT: vpshufb %xmm8, %xmm2, %xmm9
+; AVX-NEXT: vpshufb %xmm8, %xmm3, %xmm8
+; AVX-NEXT: vpblendw {{.*#+}} xmm8 = xmm9[0,1,2],xmm8[3,4,5,6,7]
+; AVX-NEXT: vmovdqa {{.*#+}} xmm9 = [2,7,12,128,128,128,0,5,10,15,128,128,128,u,u,u]
+; AVX-NEXT: vpshufb %xmm9, %xmm0, %xmm10
+; AVX-NEXT: vpshufb %xmm9, %xmm1, %xmm9
+; AVX-NEXT: vpblendw {{.*#+}} xmm9 = xmm10[0,1,2],xmm9[3,4,5,6,7]
; AVX-NEXT: vpor %xmm8, %xmm9, %xmm8
; AVX-NEXT: vpshufb %xmm7, %xmm8, %xmm8
; AVX-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm4[3,8,13]
; AVX-NEXT: vpor %xmm9, %xmm8, %xmm8
-; AVX-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,zero,xmm2[2,7,12,u,u,u,u,u,u,u,u,u,u]
-; AVX-NEXT: vpshufb {{.*#+}} xmm10 = xmm1[u,u,u,u,u,u,1,6,11],zero,zero,zero,zero,xmm1[u,u,u]
-; AVX-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2],xmm10[3,4,5,6,7]
-; AVX-NEXT: vpshufb {{.*#+}} xmm10 = xmm0[3,8,13],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u,u,u]
-; AVX-NEXT: vpshufb {{.*#+}} xmm11 = xmm3[u,u,u,u,u,u],zero,zero,zero,xmm3[0,5,10,15,u,u,u]
-; AVX-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1,2],xmm11[3,4,5,6,7]
+; AVX-NEXT: vmovdqa {{.*#+}} xmm9 = [128,128,128,2,7,12,1,6,11,128,128,128,128,u,u,u]
+; AVX-NEXT: vpshufb %xmm9, %xmm2, %xmm10
+; AVX-NEXT: vpshufb %xmm9, %xmm1, %xmm9
+; AVX-NEXT: vpblendw {{.*#+}} xmm9 = xmm10[0,1,2],xmm9[3,4,5,6,7]
+; AVX-NEXT: vmovdqa {{.*#+}} xmm10 = [3,8,13,128,128,128,128,128,128,0,5,10,15,u,u,u]
+; AVX-NEXT: vpshufb %xmm10, %xmm0, %xmm11
+; AVX-NEXT: vpshufb %xmm10, %xmm3, %xmm10
+; AVX-NEXT: vpblendw {{.*#+}} xmm10 = xmm11[0,1,2],xmm10[3,4,5,6,7]
; AVX-NEXT: vpor %xmm9, %xmm10, %xmm9
; AVX-NEXT: vpshufb %xmm7, %xmm9, %xmm7
; AVX-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm4[4,9,14]
; AVX-NEXT: vpor %xmm7, %xmm9, %xmm7
-; AVX-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u],zero,zero,zero,xmm3[1,6,11,u,u,u,u]
-; AVX-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm2[3,8,13,u,u,u,u,u,u,u,u,u,u]
+; AVX-NEXT: vmovdqa {{.*#+}} xmm9 = [128,128,128,3,8,13,128,128,128,1,6,11,u,u,u,u]
+; AVX-NEXT: vpshufb %xmm9, %xmm3, %xmm3
+; AVX-NEXT: vpshufb %xmm9, %xmm2, %xmm2
; AVX-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[3,4,5],xmm2[6,7]
-; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,2,7,12],zero,zero,zero,xmm1[u,u,u,u]
-; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,9,14],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u,u,u]
+; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [4,9,14,128,128,128,2,7,12,128,128,128,u,u,u,u]
+; AVX-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX-NEXT: vpshufb %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3,4,5],xmm0[6,7]
; AVX-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm4[u,u,u,u,u,u,u,u,u,u,u,u,0,5,10,15]
@@ -2735,78 +2741,84 @@ define void @load_i8_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX-NEXT: vpblendvb %xmm13, %xmm14, %xmm6, %xmm6
; AVX-NEXT: vpshufb {{.*#+}} xmm13 = xmm2[u,u,u],zero,zero,zero,zero,xmm2[4,9,14,u,u,u,u,u,u]
; AVX-NEXT: vpshufb {{.*#+}} xmm14 = xmm3[u,u,u,0,5,10,15],zero,zero,zero,xmm3[u,u,u,u,u,u]
-; AVX-NEXT: vpor %xmm13, %xmm14, %xmm13
-; AVX-NEXT: vmovdqa {{.*#+}} xmm14 = [128,128,128,3,4,5,6,7,8,9,u,u,u,u,u,u]
-; AVX-NEXT: vpshufb %xmm14, %xmm13, %xmm13
+; AVX-NEXT: vpor %xmm13, %xmm14, %xmm14
+; AVX-NEXT: vmovdqa {{.*#+}} xmm13 = [128,128,128,3,4,5,6,7,8,9,u,u,u,u,u,u]
+; AVX-NEXT: vpshufb %xmm13, %xmm14, %xmm14
; AVX-NEXT: vpshufb {{.*#+}} xmm15 = xmm4[1,6,11],zero,zero,zero,zero,zero,zero,zero,xmm4[u,u,u,u,u,u]
-; AVX-NEXT: vpor %xmm15, %xmm13, %xmm13
-; AVX-NEXT: vpblendw {{.*#+}} xmm11 = xmm13[0,1,2,3,4],xmm11[5,6,7]
+; AVX-NEXT: vpor %xmm15, %xmm14, %xmm14
+; AVX-NEXT: vpblendw {{.*#+}} xmm11 = xmm14[0,1,2,3,4],xmm11[5,6,7]
; AVX-NEXT: vandps %ymm6, %ymm12, %ymm6
-; AVX-NEXT: vpshufb {{.*#+}} xmm13 = xmm5[u,u,u,u,u,u,u,u,u,u,u,u,u,2,7,12]
-; AVX-NEXT: vandnps %ymm13, %ymm12, %ymm13
-; AVX-NEXT: vorps %ymm6, %ymm13, %ymm6
+; AVX-NEXT: vpshufb {{.*#+}} xmm14 = xmm5[u,u,u,u,u,u,u,u,u,u,u,u,u,2,7,12]
+; AVX-NEXT: vandnps %ymm14, %ymm12, %ymm14
+; AVX-NEXT: vorps %ymm6, %ymm14, %ymm6
; AVX-NEXT: vinsertf128 $1, %xmm11, %ymm6, %ymm6
; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vpshufb {{.*#+}} xmm6 = xmm0[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[3,8,13]
-; AVX-NEXT: vpshufb {{.*#+}} xmm13 = xmm1[u,u,u,u,u,u,u,u,u,u,4,9,14],zero,zero,zero
-; AVX-NEXT: vpor %xmm6, %xmm13, %xmm6
-; AVX-NEXT: vpshufb {{.*#+}} xmm13 = xmm3[u,u,u,1,6,11],zero,zero,zero,zero,xmm3[u,u,u,u,u,u]
+; AVX-NEXT: vmovdqa {{.*#+}} xmm6 = [128,128,128,1,6,11,128,128,128,128,4,9,14,u,u,u]
+; AVX-NEXT: vpshufb %xmm6, %xmm9, %xmm14
+; AVX-NEXT: vpshufb %xmm6, %xmm10, %xmm6
+; AVX-NEXT: vpblendw {{.*#+}} xmm6 = xmm14[0,1,2],xmm6[3,4,5,6,7]
+; AVX-NEXT: vmovdqa {{.*#+}} xmm14 = [2,7,12,128,128,128,0,5,10,15,128,128,128,u,u,u]
+; AVX-NEXT: vpshufb %xmm14, %xmm7, %xmm15
+; AVX-NEXT: vpshufb %xmm14, %xmm8, %xmm14
+; AVX-NEXT: vpblendw {{.*#+}} xmm14 = xmm15[0,1,2],xmm14[3,4,5,6,7]
+; AVX-NEXT: vpor %xmm6, %xmm14, %xmm6
+; AVX-NEXT: vandps %ymm6, %ymm12, %ymm6
+; AVX-NEXT: vpshufb {{.*#+}} xmm14 = xmm5[u,u,u,u,u,u,u,u,u,u,u,u,u,3,8,13]
+; AVX-NEXT: vandnps %ymm14, %ymm12, %ymm12
+; AVX-NEXT: vorps %ymm6, %ymm12, %ymm6
+; AVX-NEXT: vpshufb {{.*#+}} xmm12 = xmm0[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[3,8,13]
+; AVX-NEXT: vpshufb {{.*#+}} xmm14 = xmm1[u,u,u,u,u,u,u,u,u,u,4,9,14],zero,zero,zero
+; AVX-NEXT: vpor %xmm12, %xmm14, %xmm12
+; AVX-NEXT: vpshufb {{.*#+}} xmm14 = xmm3[u,u,u,1,6,11],zero,zero,zero,zero,xmm3[u,u,u,u,u,u]
; AVX-NEXT: vpshufb {{.*#+}} xmm15 = xmm2[u,u,u],zero,zero,zero,xmm2[0,5,10,15,u,u,u,u,u,u]
-; AVX-NEXT: vpor %xmm13, %xmm15, %xmm13
-; AVX-NEXT: vpshufb %xmm14, %xmm13, %xmm13
+; AVX-NEXT: vpor %xmm14, %xmm15, %xmm14
+; AVX-NEXT: vpshufb %xmm13, %xmm14, %xmm13
; AVX-NEXT: vpshufb {{.*#+}} xmm14 = xmm4[2,7,12],zero,zero,zero,zero,zero,zero,zero,xmm4[u,u,u,u,u,u]
; AVX-NEXT: vpor %xmm14, %xmm13, %xmm13
-; AVX-NEXT: vpblendw {{.*#+}} xmm6 = xmm13[0,1,2,3,4],xmm6[5,6,7]
-; AVX-NEXT: vpshufb {{.*#+}} xmm13 = zero,zero,zero,xmm9[1,6,11,u,u,u,u,u,u,u,u,u,u]
-; AVX-NEXT: vpshufb {{.*#+}} xmm14 = xmm10[u,u,u,u,u,u],zero,zero,zero,zero,xmm10[4,9,14,u,u,u]
-; AVX-NEXT: vpblendw {{.*#+}} xmm13 = xmm13[0,1,2],xmm14[3,4,5,6,7]
-; AVX-NEXT: vpshufb {{.*#+}} xmm14 = xmm7[2,7,12],zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u]
-; AVX-NEXT: vpshufb {{.*#+}} xmm15 = xmm8[u,u,u,u,u,u,0,5,10,15],zero,zero,zero,xmm8[u,u,u]
-; AVX-NEXT: vpblendw {{.*#+}} xmm14 = xmm14[0,1,2],xmm15[3,4,5,6,7]
-; AVX-NEXT: vpor %xmm13, %xmm14, %xmm13
-; AVX-NEXT: vandps %ymm12, %ymm13, %ymm13
-; AVX-NEXT: vpshufb {{.*#+}} xmm14 = xmm5[u,u,u,u,u,u,u,u,u,u,u,u,u,3,8,13]
-; AVX-NEXT: vandnps %ymm14, %ymm12, %ymm12
-; AVX-NEXT: vorps %ymm12, %ymm13, %ymm12
-; AVX-NEXT: vinsertf128 $1, %xmm6, %ymm12, %ymm12
-; AVX-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm9[2,7,12,u,u,u,u,u,u,u,u,u,u]
-; AVX-NEXT: vpshufb {{.*#+}} xmm13 = xmm8[u,u,u,u,u,u,1,6,11],zero,zero,zero,zero,xmm8[u,u,u]
-; AVX-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2],xmm13[3,4,5,6,7]
-; AVX-NEXT: vpshufb {{.*#+}} xmm13 = xmm7[3,8,13],zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u]
-; AVX-NEXT: vpshufb {{.*#+}} xmm14 = xmm10[u,u,u,u,u,u],zero,zero,zero,xmm10[0,5,10,15,u,u,u]
-; AVX-NEXT: vpblendw {{.*#+}} xmm13 = xmm13[0,1,2],xmm14[3,4,5,6,7]
+; AVX-NEXT: vpblendw {{.*#+}} xmm12 = xmm13[0,1,2,3,4],xmm12[5,6,7]
+; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm6, %ymm12
+; AVX-NEXT: vmovdqa {{.*#+}} xmm6 = [128,128,128,2,7,12,1,6,11,128,128,128,128,u,u,u]
+; AVX-NEXT: vpshufb %xmm6, %xmm9, %xmm13
+; AVX-NEXT: vpshufb %xmm6, %xmm8, %xmm6
+; AVX-NEXT: vpblendw {{.*#+}} xmm6 = xmm13[0,1,2],xmm6[3,4,5,6,7]
+; AVX-NEXT: vmovdqa {{.*#+}} xmm13 = [3,8,13,128,128,128,128,128,128,0,5,10,15,u,u,u]
+; AVX-NEXT: vpshufb %xmm13, %xmm7, %xmm14
+; AVX-NEXT: vpshufb %xmm13, %xmm10, %xmm13
+; AVX-NEXT: vpblendw {{.*#+}} xmm13 = xmm14[0,1,2],xmm13[3,4,5,6,7]
; AVX-NEXT: vpor %xmm6, %xmm13, %xmm6
; AVX-NEXT: vpshufb {{.*#+}} xmm13 = xmm2[u,u,u],zero,zero,zero,xmm2[1,6,11,u,u,u,u,u,u,u]
; AVX-NEXT: vpshufb {{.*#+}} xmm14 = xmm3[u,u,u,2,7,12],zero,zero,zero,xmm3[u,u,u,u,u,u,u]
-; AVX-NEXT: vpor %xmm13, %xmm14, %xmm13
-; AVX-NEXT: vmovdqa {{.*#+}} xmm14 = [128,128,128,3,4,5,6,7,8,u,u,u,u,u,u,u]
-; AVX-NEXT: vpshufb %xmm14, %xmm13, %xmm13
+; AVX-NEXT: vpor %xmm13, %xmm14, %xmm14
+; AVX-NEXT: vmovdqa {{.*#+}} xmm13 = [128,128,128,3,4,5,6,7,8,u,u,u,u,u,u,u]
+; AVX-NEXT: vpshufb %xmm13, %xmm14, %xmm14
; AVX-NEXT: vpshufb {{.*#+}} xmm15 = xmm4[3,8,13],zero,zero,zero,zero,zero,zero,xmm4[u,u,u,u,u,u,u]
-; AVX-NEXT: vpor %xmm15, %xmm13, %xmm13
+; AVX-NEXT: vpor %xmm15, %xmm14, %xmm14
; AVX-NEXT: vpshufb {{.*#+}} xmm15 = xmm5[u,u,u,u,u,u,u,u,u,u,u,u,u,4,9,14]
-; AVX-NEXT: vinsertf128 $1, %xmm13, %ymm15, %ymm13
+; AVX-NEXT: vinsertf128 $1, %xmm14, %ymm15, %ymm14
; AVX-NEXT: vmovaps {{.*#+}} ymm15 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255]
; AVX-NEXT: vandps %ymm6, %ymm15, %ymm6
-; AVX-NEXT: vandnps %ymm13, %ymm15, %ymm13
-; AVX-NEXT: vorps %ymm6, %ymm13, %ymm6
-; AVX-NEXT: vpshufb {{.*#+}} xmm13 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,xmm0[4,9,14]
+; AVX-NEXT: vandnps %ymm14, %ymm15, %ymm14
+; AVX-NEXT: vorps %ymm6, %ymm14, %ymm6
+; AVX-NEXT: vpshufb {{.*#+}} xmm14 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,xmm0[4,9,14]
; AVX-NEXT: vpshufb {{.*#+}} xmm15 = xmm1[u,u,u,u,u,u,u,u,u,0,5,10,15],zero,zero,zero
-; AVX-NEXT: vpor %xmm13, %xmm15, %xmm13
+; AVX-NEXT: vpor %xmm14, %xmm15, %xmm14
; AVX-NEXT: vextractf128 $1, %ymm6, %xmm15
; AVX-NEXT: vpmovsxwq {{.*#+}} xmm11 = [18446744073709551615,255]
-; AVX-NEXT: vpblendvb %xmm11, %xmm15, %xmm13, %xmm13
-; AVX-NEXT: vinsertf128 $1, %xmm13, %ymm6, %ymm6
-; AVX-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[u,u,u,u,u,u],zero,zero,zero,xmm10[1,6,11,u,u,u,u]
-; AVX-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,zero,xmm9[3,8,13,u,u,u,u,u,u,u,u,u,u]
+; AVX-NEXT: vpblendvb %xmm11, %xmm15, %xmm14, %xmm14
+; AVX-NEXT: vinsertf128 $1, %xmm14, %ymm6, %ymm6
+; AVX-NEXT: vmovdqa {{.*#+}} xmm14 = [128,128,128,3,8,13,128,128,128,1,6,11,u,u,u,u]
+; AVX-NEXT: vpshufb %xmm14, %xmm10, %xmm10
+; AVX-NEXT: vpshufb %xmm14, %xmm9, %xmm9
; AVX-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2],xmm10[3,4,5],xmm9[6,7]
-; AVX-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,u,2,7,12],zero,zero,zero,xmm8[u,u,u,u]
-; AVX-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[4,9,14],zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u]
+; AVX-NEXT: vmovdqa {{.*#+}} xmm10 = [4,9,14,128,128,128,2,7,12,128,128,128,u,u,u,u]
+; AVX-NEXT: vpshufb %xmm10, %xmm8, %xmm8
+; AVX-NEXT: vpshufb %xmm10, %xmm7, %xmm7
; AVX-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2],xmm8[3,4,5],xmm7[6,7]
; AVX-NEXT: vpor %xmm7, %xmm9, %xmm7
; AVX-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u],zero,zero,zero,xmm2[2,7,12,u,u,u,u,u,u,u]
; AVX-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,3,8,13],zero,zero,zero,xmm3[u,u,u,u,u,u,u]
; AVX-NEXT: vpor %xmm2, %xmm3, %xmm2
-; AVX-NEXT: vpshufb %xmm14, %xmm2, %xmm2
+; AVX-NEXT: vpshufb %xmm13, %xmm2, %xmm2
; AVX-NEXT: vpshufb {{.*#+}} xmm3 = xmm4[4,9,14],zero,zero,zero,zero,zero,zero,xmm4[u,u,u,u,u,u,u]
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,u,u,1,6,11],zero,zero,zero,zero
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
index c77b232fde969..e4dc257543d20 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
@@ -2015,20 +2015,24 @@ define void @load_i8_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[3,9,15],zero,zero,xmm7[1,7,13],zero,zero,zero,xmm7[u,u,u,u,u]
; AVX512-NEXT: vpor %xmm7, %xmm10, %xmm7
; AVX512-NEXT: vpternlogq $226, %xmm5, %xmm9, %xmm7
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm5 = [128,128,0,6,12,128,128,128,4,10,128,128,128,2,8,14]
+; AVX512-NEXT: vpshufb %xmm5, %xmm2, %xmm9
; AVX512-NEXT: vpternlogq $202, %ymm3, %ymm4, %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX512-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,xmm3[0,6,12],zero,zero,zero,xmm3[4,10,u,u,u,u,u,u]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = xmm2[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm2[2,8,14]
-; AVX512-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm5[5,6,7]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14],zero,zero,xmm0[u,u,u,u,u,u]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm9 = xmm1[u,u,u,u,u,u,u,u,u,u,0,6,12],zero,zero,zero
+; AVX512-NEXT: vpshufb %xmm5, %xmm3, %xmm4
+; AVX512-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm9[5,6,7]
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm5 = [4,10,128,128,128,2,8,14,128,128,0,6,12,128,128,128]
+; AVX512-NEXT: vpshufb %xmm5, %xmm1, %xmm9
+; AVX512-NEXT: vpshufb %xmm5, %xmm0, %xmm5
; AVX512-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm9[5,6,7]
; AVX512-NEXT: vpor %xmm4, %xmm5, %xmm4
-; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm3[1,7,13],zero,zero,zero,xmm3[5,11,u,u,u,u,u,u]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm2[3,9,15]
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm5 = [128,128,1,7,13,128,128,128,5,11,128,128,128,3,9,15]
+; AVX512-NEXT: vpshufb %xmm5, %xmm2, %xmm2
+; AVX512-NEXT: vpshufb %xmm5, %xmm3, %xmm3
; AVX512-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3,4],xmm2[5,6,7]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15],zero,zero,xmm0[u,u,u,u,u,u]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,u,u,u,1,7,13],zero,zero,zero
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm3 = [5,11,128,128,128,3,9,15,128,128,1,7,13,128,128,128]
+; AVX512-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX512-NEXT: vpshufb %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
; AVX512-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa %xmm8, (%rsi)
@@ -2083,20 +2087,24 @@ define void @load_i8_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[3,9,15],zero,zero,xmm7[1,7,13],zero,zero,zero,xmm7[u,u,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm7, %xmm10, %xmm7
; AVX512-FCP-NEXT: vpternlogq $226, %xmm5, %xmm9, %xmm7
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [128,128,0,6,12,128,128,128,4,10,128,128,128,2,8,14]
+; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm2, %xmm9
; AVX512-FCP-NEXT: vpternlogq $202, %ymm3, %ymm4, %ymm0
; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,xmm3[0,6,12],zero,zero,zero,xmm3[4,10,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm2[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm2[2,8,14]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm5[5,6,7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14],zero,zero,xmm0[u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm1[u,u,u,u,u,u,u,u,u,u,0,6,12],zero,zero,zero
+; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm3, %xmm4
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm9[5,6,7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [4,10,128,128,128,2,8,14,128,128,0,6,12,128,128,128]
+; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm1, %xmm9
+; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm0, %xmm5
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm9[5,6,7]
; AVX512-FCP-NEXT: vpor %xmm4, %xmm5, %xmm4
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm3[1,7,13],zero,zero,zero,xmm3[5,11,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm2[3,9,15]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [128,128,1,7,13,128,128,128,5,11,128,128,128,3,9,15]
+; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm2, %xmm2
+; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm3, %xmm3
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3,4],xmm2[5,6,7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15],zero,zero,xmm0[u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,u,u,u,1,7,13],zero,zero,zero
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [5,11,128,128,128,3,9,15,128,128,1,7,13,128,128,128]
+; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm0, %xmm0
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
; AVX512-FCP-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX512-FCP-NEXT: vmovdqa %xmm8, (%rsi)
@@ -2151,20 +2159,24 @@ define void @load_i8_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[3,9,15],zero,zero,xmm7[1,7,13],zero,zero,zero,xmm7[u,u,u,u,u]
; AVX512DQ-NEXT: vpor %xmm7, %xmm10, %xmm7
; AVX512DQ-NEXT: vpternlogq $226, %xmm5, %xmm9, %xmm7
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm5 = [128,128,0,6,12,128,128,128,4,10,128,128,128,2,8,14]
+; AVX512DQ-NEXT: vpshufb %xmm5, %xmm2, %xmm9
; AVX512DQ-NEXT: vpternlogq $202, %ymm3, %ymm4, %ymm0
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,xmm3[0,6,12],zero,zero,zero,xmm3[4,10,u,u,u,u,u,u]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm5 = xmm2[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm2[2,8,14]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm5[5,6,7]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14],zero,zero,xmm0[u,u,u,u,u,u]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm9 = xmm1[u,u,u,u,u,u,u,u,u,u,0,6,12],zero,zero,zero
+; AVX512DQ-NEXT: vpshufb %xmm5, %xmm3, %xmm4
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm9[5,6,7]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm5 = [4,10,128,128,128,2,8,14,128,128,0,6,12,128,128,128]
+; AVX512DQ-NEXT: vpshufb %xmm5, %xmm1, %xmm9
+; AVX512DQ-NEXT: vpshufb %xmm5, %xmm0, %xmm5
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm9[5,6,7]
; AVX512DQ-NEXT: vpor %xmm4, %xmm5, %xmm4
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm3[1,7,13],zero,zero,zero,xmm3[5,11,u,u,u,u,u,u]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm2[3,9,15]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm5 = [128,128,1,7,13,128,128,128,5,11,128,128,128,3,9,15]
+; AVX512DQ-NEXT: vpshufb %xmm5, %xmm2, %xmm2
+; AVX512DQ-NEXT: vpshufb %xmm5, %xmm3, %xmm3
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3,4],xmm2[5,6,7]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15],zero,zero,xmm0[u,u,u,u,u,u]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,u,u,u,1,7,13],zero,zero,zero
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm3 = [5,11,128,128,128,3,9,15,128,128,1,7,13,128,128,128]
+; AVX512DQ-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX512DQ-NEXT: vpshufb %xmm3, %xmm0, %xmm0
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
; AVX512DQ-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX512DQ-NEXT: vmovdqa %xmm8, (%rsi)
@@ -2219,20 +2231,24 @@ define void @load_i8_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[3,9,15],zero,zero,xmm7[1,7,13],zero,zero,zero,xmm7[u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm7, %xmm10, %xmm7
; AVX512DQ-FCP-NEXT: vpternlogq $226, %xmm5, %xmm9, %xmm7
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [128,128,0,6,12,128,128,128,4,10,128,128,128,2,8,14]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm2, %xmm9
; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm3, %ymm4, %ymm0
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,xmm3[0,6,12],zero,zero,zero,xmm3[4,10,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm2[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm2[2,8,14]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm5[5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14],zero,zero,xmm0[u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm1[u,u,u,u,u,u,u,u,u,u,0,6,12],zero,zero,zero
+; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm3, %xmm4
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm9[5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [4,10,128,128,128,2,8,14,128,128,0,6,12,128,128,128]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm1, %xmm9
+; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm0, %xmm5
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm9[5,6,7]
; AVX512DQ-FCP-NEXT: vpor %xmm4, %xmm5, %xmm4
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm3[1,7,13],zero,zero,zero,xmm3[5,11,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm2[3,9,15]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [128,128,1,7,13,128,128,128,5,11,128,128,128,3,9,15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm2, %xmm2
+; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm3, %xmm3
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3,4],xmm2[5,6,7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15],zero,zero,xmm0[u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,u,u,u,1,7,13],zero,zero,zero
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [5,11,128,128,128,3,9,15,128,128,1,7,13,128,128,128]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm0, %xmm0
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
; AVX512DQ-FCP-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX512DQ-FCP-NEXT: vmovdqa %xmm8, (%rsi)
@@ -2247,11 +2263,11 @@ define void @load_i8_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-LABEL: load_i8_stride6_vf16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512BW-NEXT: vmovdqa 32(%rdi), %ymm1
+; AVX512BW-NEXT: vmovdqa (%rdi), %ymm1
+; AVX512BW-NEXT: vmovdqa 32(%rdi), %ymm0
; AVX512BW-NEXT: movw $18724, %r10w # imm = 0x4924
; AVX512BW-NEXT: kmovd %r10d, %k1
-; AVX512BW-NEXT: vpblendmw %ymm1, %ymm0, %ymm2 {%k1}
+; AVX512BW-NEXT: vpblendmw %ymm0, %ymm1, %ymm2 {%k1}
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[0,6,12],zero,zero,zero,xmm2[4,10],zero,zero,zero,xmm2[u,u,u,u,u]
; AVX512BW-NEXT: vextracti128 $1, %ymm2, %xmm4
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm4[2,8,14],zero,zero,xmm4[0,6,12,u,u,u,u,u]
@@ -2273,7 +2289,7 @@ define void @load_i8_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-NEXT: vmovdqu8 %xmm4, %xmm2 {%k2}
; AVX512BW-NEXT: movw $9362, %di # imm = 0x2492
; AVX512BW-NEXT: kmovd %edi, %k3
-; AVX512BW-NEXT: vpblendmw %ymm0, %ymm1, %ymm4 {%k3}
+; AVX512BW-NEXT: vpblendmw %ymm1, %ymm0, %ymm4 {%k3}
; AVX512BW-NEXT: vextracti128 $1, %ymm4, %xmm6
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm6[4,10],zero,zero,zero,xmm6[2,8,14,u,u,u,u,u]
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm4[2,8,14],zero,zero,xmm4[0,6,12],zero,zero,zero,xmm4[u,u,u,u,u]
@@ -2289,22 +2305,26 @@ define void @load_i8_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[1,7,13]
; AVX512BW-NEXT: vpor %xmm6, %xmm9, %xmm6
; AVX512BW-NEXT: vmovdqu8 %xmm6, %xmm4 {%k2}
-; AVX512BW-NEXT: vmovdqu16 %ymm0, %ymm1 {%k1}
-; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm0
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,xmm0[0,6,12],zero,zero,zero,xmm0[4,10,u,u,u,u,u,u]
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm5[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[2,8,14]
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm6 = [128,128,0,6,12,128,128,128,4,10,128,128,128,2,8,14]
+; AVX512BW-NEXT: vpshufb %xmm6, %xmm5, %xmm9
+; AVX512BW-NEXT: vmovdqu16 %ymm1, %ymm0 {%k1}
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512BW-NEXT: vpshufb %xmm6, %xmm1, %xmm6
; AVX512BW-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm9[5,6,7]
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm1[4,10],zero,zero,zero,xmm1[2,8,14],zero,zero,xmm1[u,u,u,u,u,u]
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm10 = xmm7[u,u,u,u,u,u,u,u,u,u,0,6,12],zero,zero,zero
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm9 = [4,10,128,128,128,2,8,14,128,128,0,6,12,128,128,128]
+; AVX512BW-NEXT: vpshufb %xmm9, %xmm7, %xmm10
+; AVX512BW-NEXT: vpshufb %xmm9, %xmm0, %xmm9
; AVX512BW-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4],xmm10[5,6,7]
; AVX512BW-NEXT: vpor %xmm6, %xmm9, %xmm6
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[1,7,13],zero,zero,zero,xmm0[5,11,u,u,u,u,u,u]
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[3,9,15]
-; AVX512BW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm5[5,6,7]
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[5,11],zero,zero,zero,xmm1[3,9,15],zero,zero,xmm1[u,u,u,u,u,u]
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm5 = xmm7[u,u,u,u,u,u,u,u,u,u,1,7,13],zero,zero,zero
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm9 = [128,128,1,7,13,128,128,128,5,11,128,128,128,3,9,15]
+; AVX512BW-NEXT: vpshufb %xmm9, %xmm5, %xmm5
+; AVX512BW-NEXT: vpshufb %xmm9, %xmm1, %xmm1
; AVX512BW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
-; AVX512BW-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm5 = [5,11,128,128,128,3,9,15,128,128,1,7,13,128,128,128]
+; AVX512BW-NEXT: vpshufb %xmm5, %xmm7, %xmm7
+; AVX512BW-NEXT: vpshufb %xmm5, %xmm0, %xmm0
+; AVX512BW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm7[5,6,7]
+; AVX512BW-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: vmovdqa %xmm3, (%rsi)
; AVX512BW-NEXT: vmovdqa %xmm2, (%rdx)
; AVX512BW-NEXT: vmovdqa %xmm8, (%rcx)
@@ -2317,11 +2337,11 @@ define void @load_i8_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-FCP-LABEL: load_i8_stride6_vf16:
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
+; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm1
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm0
; AVX512BW-FCP-NEXT: movw $18724, %r10w # imm = 0x4924
; AVX512BW-FCP-NEXT: kmovd %r10d, %k1
-; AVX512BW-FCP-NEXT: vpblendmw %ymm1, %ymm0, %ymm2 {%k1}
+; AVX512BW-FCP-NEXT: vpblendmw %ymm0, %ymm1, %ymm2 {%k1}
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[0,6,12],zero,zero,zero,xmm2[4,10],zero,zero,zero,xmm2[u,u,u,u,u]
; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm2, %xmm4
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm4[2,8,14],zero,zero,xmm4[0,6,12,u,u,u,u,u]
@@ -2343,7 +2363,7 @@ define void @load_i8_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-FCP-NEXT: vmovdqu8 %xmm4, %xmm2 {%k2}
; AVX512BW-FCP-NEXT: movw $9362, %di # imm = 0x2492
; AVX512BW-FCP-NEXT: kmovd %edi, %k3
-; AVX512BW-FCP-NEXT: vpblendmw %ymm0, %ymm1, %ymm4 {%k3}
+; AVX512BW-FCP-NEXT: vpblendmw %ymm1, %ymm0, %ymm4 {%k3}
; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm4, %xmm6
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm6[4,10],zero,zero,zero,xmm6[2,8,14,u,u,u,u,u]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm4[2,8,14],zero,zero,xmm4[0,6,12],zero,zero,zero,xmm4[u,u,u,u,u]
@@ -2359,22 +2379,26 @@ define void @load_i8_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[1,7,13]
; AVX512BW-FCP-NEXT: vpor %xmm6, %xmm9, %xmm6
; AVX512BW-FCP-NEXT: vmovdqu8 %xmm6, %xmm4 {%k2}
-; AVX512BW-FCP-NEXT: vmovdqu16 %ymm0, %ymm1 {%k1}
-; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm1, %xmm0
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,xmm0[0,6,12],zero,zero,zero,xmm0[4,10,u,u,u,u,u,u]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm5[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[2,8,14]
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [128,128,0,6,12,128,128,128,4,10,128,128,128,2,8,14]
+; AVX512BW-FCP-NEXT: vpshufb %xmm6, %xmm5, %xmm9
+; AVX512BW-FCP-NEXT: vmovdqu16 %ymm1, %ymm0 {%k1}
+; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512BW-FCP-NEXT: vpshufb %xmm6, %xmm1, %xmm6
; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm9[5,6,7]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm1[4,10],zero,zero,zero,xmm1[2,8,14],zero,zero,xmm1[u,u,u,u,u,u]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm7[u,u,u,u,u,u,u,u,u,u,0,6,12],zero,zero,zero
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [4,10,128,128,128,2,8,14,128,128,0,6,12,128,128,128]
+; AVX512BW-FCP-NEXT: vpshufb %xmm9, %xmm7, %xmm10
+; AVX512BW-FCP-NEXT: vpshufb %xmm9, %xmm0, %xmm9
; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4],xmm10[5,6,7]
; AVX512BW-FCP-NEXT: vpor %xmm6, %xmm9, %xmm6
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[1,7,13],zero,zero,zero,xmm0[5,11,u,u,u,u,u,u]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[3,9,15]
-; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm5[5,6,7]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[5,11],zero,zero,zero,xmm1[3,9,15],zero,zero,xmm1[u,u,u,u,u,u]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm7[u,u,u,u,u,u,u,u,u,u,1,7,13],zero,zero,zero
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [128,128,1,7,13,128,128,128,5,11,128,128,128,3,9,15]
+; AVX512BW-FCP-NEXT: vpshufb %xmm9, %xmm5, %xmm5
+; AVX512BW-FCP-NEXT: vpshufb %xmm9, %xmm1, %xmm1
; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
-; AVX512BW-FCP-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [5,11,128,128,128,3,9,15,128,128,1,7,13,128,128,128]
+; AVX512BW-FCP-NEXT: vpshufb %xmm5, %xmm7, %xmm7
+; AVX512BW-FCP-NEXT: vpshufb %xmm5, %xmm0, %xmm0
+; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm7[5,6,7]
+; AVX512BW-FCP-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512BW-FCP-NEXT: vmovdqa %xmm3, (%rsi)
; AVX512BW-FCP-NEXT: vmovdqa %xmm2, (%rdx)
; AVX512BW-FCP-NEXT: vmovdqa %xmm8, (%rcx)
@@ -2387,11 +2411,11 @@ define void @load_i8_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-LABEL: load_i8_stride6_vf16:
; AVX512DQ-BW: # %bb.0:
; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512DQ-BW-NEXT: vmovdqa 32(%rdi), %ymm1
+; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm1
+; AVX512DQ-BW-NEXT: vmovdqa 32(%rdi), %ymm0
; AVX512DQ-BW-NEXT: movw $18724, %r10w # imm = 0x4924
; AVX512DQ-BW-NEXT: kmovd %r10d, %k1
-; AVX512DQ-BW-NEXT: vpblendmw %ymm1, %ymm0, %ymm2 {%k1}
+; AVX512DQ-BW-NEXT: vpblendmw %ymm0, %ymm1, %ymm2 {%k1}
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[0,6,12],zero,zero,zero,xmm2[4,10],zero,zero,zero,xmm2[u,u,u,u,u]
; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm2, %xmm4
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm4[2,8,14],zero,zero,xmm4[0,6,12,u,u,u,u,u]
@@ -2413,7 +2437,7 @@ define void @load_i8_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-NEXT: vmovdqu8 %xmm4, %xmm2 {%k2}
; AVX512DQ-BW-NEXT: movw $9362, %di # imm = 0x2492
; AVX512DQ-BW-NEXT: kmovd %edi, %k3
-; AVX512DQ-BW-NEXT: vpblendmw %ymm0, %ymm1, %ymm4 {%k3}
+; AVX512DQ-BW-NEXT: vpblendmw %ymm1, %ymm0, %ymm4 {%k3}
; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm4, %xmm6
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm6[4,10],zero,zero,zero,xmm6[2,8,14,u,u,u,u,u]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm4[2,8,14],zero,zero,xmm4[0,6,12],zero,zero,zero,xmm4[u,u,u,u,u]
@@ -2429,22 +2453,26 @@ define void @load_i8_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[1,7,13]
; AVX512DQ-BW-NEXT: vpor %xmm6, %xmm9, %xmm6
; AVX512DQ-BW-NEXT: vmovdqu8 %xmm6, %xmm4 {%k2}
-; AVX512DQ-BW-NEXT: vmovdqu16 %ymm0, %ymm1 {%k1}
-; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm1, %xmm0
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,xmm0[0,6,12],zero,zero,zero,xmm0[4,10,u,u,u,u,u,u]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm5[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[2,8,14]
+; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm6 = [128,128,0,6,12,128,128,128,4,10,128,128,128,2,8,14]
+; AVX512DQ-BW-NEXT: vpshufb %xmm6, %xmm5, %xmm9
+; AVX512DQ-BW-NEXT: vmovdqu16 %ymm1, %ymm0 {%k1}
+; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512DQ-BW-NEXT: vpshufb %xmm6, %xmm1, %xmm6
; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm9[5,6,7]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm1[4,10],zero,zero,zero,xmm1[2,8,14],zero,zero,xmm1[u,u,u,u,u,u]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm10 = xmm7[u,u,u,u,u,u,u,u,u,u,0,6,12],zero,zero,zero
+; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm9 = [4,10,128,128,128,2,8,14,128,128,0,6,12,128,128,128]
+; AVX512DQ-BW-NEXT: vpshufb %xmm9, %xmm7, %xmm10
+; AVX512DQ-BW-NEXT: vpshufb %xmm9, %xmm0, %xmm9
; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4],xmm10[5,6,7]
; AVX512DQ-BW-NEXT: vpor %xmm6, %xmm9, %xmm6
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[1,7,13],zero,zero,zero,xmm0[5,11,u,u,u,u,u,u]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[3,9,15]
-; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm5[5,6,7]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[5,11],zero,zero,zero,xmm1[3,9,15],zero,zero,xmm1[u,u,u,u,u,u]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm5 = xmm7[u,u,u,u,u,u,u,u,u,u,1,7,13],zero,zero,zero
+; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm9 = [128,128,1,7,13,128,128,128,5,11,128,128,128,3,9,15]
+; AVX512DQ-BW-NEXT: vpshufb %xmm9, %xmm5, %xmm5
+; AVX512DQ-BW-NEXT: vpshufb %xmm9, %xmm1, %xmm1
; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
-; AVX512DQ-BW-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm5 = [5,11,128,128,128,3,9,15,128,128,1,7,13,128,128,128]
+; AVX512DQ-BW-NEXT: vpshufb %xmm5, %xmm7, %xmm7
+; AVX512DQ-BW-NEXT: vpshufb %xmm5, %xmm0, %xmm0
+; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm7[5,6,7]
+; AVX512DQ-BW-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512DQ-BW-NEXT: vmovdqa %xmm3, (%rsi)
; AVX512DQ-BW-NEXT: vmovdqa %xmm2, (%rdx)
; AVX512DQ-BW-NEXT: vmovdqa %xmm8, (%rcx)
@@ -2457,11 +2485,11 @@ define void @load_i8_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-FCP-LABEL: load_i8_stride6_vf16:
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm0
; AVX512DQ-BW-FCP-NEXT: movw $18724, %r10w # imm = 0x4924
; AVX512DQ-BW-FCP-NEXT: kmovd %r10d, %k1
-; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm1, %ymm0, %ymm2 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm0, %ymm1, %ymm2 {%k1}
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[0,6,12],zero,zero,zero,xmm2[4,10],zero,zero,zero,xmm2[u,u,u,u,u]
; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm2, %xmm4
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm4[2,8,14],zero,zero,xmm4[0,6,12,u,u,u,u,u]
@@ -2483,7 +2511,7 @@ define void @load_i8_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %xmm4, %xmm2 {%k2}
; AVX512DQ-BW-FCP-NEXT: movw $9362, %di # imm = 0x2492
; AVX512DQ-BW-FCP-NEXT: kmovd %edi, %k3
-; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm0, %ymm1, %ymm4 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm1, %ymm0, %ymm4 {%k3}
; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm4, %xmm6
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm6[4,10],zero,zero,zero,xmm6[2,8,14,u,u,u,u,u]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm4[2,8,14],zero,zero,xmm4[0,6,12],zero,zero,zero,xmm4[u,u,u,u,u]
@@ -2499,22 +2527,26 @@ define void @load_i8_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[1,7,13]
; AVX512DQ-BW-FCP-NEXT: vpor %xmm6, %xmm9, %xmm6
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %xmm6, %xmm4 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %ymm0, %ymm1 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm1, %xmm0
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,xmm0[0,6,12],zero,zero,zero,xmm0[4,10,u,u,u,u,u,u]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm5[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[2,8,14]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [128,128,0,6,12,128,128,128,4,10,128,128,128,2,8,14]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm6, %xmm5, %xmm9
+; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %ymm1, %ymm0 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm6, %xmm1, %xmm6
; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm9[5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm1[4,10],zero,zero,zero,xmm1[2,8,14],zero,zero,xmm1[u,u,u,u,u,u]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm7[u,u,u,u,u,u,u,u,u,u,0,6,12],zero,zero,zero
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [4,10,128,128,128,2,8,14,128,128,0,6,12,128,128,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm9, %xmm7, %xmm10
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm9, %xmm0, %xmm9
; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4],xmm10[5,6,7]
; AVX512DQ-BW-FCP-NEXT: vpor %xmm6, %xmm9, %xmm6
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[1,7,13],zero,zero,zero,xmm0[5,11,u,u,u,u,u,u]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[3,9,15]
-; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm5[5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[5,11],zero,zero,zero,xmm1[3,9,15],zero,zero,xmm1[u,u,u,u,u,u]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm7[u,u,u,u,u,u,u,u,u,u,1,7,13],zero,zero,zero
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [128,128,1,7,13,128,128,128,5,11,128,128,128,3,9,15]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm9, %xmm5, %xmm5
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm9, %xmm1, %xmm1
; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [5,11,128,128,128,3,9,15,128,128,1,7,13,128,128,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm5, %xmm7, %xmm7
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm5, %xmm0, %xmm0
+; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm7[5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm3, (%rsi)
; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm2, (%rdx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm8, (%rcx)
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll
index 0ee10a33c1d0c..130ae31b37bfe 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll
@@ -2809,20 +2809,22 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512: # %bb.0:
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm3 = [128,128,128,5,12,128,128,1,8,15,128,128,4,11,128,128]
; AVX512-NEXT: vmovdqa 80(%rdi), %xmm0
-; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[u,u,u,u,u,u,u,u,u,u],zero,zero,xmm0[4,11],zero,zero
+; AVX512-NEXT: vpshufb %xmm3, %xmm0, %xmm4
; AVX512-NEXT: vmovdqa (%rdi), %ymm1
; AVX512-NEXT: vmovdqa 32(%rdi), %ymm2
; AVX512-NEXT: vmovdqa {{.*#+}} ymm5 = [65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0]
; AVX512-NEXT: vpternlogq $202, %ymm2, %ymm1, %ymm5
-; AVX512-NEXT: vextracti128 $1, %ymm5, %xmm4
-; AVX512-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm4[5,12],zero,zero,xmm4[1,8,15,u,u,u,u,u,u]
-; AVX512-NEXT: vpblendw {{.*#+}} xmm6 = xmm4[0,1,2,3,4],xmm3[5,6,7]
+; AVX512-NEXT: vextracti128 $1, %ymm5, %xmm6
+; AVX512-NEXT: vpshufb %xmm3, %xmm6, %xmm3
+; AVX512-NEXT: vpblendw {{.*#+}} xmm6 = xmm3[0,1,2,3,4],xmm4[5,6,7]
; AVX512-NEXT: vmovdqa 96(%rdi), %xmm3
; AVX512-NEXT: vmovdqa 64(%rdi), %xmm4
; AVX512-NEXT: vpblendd {{.*#+}} xmm7 = xmm3[0],xmm4[1],xmm3[2],xmm4[3]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u,u,u,u,6,13],zero,zero,xmm7[2,9]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[0,7,14],zero,zero,xmm5[3,10],zero,zero,zero,xmm5[u,u,u,u,u,u]
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm8 = [0,7,14,128,128,3,10,128,128,128,6,13,128,128,2,9]
+; AVX512-NEXT: vpshufb %xmm8, %xmm7, %xmm7
+; AVX512-NEXT: vpshufb %xmm8, %xmm5, %xmm5
; AVX512-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm7[5,6,7]
; AVX512-NEXT: vpor %xmm6, %xmm5, %xmm5
; AVX512-NEXT: vmovdqa {{.*#+}} ymm8 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
@@ -2905,20 +2907,22 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP: # %bb.0:
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [128,128,128,5,12,128,128,1,8,15,128,128,4,11,128,128]
; AVX512-FCP-NEXT: vmovdqa 80(%rdi), %xmm0
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[u,u,u,u,u,u,u,u,u,u],zero,zero,xmm0[4,11],zero,zero
+; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm0, %xmm4
; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm1
; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0]
; AVX512-FCP-NEXT: vpternlogq $202, %ymm2, %ymm1, %ymm5
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm5, %xmm4
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm4[5,12],zero,zero,xmm4[1,8,15,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm4[0,1,2,3,4],xmm3[5,6,7]
+; AVX512-FCP-NEXT: vextracti128 $1, %ymm5, %xmm6
+; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm6, %xmm3
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm3[0,1,2,3,4],xmm4[5,6,7]
; AVX512-FCP-NEXT: vmovdqa 96(%rdi), %xmm3
; AVX512-FCP-NEXT: vmovdqa 64(%rdi), %xmm4
; AVX512-FCP-NEXT: vpblendd {{.*#+}} xmm7 = xmm3[0],xmm4[1],xmm3[2],xmm4[3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u,u,u,u,6,13],zero,zero,xmm7[2,9]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[0,7,14],zero,zero,xmm5[3,10],zero,zero,zero,xmm5[u,u,u,u,u,u]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [0,7,14,128,128,3,10,128,128,128,6,13,128,128,2,9]
+; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm7, %xmm7
+; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm5, %xmm5
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm7[5,6,7]
; AVX512-FCP-NEXT: vpor %xmm6, %xmm5, %xmm5
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
@@ -3001,20 +3005,22 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %r10
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm3 = [128,128,128,5,12,128,128,1,8,15,128,128,4,11,128,128]
; AVX512DQ-NEXT: vmovdqa 80(%rdi), %xmm0
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[u,u,u,u,u,u,u,u,u,u],zero,zero,xmm0[4,11],zero,zero
+; AVX512DQ-NEXT: vpshufb %xmm3, %xmm0, %xmm4
; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm1
; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm2
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm5 = [65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0]
; AVX512DQ-NEXT: vpternlogq $202, %ymm2, %ymm1, %ymm5
-; AVX512DQ-NEXT: vextracti128 $1, %ymm5, %xmm4
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm4[5,12],zero,zero,xmm4[1,8,15,u,u,u,u,u,u]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm6 = xmm4[0,1,2,3,4],xmm3[5,6,7]
+; AVX512DQ-NEXT: vextracti128 $1, %ymm5, %xmm6
+; AVX512DQ-NEXT: vpshufb %xmm3, %xmm6, %xmm3
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm6 = xmm3[0,1,2,3,4],xmm4[5,6,7]
; AVX512DQ-NEXT: vmovdqa 96(%rdi), %xmm3
; AVX512DQ-NEXT: vmovdqa 64(%rdi), %xmm4
; AVX512DQ-NEXT: vpblendd {{.*#+}} xmm7 = xmm3[0],xmm4[1],xmm3[2],xmm4[3]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u,u,u,u,6,13],zero,zero,xmm7[2,9]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[0,7,14],zero,zero,xmm5[3,10],zero,zero,zero,xmm5[u,u,u,u,u,u]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm8 = [0,7,14,128,128,3,10,128,128,128,6,13,128,128,2,9]
+; AVX512DQ-NEXT: vpshufb %xmm8, %xmm7, %xmm7
+; AVX512DQ-NEXT: vpshufb %xmm8, %xmm5, %xmm5
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm7[5,6,7]
; AVX512DQ-NEXT: vpor %xmm6, %xmm5, %xmm5
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm8 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
@@ -3097,20 +3103,22 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP: # %bb.0:
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [128,128,128,5,12,128,128,1,8,15,128,128,4,11,128,128]
; AVX512DQ-FCP-NEXT: vmovdqa 80(%rdi), %xmm0
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[u,u,u,u,u,u,u,u,u,u],zero,zero,xmm0[4,11],zero,zero
+; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm0, %xmm4
; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm1
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0]
; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm2, %ymm1, %ymm5
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm5, %xmm4
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm4[5,12],zero,zero,xmm4[1,8,15,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm4[0,1,2,3,4],xmm3[5,6,7]
+; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm5, %xmm6
+; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm6, %xmm3
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm3[0,1,2,3,4],xmm4[5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa 96(%rdi), %xmm3
; AVX512DQ-FCP-NEXT: vmovdqa 64(%rdi), %xmm4
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} xmm7 = xmm3[0],xmm4[1],xmm3[2],xmm4[3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u,u,u,u,6,13],zero,zero,xmm7[2,9]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[0,7,14],zero,zero,xmm5[3,10],zero,zero,zero,xmm5[u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [0,7,14,128,128,3,10,128,128,128,6,13,128,128,2,9]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm7, %xmm7
+; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm5, %xmm5
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm7[5,6,7]
; AVX512DQ-FCP-NEXT: vpor %xmm6, %xmm5, %xmm5
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
@@ -3193,97 +3201,99 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-NEXT: vmovdqa 96(%rdi), %xmm0
-; AVX512BW-NEXT: vmovdqa 64(%rdi), %xmm1
-; AVX512BW-NEXT: vpblendd {{.*#+}} xmm2 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[u,u,u,u,u,u,u,u,u,u,6,13],zero,zero,xmm2[2,9]
-; AVX512BW-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512BW-NEXT: vmovdqa 32(%rdi), %ymm2
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm3 = [128,128,128,5,12,128,128,1,8,15,128,128,4,11,128,128]
+; AVX512BW-NEXT: vmovdqa 80(%rdi), %xmm0
+; AVX512BW-NEXT: vpshufb %xmm3, %xmm0, %xmm4
+; AVX512BW-NEXT: vmovdqa (%rdi), %ymm2
+; AVX512BW-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512BW-NEXT: movw $-28382, %r11w # imm = 0x9122
; AVX512BW-NEXT: kmovd %r11d, %k1
-; AVX512BW-NEXT: vpblendmw %ymm2, %ymm3, %ymm5 {%k1}
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[0,7,14],zero,zero,xmm5[3,10],zero,zero,zero,xmm5[u,u,u,u,u,u]
-; AVX512BW-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3,4],xmm4[5,6,7]
-; AVX512BW-NEXT: vextracti128 $1, %ymm5, %xmm5
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm5[5,12],zero,zero,xmm5[1,8,15,u,u,u,u,u,u]
-; AVX512BW-NEXT: vmovdqa 80(%rdi), %xmm5
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm7 = xmm5[u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[4,11],zero,zero
-; AVX512BW-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm7[5,6,7]
-; AVX512BW-NEXT: vpor %xmm6, %xmm4, %xmm4
+; AVX512BW-NEXT: vpblendmw %ymm1, %ymm2, %ymm5 {%k1}
+; AVX512BW-NEXT: vextracti128 $1, %ymm5, %xmm6
+; AVX512BW-NEXT: vpshufb %xmm3, %xmm6, %xmm3
+; AVX512BW-NEXT: vpblendw {{.*#+}} xmm6 = xmm3[0,1,2,3,4],xmm4[5,6,7]
+; AVX512BW-NEXT: vmovdqa 96(%rdi), %xmm3
+; AVX512BW-NEXT: vmovdqa 64(%rdi), %xmm4
+; AVX512BW-NEXT: vpblendd {{.*#+}} xmm7 = xmm3[0],xmm4[1],xmm3[2],xmm4[3]
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm8 = [0,7,14,128,128,3,10,128,128,128,6,13,128,128,2,9]
+; AVX512BW-NEXT: vpshufb %xmm8, %xmm7, %xmm7
+; AVX512BW-NEXT: vpshufb %xmm8, %xmm5, %xmm5
+; AVX512BW-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm7[5,6,7]
+; AVX512BW-NEXT: vpor %xmm6, %xmm5, %xmm5
; AVX512BW-NEXT: movw $4644, %di # imm = 0x1224
; AVX512BW-NEXT: kmovd %edi, %k2
-; AVX512BW-NEXT: vpblendmw %ymm2, %ymm3, %ymm6 {%k2}
+; AVX512BW-NEXT: vpblendmw %ymm1, %ymm2, %ymm6 {%k2}
; AVX512BW-NEXT: vextracti128 $1, %ymm6, %xmm7
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm7[6,13],zero,zero,xmm7[2,9,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[1,8,15],zero,zero,xmm6[4,11],zero,zero,xmm6[u,u,u,u,u,u,u]
; AVX512BW-NEXT: vpor %xmm7, %xmm6, %xmm6
-; AVX512BW-NEXT: vpblendw {{.*#+}} xmm7 = xmm1[0],xmm0[1],xmm1[2,3,4],xmm0[5],xmm1[6,7]
+; AVX512BW-NEXT: vpblendw {{.*#+}} xmm7 = xmm4[0],xmm3[1],xmm4[2,3,4],xmm3[5],xmm4[6,7]
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u,u,u,0,7,14],zero,zero,xmm7[3,10]
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm8 = xmm5[u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[5,12],zero,zero
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm8 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[5,12],zero,zero
; AVX512BW-NEXT: vpor %xmm7, %xmm8, %xmm7
; AVX512BW-NEXT: movw $-512, %di # imm = 0xFE00
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vmovdqu8 %xmm7, %xmm6 {%k1}
; AVX512BW-NEXT: movw $8772, %di # imm = 0x2244
; AVX512BW-NEXT: kmovd %edi, %k3
-; AVX512BW-NEXT: vpblendmw %ymm2, %ymm3, %ymm7 {%k3}
+; AVX512BW-NEXT: vpblendmw %ymm1, %ymm2, %ymm7 {%k3}
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm8 = xmm7[2,9],zero,zero,zero,xmm7[5,12],zero,zero,xmm7[u,u,u,u,u,u,u]
; AVX512BW-NEXT: vextracti128 $1, %ymm7, %xmm7
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,xmm7[0,7,14],zero,zero,xmm7[3,10,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vpor %xmm7, %xmm8, %xmm7
-; AVX512BW-NEXT: vpblendw {{.*#+}} xmm8 = xmm1[0,1],xmm0[2],xmm1[3,4],xmm0[5],xmm1[6,7]
+; AVX512BW-NEXT: vpblendw {{.*#+}} xmm8 = xmm4[0,1],xmm3[2],xmm4[3,4],xmm3[5],xmm4[6,7]
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,u,u,u,u,1,8,15],zero,zero,xmm8[4,11]
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm5[u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[6,13],zero,zero
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[6,13],zero,zero
; AVX512BW-NEXT: vpor %xmm9, %xmm8, %xmm8
; AVX512BW-NEXT: vmovdqu8 %xmm8, %xmm7 {%k1}
; AVX512BW-NEXT: movw $9288, %di # imm = 0x2448
; AVX512BW-NEXT: kmovd %edi, %k4
-; AVX512BW-NEXT: vpblendmw %ymm2, %ymm3, %ymm8 {%k4}
+; AVX512BW-NEXT: vpblendmw %ymm1, %ymm2, %ymm8 {%k4}
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm8[3,10],zero,zero,zero,xmm8[6,13],zero,zero,xmm8[u,u,u,u,u,u,u]
; AVX512BW-NEXT: vextracti128 $1, %ymm8, %xmm8
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,xmm8[1,8,15],zero,zero,xmm8[4,11,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vpor %xmm9, %xmm8, %xmm8
-; AVX512BW-NEXT: vpblendd {{.*#+}} xmm9 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
+; AVX512BW-NEXT: vpblendd {{.*#+}} xmm9 = xmm4[0],xmm3[1],xmm4[2],xmm3[3]
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm10 = xmm9[u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm9[5,12]
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm11 = xmm5[u,u,u,u,u,u,u,u,u],zero,zero,xmm5[0,7,14],zero,zero
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm11 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[0,7,14],zero,zero
; AVX512BW-NEXT: vpor %xmm11, %xmm10, %xmm10
; AVX512BW-NEXT: vmovdqu8 %xmm10, %xmm8 {%k1}
-; AVX512BW-NEXT: vpblendmw %ymm3, %ymm2, %ymm10 {%k2}
+; AVX512BW-NEXT: vpblendmw %ymm2, %ymm1, %ymm10 {%k2}
; AVX512BW-NEXT: vextracti128 $1, %ymm10, %xmm11
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm11 = zero,zero,xmm11[2,9],zero,zero,zero,xmm11[5,12,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[4,11],zero,zero,xmm10[0,7,14],zero,zero,xmm10[u,u,u,u,u,u,u]
; AVX512BW-NEXT: vpor %xmm11, %xmm10, %xmm10
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm9[6,13]
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm11 = xmm5[u,u,u,u,u,u,u,u,u],zero,zero,xmm5[1,8,15],zero,zero
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm11 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[1,8,15],zero,zero
; AVX512BW-NEXT: vpor %xmm11, %xmm9, %xmm9
; AVX512BW-NEXT: vmovdqu8 %xmm9, %xmm10 {%k1}
-; AVX512BW-NEXT: vpblendmw %ymm3, %ymm2, %ymm9 {%k3}
+; AVX512BW-NEXT: vpblendmw %ymm2, %ymm1, %ymm9 {%k3}
; AVX512BW-NEXT: vextracti128 $1, %ymm9, %xmm11
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm11 = zero,zero,xmm11[3,10],zero,zero,zero,xmm11[6,13,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[5,12],zero,zero,xmm9[1,8,15],zero,zero,xmm9[u,u,u,u,u,u,u]
; AVX512BW-NEXT: vpor %xmm11, %xmm9, %xmm9
-; AVX512BW-NEXT: vpblendw {{.*#+}} xmm11 = xmm0[0],xmm1[1,2],xmm0[3],xmm1[4,5,6],xmm0[7]
+; AVX512BW-NEXT: vpblendw {{.*#+}} xmm11 = xmm3[0],xmm4[1,2],xmm3[3],xmm4[4,5,6],xmm3[7]
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u,u,u,u,u,4,11],zero,zero,xmm11[0,7,14]
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm12 = xmm5[u,u,u,u,u,u,u,u,u],zero,zero,xmm5[2,9],zero,zero,zero
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm12 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[2,9],zero,zero,zero
; AVX512BW-NEXT: vpor %xmm12, %xmm11, %xmm11
; AVX512BW-NEXT: vmovdqu8 %xmm11, %xmm9 {%k1}
-; AVX512BW-NEXT: vmovdqu16 %ymm3, %ymm2 {%k4}
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[6,13],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[u,u,u,u,u,u,u]
-; AVX512BW-NEXT: vextracti128 $1, %ymm2, %xmm2
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14,u,u,u,u,u,u,u]
-; AVX512BW-NEXT: vpor %xmm3, %xmm2, %xmm2
-; AVX512BW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4],xmm1[5,6],xmm0[7]
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,5,12],zero,zero,xmm0[1,8,15]
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm5[u,u,u,u,u,u,u,u,u],zero,zero,xmm5[3,10],zero,zero,zero
-; AVX512BW-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vmovdqu8 %xmm0, %xmm2 {%k1}
-; AVX512BW-NEXT: vmovdqa %xmm4, (%rsi)
+; AVX512BW-NEXT: vmovdqu16 %ymm2, %ymm1 {%k4}
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm2 = xmm1[6,13],zero,zero,xmm1[2,9],zero,zero,zero,xmm1[u,u,u,u,u,u,u]
+; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[4,11],zero,zero,xmm1[0,7,14,u,u,u,u,u,u,u]
+; AVX512BW-NEXT: vpor %xmm2, %xmm1, %xmm1
+; AVX512BW-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0],xmm4[1,2,3],xmm3[4],xmm4[5,6],xmm3[7]
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,u,u,5,12],zero,zero,xmm2[1,8,15]
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[3,10],zero,zero,zero
+; AVX512BW-NEXT: vpor %xmm0, %xmm2, %xmm0
+; AVX512BW-NEXT: vmovdqu8 %xmm0, %xmm1 {%k1}
+; AVX512BW-NEXT: vmovdqa %xmm5, (%rsi)
; AVX512BW-NEXT: vmovdqa %xmm6, (%rdx)
; AVX512BW-NEXT: vmovdqa %xmm7, (%rcx)
; AVX512BW-NEXT: vmovdqa %xmm8, (%r8)
; AVX512BW-NEXT: vmovdqa %xmm10, (%r9)
; AVX512BW-NEXT: vmovdqa %xmm9, (%r10)
-; AVX512BW-NEXT: vmovdqa %xmm2, (%rax)
+; AVX512BW-NEXT: vmovdqa %xmm1, (%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -3291,97 +3301,99 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-FCP-NEXT: vmovdqa 96(%rdi), %xmm0
-; AVX512BW-FCP-NEXT: vmovdqa 64(%rdi), %xmm1
-; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[u,u,u,u,u,u,u,u,u,u,6,13],zero,zero,xmm2[2,9]
-; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [128,128,128,5,12,128,128,1,8,15,128,128,4,11,128,128]
+; AVX512BW-FCP-NEXT: vmovdqa 80(%rdi), %xmm0
+; AVX512BW-FCP-NEXT: vpshufb %xmm3, %xmm0, %xmm4
+; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm2
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512BW-FCP-NEXT: movw $-28382, %r11w # imm = 0x9122
; AVX512BW-FCP-NEXT: kmovd %r11d, %k1
-; AVX512BW-FCP-NEXT: vpblendmw %ymm2, %ymm3, %ymm5 {%k1}
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[0,7,14],zero,zero,xmm5[3,10],zero,zero,zero,xmm5[u,u,u,u,u,u]
-; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3,4],xmm4[5,6,7]
-; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm5, %xmm5
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm5[5,12],zero,zero,xmm5[1,8,15,u,u,u,u,u,u]
-; AVX512BW-FCP-NEXT: vmovdqa 80(%rdi), %xmm5
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm5[u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[4,11],zero,zero
-; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm7[5,6,7]
-; AVX512BW-FCP-NEXT: vpor %xmm6, %xmm4, %xmm4
+; AVX512BW-FCP-NEXT: vpblendmw %ymm1, %ymm2, %ymm5 {%k1}
+; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm5, %xmm6
+; AVX512BW-FCP-NEXT: vpshufb %xmm3, %xmm6, %xmm3
+; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm3[0,1,2,3,4],xmm4[5,6,7]
+; AVX512BW-FCP-NEXT: vmovdqa 96(%rdi), %xmm3
+; AVX512BW-FCP-NEXT: vmovdqa 64(%rdi), %xmm4
+; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} xmm7 = xmm3[0],xmm4[1],xmm3[2],xmm4[3]
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [0,7,14,128,128,3,10,128,128,128,6,13,128,128,2,9]
+; AVX512BW-FCP-NEXT: vpshufb %xmm8, %xmm7, %xmm7
+; AVX512BW-FCP-NEXT: vpshufb %xmm8, %xmm5, %xmm5
+; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm7[5,6,7]
+; AVX512BW-FCP-NEXT: vpor %xmm6, %xmm5, %xmm5
; AVX512BW-FCP-NEXT: movw $4644, %di # imm = 0x1224
; AVX512BW-FCP-NEXT: kmovd %edi, %k2
-; AVX512BW-FCP-NEXT: vpblendmw %ymm2, %ymm3, %ymm6 {%k2}
+; AVX512BW-FCP-NEXT: vpblendmw %ymm1, %ymm2, %ymm6 {%k2}
; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm6, %xmm7
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm7[6,13],zero,zero,xmm7[2,9,u,u,u,u,u,u,u]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[1,8,15],zero,zero,xmm6[4,11],zero,zero,xmm6[u,u,u,u,u,u,u]
; AVX512BW-FCP-NEXT: vpor %xmm7, %xmm6, %xmm6
-; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm1[0],xmm0[1],xmm1[2,3,4],xmm0[5],xmm1[6,7]
+; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm4[0],xmm3[1],xmm4[2,3,4],xmm3[5],xmm4[6,7]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u,u,u,0,7,14],zero,zero,xmm7[3,10]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm5[u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[5,12],zero,zero
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[5,12],zero,zero
; AVX512BW-FCP-NEXT: vpor %xmm7, %xmm8, %xmm7
; AVX512BW-FCP-NEXT: movw $-512, %di # imm = 0xFE00
; AVX512BW-FCP-NEXT: kmovd %edi, %k1
; AVX512BW-FCP-NEXT: vmovdqu8 %xmm7, %xmm6 {%k1}
; AVX512BW-FCP-NEXT: movw $8772, %di # imm = 0x2244
; AVX512BW-FCP-NEXT: kmovd %edi, %k3
-; AVX512BW-FCP-NEXT: vpblendmw %ymm2, %ymm3, %ymm7 {%k3}
+; AVX512BW-FCP-NEXT: vpblendmw %ymm1, %ymm2, %ymm7 {%k3}
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm7[2,9],zero,zero,zero,xmm7[5,12],zero,zero,xmm7[u,u,u,u,u,u,u]
; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm7, %xmm7
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,xmm7[0,7,14],zero,zero,xmm7[3,10,u,u,u,u,u,u,u]
; AVX512BW-FCP-NEXT: vpor %xmm7, %xmm8, %xmm7
-; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm1[0,1],xmm0[2],xmm1[3,4],xmm0[5],xmm1[6,7]
+; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm4[0,1],xmm3[2],xmm4[3,4],xmm3[5],xmm4[6,7]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,u,u,u,u,1,8,15],zero,zero,xmm8[4,11]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm5[u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[6,13],zero,zero
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[6,13],zero,zero
; AVX512BW-FCP-NEXT: vpor %xmm9, %xmm8, %xmm8
; AVX512BW-FCP-NEXT: vmovdqu8 %xmm8, %xmm7 {%k1}
; AVX512BW-FCP-NEXT: movw $9288, %di # imm = 0x2448
; AVX512BW-FCP-NEXT: kmovd %edi, %k4
-; AVX512BW-FCP-NEXT: vpblendmw %ymm2, %ymm3, %ymm8 {%k4}
+; AVX512BW-FCP-NEXT: vpblendmw %ymm1, %ymm2, %ymm8 {%k4}
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm8[3,10],zero,zero,zero,xmm8[6,13],zero,zero,xmm8[u,u,u,u,u,u,u]
; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm8, %xmm8
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,xmm8[1,8,15],zero,zero,xmm8[4,11,u,u,u,u,u,u,u]
; AVX512BW-FCP-NEXT: vpor %xmm9, %xmm8, %xmm8
-; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} xmm9 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
+; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} xmm9 = xmm4[0],xmm3[1],xmm4[2],xmm3[3]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm9[u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm9[5,12]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm5[u,u,u,u,u,u,u,u,u],zero,zero,xmm5[0,7,14],zero,zero
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[0,7,14],zero,zero
; AVX512BW-FCP-NEXT: vpor %xmm11, %xmm10, %xmm10
; AVX512BW-FCP-NEXT: vmovdqu8 %xmm10, %xmm8 {%k1}
-; AVX512BW-FCP-NEXT: vpblendmw %ymm3, %ymm2, %ymm10 {%k2}
+; AVX512BW-FCP-NEXT: vpblendmw %ymm2, %ymm1, %ymm10 {%k2}
; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm10, %xmm11
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm11 = zero,zero,xmm11[2,9],zero,zero,zero,xmm11[5,12,u,u,u,u,u,u,u]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[4,11],zero,zero,xmm10[0,7,14],zero,zero,xmm10[u,u,u,u,u,u,u]
; AVX512BW-FCP-NEXT: vpor %xmm11, %xmm10, %xmm10
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm9[6,13]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm5[u,u,u,u,u,u,u,u,u],zero,zero,xmm5[1,8,15],zero,zero
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[1,8,15],zero,zero
; AVX512BW-FCP-NEXT: vpor %xmm11, %xmm9, %xmm9
; AVX512BW-FCP-NEXT: vmovdqu8 %xmm9, %xmm10 {%k1}
-; AVX512BW-FCP-NEXT: vpblendmw %ymm3, %ymm2, %ymm9 {%k3}
+; AVX512BW-FCP-NEXT: vpblendmw %ymm2, %ymm1, %ymm9 {%k3}
; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm9, %xmm11
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm11 = zero,zero,xmm11[3,10],zero,zero,zero,xmm11[6,13,u,u,u,u,u,u,u]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[5,12],zero,zero,xmm9[1,8,15],zero,zero,xmm9[u,u,u,u,u,u,u]
; AVX512BW-FCP-NEXT: vpor %xmm11, %xmm9, %xmm9
-; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm0[0],xmm1[1,2],xmm0[3],xmm1[4,5,6],xmm0[7]
+; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm3[0],xmm4[1,2],xmm3[3],xmm4[4,5,6],xmm3[7]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u,u,u,u,u,4,11],zero,zero,xmm11[0,7,14]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm5[u,u,u,u,u,u,u,u,u],zero,zero,xmm5[2,9],zero,zero,zero
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[2,9],zero,zero,zero
; AVX512BW-FCP-NEXT: vpor %xmm12, %xmm11, %xmm11
; AVX512BW-FCP-NEXT: vmovdqu8 %xmm11, %xmm9 {%k1}
-; AVX512BW-FCP-NEXT: vmovdqu16 %ymm3, %ymm2 {%k4}
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[6,13],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[u,u,u,u,u,u,u]
-; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm2, %xmm2
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14,u,u,u,u,u,u,u]
-; AVX512BW-FCP-NEXT: vpor %xmm3, %xmm2, %xmm2
-; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4],xmm1[5,6],xmm0[7]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,5,12],zero,zero,xmm0[1,8,15]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm5[u,u,u,u,u,u,u,u,u],zero,zero,xmm5[3,10],zero,zero,zero
-; AVX512BW-FCP-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512BW-FCP-NEXT: vmovdqu8 %xmm0, %xmm2 {%k1}
-; AVX512BW-FCP-NEXT: vmovdqa %xmm4, (%rsi)
+; AVX512BW-FCP-NEXT: vmovdqu16 %ymm2, %ymm1 {%k4}
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm1[6,13],zero,zero,xmm1[2,9],zero,zero,zero,xmm1[u,u,u,u,u,u,u]
+; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[4,11],zero,zero,xmm1[0,7,14,u,u,u,u,u,u,u]
+; AVX512BW-FCP-NEXT: vpor %xmm2, %xmm1, %xmm1
+; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0],xmm4[1,2,3],xmm3[4],xmm4[5,6],xmm3[7]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,u,u,5,12],zero,zero,xmm2[1,8,15]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[3,10],zero,zero,zero
+; AVX512BW-FCP-NEXT: vpor %xmm0, %xmm2, %xmm0
+; AVX512BW-FCP-NEXT: vmovdqu8 %xmm0, %xmm1 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqa %xmm5, (%rsi)
; AVX512BW-FCP-NEXT: vmovdqa %xmm6, (%rdx)
; AVX512BW-FCP-NEXT: vmovdqa %xmm7, (%rcx)
; AVX512BW-FCP-NEXT: vmovdqa %xmm8, (%r8)
; AVX512BW-FCP-NEXT: vmovdqa %xmm10, (%r9)
; AVX512BW-FCP-NEXT: vmovdqa %xmm9, (%r10)
-; AVX512BW-FCP-NEXT: vmovdqa %xmm2, (%rax)
+; AVX512BW-FCP-NEXT: vmovdqa %xmm1, (%rax)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
@@ -3389,97 +3401,99 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW: # %bb.0:
; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX512DQ-BW-NEXT: vmovdqa 96(%rdi), %xmm0
-; AVX512DQ-BW-NEXT: vmovdqa 64(%rdi), %xmm1
-; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} xmm2 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[u,u,u,u,u,u,u,u,u,u,6,13],zero,zero,xmm2[2,9]
-; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512DQ-BW-NEXT: vmovdqa 32(%rdi), %ymm2
+; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm3 = [128,128,128,5,12,128,128,1,8,15,128,128,4,11,128,128]
+; AVX512DQ-BW-NEXT: vmovdqa 80(%rdi), %xmm0
+; AVX512DQ-BW-NEXT: vpshufb %xmm3, %xmm0, %xmm4
+; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm2
+; AVX512DQ-BW-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512DQ-BW-NEXT: movw $-28382, %r11w # imm = 0x9122
; AVX512DQ-BW-NEXT: kmovd %r11d, %k1
-; AVX512DQ-BW-NEXT: vpblendmw %ymm2, %ymm3, %ymm5 {%k1}
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[0,7,14],zero,zero,xmm5[3,10],zero,zero,zero,xmm5[u,u,u,u,u,u]
-; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3,4],xmm4[5,6,7]
-; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm5, %xmm5
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm5[5,12],zero,zero,xmm5[1,8,15,u,u,u,u,u,u]
-; AVX512DQ-BW-NEXT: vmovdqa 80(%rdi), %xmm5
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm7 = xmm5[u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[4,11],zero,zero
-; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm7[5,6,7]
-; AVX512DQ-BW-NEXT: vpor %xmm6, %xmm4, %xmm4
+; AVX512DQ-BW-NEXT: vpblendmw %ymm1, %ymm2, %ymm5 {%k1}
+; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm5, %xmm6
+; AVX512DQ-BW-NEXT: vpshufb %xmm3, %xmm6, %xmm3
+; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} xmm6 = xmm3[0,1,2,3,4],xmm4[5,6,7]
+; AVX512DQ-BW-NEXT: vmovdqa 96(%rdi), %xmm3
+; AVX512DQ-BW-NEXT: vmovdqa 64(%rdi), %xmm4
+; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} xmm7 = xmm3[0],xmm4[1],xmm3[2],xmm4[3]
+; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm8 = [0,7,14,128,128,3,10,128,128,128,6,13,128,128,2,9]
+; AVX512DQ-BW-NEXT: vpshufb %xmm8, %xmm7, %xmm7
+; AVX512DQ-BW-NEXT: vpshufb %xmm8, %xmm5, %xmm5
+; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm7[5,6,7]
+; AVX512DQ-BW-NEXT: vpor %xmm6, %xmm5, %xmm5
; AVX512DQ-BW-NEXT: movw $4644, %di # imm = 0x1224
; AVX512DQ-BW-NEXT: kmovd %edi, %k2
-; AVX512DQ-BW-NEXT: vpblendmw %ymm2, %ymm3, %ymm6 {%k2}
+; AVX512DQ-BW-NEXT: vpblendmw %ymm1, %ymm2, %ymm6 {%k2}
; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm6, %xmm7
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm7[6,13],zero,zero,xmm7[2,9,u,u,u,u,u,u,u]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[1,8,15],zero,zero,xmm6[4,11],zero,zero,xmm6[u,u,u,u,u,u,u]
; AVX512DQ-BW-NEXT: vpor %xmm7, %xmm6, %xmm6
-; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} xmm7 = xmm1[0],xmm0[1],xmm1[2,3,4],xmm0[5],xmm1[6,7]
+; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} xmm7 = xmm4[0],xmm3[1],xmm4[2,3,4],xmm3[5],xmm4[6,7]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u,u,u,0,7,14],zero,zero,xmm7[3,10]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm8 = xmm5[u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[5,12],zero,zero
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm8 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[5,12],zero,zero
; AVX512DQ-BW-NEXT: vpor %xmm7, %xmm8, %xmm7
; AVX512DQ-BW-NEXT: movw $-512, %di # imm = 0xFE00
; AVX512DQ-BW-NEXT: kmovd %edi, %k1
; AVX512DQ-BW-NEXT: vmovdqu8 %xmm7, %xmm6 {%k1}
; AVX512DQ-BW-NEXT: movw $8772, %di # imm = 0x2244
; AVX512DQ-BW-NEXT: kmovd %edi, %k3
-; AVX512DQ-BW-NEXT: vpblendmw %ymm2, %ymm3, %ymm7 {%k3}
+; AVX512DQ-BW-NEXT: vpblendmw %ymm1, %ymm2, %ymm7 {%k3}
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm8 = xmm7[2,9],zero,zero,zero,xmm7[5,12],zero,zero,xmm7[u,u,u,u,u,u,u]
; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm7, %xmm7
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,xmm7[0,7,14],zero,zero,xmm7[3,10,u,u,u,u,u,u,u]
; AVX512DQ-BW-NEXT: vpor %xmm7, %xmm8, %xmm7
-; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} xmm8 = xmm1[0,1],xmm0[2],xmm1[3,4],xmm0[5],xmm1[6,7]
+; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} xmm8 = xmm4[0,1],xmm3[2],xmm4[3,4],xmm3[5],xmm4[6,7]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,u,u,u,u,1,8,15],zero,zero,xmm8[4,11]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm5[u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[6,13],zero,zero
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[6,13],zero,zero
; AVX512DQ-BW-NEXT: vpor %xmm9, %xmm8, %xmm8
; AVX512DQ-BW-NEXT: vmovdqu8 %xmm8, %xmm7 {%k1}
; AVX512DQ-BW-NEXT: movw $9288, %di # imm = 0x2448
; AVX512DQ-BW-NEXT: kmovd %edi, %k4
-; AVX512DQ-BW-NEXT: vpblendmw %ymm2, %ymm3, %ymm8 {%k4}
+; AVX512DQ-BW-NEXT: vpblendmw %ymm1, %ymm2, %ymm8 {%k4}
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm8[3,10],zero,zero,zero,xmm8[6,13],zero,zero,xmm8[u,u,u,u,u,u,u]
; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm8, %xmm8
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,xmm8[1,8,15],zero,zero,xmm8[4,11,u,u,u,u,u,u,u]
; AVX512DQ-BW-NEXT: vpor %xmm9, %xmm8, %xmm8
-; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} xmm9 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
+; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} xmm9 = xmm4[0],xmm3[1],xmm4[2],xmm3[3]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm10 = xmm9[u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm9[5,12]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm11 = xmm5[u,u,u,u,u,u,u,u,u],zero,zero,xmm5[0,7,14],zero,zero
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm11 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[0,7,14],zero,zero
; AVX512DQ-BW-NEXT: vpor %xmm11, %xmm10, %xmm10
; AVX512DQ-BW-NEXT: vmovdqu8 %xmm10, %xmm8 {%k1}
-; AVX512DQ-BW-NEXT: vpblendmw %ymm3, %ymm2, %ymm10 {%k2}
+; AVX512DQ-BW-NEXT: vpblendmw %ymm2, %ymm1, %ymm10 {%k2}
; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm10, %xmm11
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm11 = zero,zero,xmm11[2,9],zero,zero,zero,xmm11[5,12,u,u,u,u,u,u,u]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[4,11],zero,zero,xmm10[0,7,14],zero,zero,xmm10[u,u,u,u,u,u,u]
; AVX512DQ-BW-NEXT: vpor %xmm11, %xmm10, %xmm10
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm9[6,13]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm11 = xmm5[u,u,u,u,u,u,u,u,u],zero,zero,xmm5[1,8,15],zero,zero
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm11 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[1,8,15],zero,zero
; AVX512DQ-BW-NEXT: vpor %xmm11, %xmm9, %xmm9
; AVX512DQ-BW-NEXT: vmovdqu8 %xmm9, %xmm10 {%k1}
-; AVX512DQ-BW-NEXT: vpblendmw %ymm3, %ymm2, %ymm9 {%k3}
+; AVX512DQ-BW-NEXT: vpblendmw %ymm2, %ymm1, %ymm9 {%k3}
; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm9, %xmm11
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm11 = zero,zero,xmm11[3,10],zero,zero,zero,xmm11[6,13,u,u,u,u,u,u,u]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[5,12],zero,zero,xmm9[1,8,15],zero,zero,xmm9[u,u,u,u,u,u,u]
; AVX512DQ-BW-NEXT: vpor %xmm11, %xmm9, %xmm9
-; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} xmm11 = xmm0[0],xmm1[1,2],xmm0[3],xmm1[4,5,6],xmm0[7]
+; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} xmm11 = xmm3[0],xmm4[1,2],xmm3[3],xmm4[4,5,6],xmm3[7]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u,u,u,u,u,4,11],zero,zero,xmm11[0,7,14]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm12 = xmm5[u,u,u,u,u,u,u,u,u],zero,zero,xmm5[2,9],zero,zero,zero
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm12 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[2,9],zero,zero,zero
; AVX512DQ-BW-NEXT: vpor %xmm12, %xmm11, %xmm11
; AVX512DQ-BW-NEXT: vmovdqu8 %xmm11, %xmm9 {%k1}
-; AVX512DQ-BW-NEXT: vmovdqu16 %ymm3, %ymm2 {%k4}
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[6,13],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[u,u,u,u,u,u,u]
-; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm2, %xmm2
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14,u,u,u,u,u,u,u]
-; AVX512DQ-BW-NEXT: vpor %xmm3, %xmm2, %xmm2
-; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4],xmm1[5,6],xmm0[7]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,5,12],zero,zero,xmm0[1,8,15]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm5[u,u,u,u,u,u,u,u,u],zero,zero,xmm5[3,10],zero,zero,zero
-; AVX512DQ-BW-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512DQ-BW-NEXT: vmovdqu8 %xmm0, %xmm2 {%k1}
-; AVX512DQ-BW-NEXT: vmovdqa %xmm4, (%rsi)
+; AVX512DQ-BW-NEXT: vmovdqu16 %ymm2, %ymm1 {%k4}
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm2 = xmm1[6,13],zero,zero,xmm1[2,9],zero,zero,zero,xmm1[u,u,u,u,u,u,u]
+; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[4,11],zero,zero,xmm1[0,7,14,u,u,u,u,u,u,u]
+; AVX512DQ-BW-NEXT: vpor %xmm2, %xmm1, %xmm1
+; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0],xmm4[1,2,3],xmm3[4],xmm4[5,6],xmm3[7]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,u,u,5,12],zero,zero,xmm2[1,8,15]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[3,10],zero,zero,zero
+; AVX512DQ-BW-NEXT: vpor %xmm0, %xmm2, %xmm0
+; AVX512DQ-BW-NEXT: vmovdqu8 %xmm0, %xmm1 {%k1}
+; AVX512DQ-BW-NEXT: vmovdqa %xmm5, (%rsi)
; AVX512DQ-BW-NEXT: vmovdqa %xmm6, (%rdx)
; AVX512DQ-BW-NEXT: vmovdqa %xmm7, (%rcx)
; AVX512DQ-BW-NEXT: vmovdqa %xmm8, (%r8)
; AVX512DQ-BW-NEXT: vmovdqa %xmm10, (%r9)
; AVX512DQ-BW-NEXT: vmovdqa %xmm9, (%r10)
-; AVX512DQ-BW-NEXT: vmovdqa %xmm2, (%rax)
+; AVX512DQ-BW-NEXT: vmovdqa %xmm1, (%rax)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
@@ -3487,97 +3501,99 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 96(%rdi), %xmm0
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 64(%rdi), %xmm1
-; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[u,u,u,u,u,u,u,u,u,u,6,13],zero,zero,xmm2[2,9]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [128,128,128,5,12,128,128,1,8,15,128,128,4,11,128,128]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 80(%rdi), %xmm0
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm3, %xmm0, %xmm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm2
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512DQ-BW-FCP-NEXT: movw $-28382, %r11w # imm = 0x9122
; AVX512DQ-BW-FCP-NEXT: kmovd %r11d, %k1
-; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm2, %ymm3, %ymm5 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[0,7,14],zero,zero,xmm5[3,10],zero,zero,zero,xmm5[u,u,u,u,u,u]
-; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3,4],xmm4[5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm5, %xmm5
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm5[5,12],zero,zero,xmm5[1,8,15,u,u,u,u,u,u]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 80(%rdi), %xmm5
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm5[u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[4,11],zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm7[5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vpor %xmm6, %xmm4, %xmm4
+; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm1, %ymm2, %ymm5 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm5, %xmm6
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm3, %xmm6, %xmm3
+; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm3[0,1,2,3,4],xmm4[5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 96(%rdi), %xmm3
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 64(%rdi), %xmm4
+; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} xmm7 = xmm3[0],xmm4[1],xmm3[2],xmm4[3]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [0,7,14,128,128,3,10,128,128,128,6,13,128,128,2,9]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm8, %xmm7, %xmm7
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm8, %xmm5, %xmm5
+; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm7[5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vpor %xmm6, %xmm5, %xmm5
; AVX512DQ-BW-FCP-NEXT: movw $4644, %di # imm = 0x1224
; AVX512DQ-BW-FCP-NEXT: kmovd %edi, %k2
-; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm2, %ymm3, %ymm6 {%k2}
+; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm1, %ymm2, %ymm6 {%k2}
; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm6, %xmm7
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm7[6,13],zero,zero,xmm7[2,9,u,u,u,u,u,u,u]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[1,8,15],zero,zero,xmm6[4,11],zero,zero,xmm6[u,u,u,u,u,u,u]
; AVX512DQ-BW-FCP-NEXT: vpor %xmm7, %xmm6, %xmm6
-; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm1[0],xmm0[1],xmm1[2,3,4],xmm0[5],xmm1[6,7]
+; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm4[0],xmm3[1],xmm4[2,3,4],xmm3[5],xmm4[6,7]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u,u,u,0,7,14],zero,zero,xmm7[3,10]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm5[u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[5,12],zero,zero
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[5,12],zero,zero
; AVX512DQ-BW-FCP-NEXT: vpor %xmm7, %xmm8, %xmm7
; AVX512DQ-BW-FCP-NEXT: movw $-512, %di # imm = 0xFE00
; AVX512DQ-BW-FCP-NEXT: kmovd %edi, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %xmm7, %xmm6 {%k1}
; AVX512DQ-BW-FCP-NEXT: movw $8772, %di # imm = 0x2244
; AVX512DQ-BW-FCP-NEXT: kmovd %edi, %k3
-; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm2, %ymm3, %ymm7 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm1, %ymm2, %ymm7 {%k3}
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm7[2,9],zero,zero,zero,xmm7[5,12],zero,zero,xmm7[u,u,u,u,u,u,u]
; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm7, %xmm7
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,xmm7[0,7,14],zero,zero,xmm7[3,10,u,u,u,u,u,u,u]
; AVX512DQ-BW-FCP-NEXT: vpor %xmm7, %xmm8, %xmm7
-; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm1[0,1],xmm0[2],xmm1[3,4],xmm0[5],xmm1[6,7]
+; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm4[0,1],xmm3[2],xmm4[3,4],xmm3[5],xmm4[6,7]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,u,u,u,u,1,8,15],zero,zero,xmm8[4,11]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm5[u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[6,13],zero,zero
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[6,13],zero,zero
; AVX512DQ-BW-FCP-NEXT: vpor %xmm9, %xmm8, %xmm8
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %xmm8, %xmm7 {%k1}
; AVX512DQ-BW-FCP-NEXT: movw $9288, %di # imm = 0x2448
; AVX512DQ-BW-FCP-NEXT: kmovd %edi, %k4
-; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm2, %ymm3, %ymm8 {%k4}
+; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm1, %ymm2, %ymm8 {%k4}
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm8[3,10],zero,zero,zero,xmm8[6,13],zero,zero,xmm8[u,u,u,u,u,u,u]
; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm8, %xmm8
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,xmm8[1,8,15],zero,zero,xmm8[4,11,u,u,u,u,u,u,u]
; AVX512DQ-BW-FCP-NEXT: vpor %xmm9, %xmm8, %xmm8
-; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} xmm9 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
+; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} xmm9 = xmm4[0],xmm3[1],xmm4[2],xmm3[3]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm9[u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm9[5,12]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm5[u,u,u,u,u,u,u,u,u],zero,zero,xmm5[0,7,14],zero,zero
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[0,7,14],zero,zero
; AVX512DQ-BW-FCP-NEXT: vpor %xmm11, %xmm10, %xmm10
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %xmm10, %xmm8 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm3, %ymm2, %ymm10 {%k2}
+; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm2, %ymm1, %ymm10 {%k2}
; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm10, %xmm11
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm11 = zero,zero,xmm11[2,9],zero,zero,zero,xmm11[5,12,u,u,u,u,u,u,u]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[4,11],zero,zero,xmm10[0,7,14],zero,zero,xmm10[u,u,u,u,u,u,u]
; AVX512DQ-BW-FCP-NEXT: vpor %xmm11, %xmm10, %xmm10
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm9[6,13]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm5[u,u,u,u,u,u,u,u,u],zero,zero,xmm5[1,8,15],zero,zero
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[1,8,15],zero,zero
; AVX512DQ-BW-FCP-NEXT: vpor %xmm11, %xmm9, %xmm9
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %xmm9, %xmm10 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm3, %ymm2, %ymm9 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm2, %ymm1, %ymm9 {%k3}
; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm9, %xmm11
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm11 = zero,zero,xmm11[3,10],zero,zero,zero,xmm11[6,13,u,u,u,u,u,u,u]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[5,12],zero,zero,xmm9[1,8,15],zero,zero,xmm9[u,u,u,u,u,u,u]
; AVX512DQ-BW-FCP-NEXT: vpor %xmm11, %xmm9, %xmm9
-; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm0[0],xmm1[1,2],xmm0[3],xmm1[4,5,6],xmm0[7]
+; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm3[0],xmm4[1,2],xmm3[3],xmm4[4,5,6],xmm3[7]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u,u,u,u,u,4,11],zero,zero,xmm11[0,7,14]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm5[u,u,u,u,u,u,u,u,u],zero,zero,xmm5[2,9],zero,zero,zero
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[2,9],zero,zero,zero
; AVX512DQ-BW-FCP-NEXT: vpor %xmm12, %xmm11, %xmm11
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %xmm11, %xmm9 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %ymm3, %ymm2 {%k4}
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[6,13],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[u,u,u,u,u,u,u]
-; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm2, %xmm2
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14,u,u,u,u,u,u,u]
-; AVX512DQ-BW-FCP-NEXT: vpor %xmm3, %xmm2, %xmm2
-; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4],xmm1[5,6],xmm0[7]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,5,12],zero,zero,xmm0[1,8,15]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm5[u,u,u,u,u,u,u,u,u],zero,zero,xmm5[3,10],zero,zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %xmm0, %xmm2 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm4, (%rsi)
+; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %ymm2, %ymm1 {%k4}
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm1[6,13],zero,zero,xmm1[2,9],zero,zero,zero,xmm1[u,u,u,u,u,u,u]
+; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[4,11],zero,zero,xmm1[0,7,14,u,u,u,u,u,u,u]
+; AVX512DQ-BW-FCP-NEXT: vpor %xmm2, %xmm1, %xmm1
+; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0],xmm4[1,2,3],xmm3[4],xmm4[5,6],xmm3[7]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,u,u,5,12],zero,zero,xmm2[1,8,15]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[3,10],zero,zero,zero
+; AVX512DQ-BW-FCP-NEXT: vpor %xmm0, %xmm2, %xmm0
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %xmm0, %xmm1 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm5, (%rsi)
; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm6, (%rdx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm7, (%rcx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm8, (%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm10, (%r9)
; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm9, (%r10)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm2, (%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm1, (%rax)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%wide.vec = load <112 x i8>, ptr %in.vec, align 64
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
index 23ddcd7cd0262..9d1939f66219f 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
@@ -465,10 +465,11 @@ define void @store_i16_stride3_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vmovdqa (%rsi), %xmm1
; AVX2-NEXT: vmovdqa (%rdx), %xmm2
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3
-; AVX2-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[0,1,u,u,6,7,2,3,u,u,8,9,4,5,u,u,16,17,u,u,22,23,18,19,u,u,24,25,20,21,u,u]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,0,1,6,7,2,3,2,3,8,9,4,5,4,5,16,17,6,7,22,23,18,19,8,9,24,25,20,21,10,11]
+; AVX2-NEXT: vpshufb %ymm4, %ymm3, %ymm5
; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,0,1]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,0,1,u,u,u,u,2,3,u,u,u,u,4,5,u,u,22,23,u,u,u,u,24,25,u,u,u,u,26,27]
-; AVX2-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7],ymm4[8],ymm3[9],ymm4[10,11],ymm3[12],ymm4[13,14],ymm3[15]
+; AVX2-NEXT: vpshufb %ymm4, %ymm3, %ymm3
+; AVX2-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2,3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8],ymm3[9],ymm5[10,11],ymm3[12],ymm5[13,14],ymm3[15]
; AVX2-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,1,1,0,2]
; AVX2-NEXT: vpermd %ymm2, %ymm4, %ymm4
; AVX2-NEXT: vpmovsxbw {{.*#+}} ymm5 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
@@ -488,10 +489,11 @@ define void @store_i16_stride3_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vmovdqa (%rsi), %xmm1
; AVX2-FP-NEXT: vmovdqa (%rdx), %xmm2
; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[0,1,u,u,6,7,2,3,u,u,8,9,4,5,u,u,16,17,u,u,22,23,18,19,u,u,24,25,20,21,u,u]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,0,1,6,7,2,3,2,3,8,9,4,5,4,5,16,17,6,7,22,23,18,19,8,9,24,25,20,21,10,11]
+; AVX2-FP-NEXT: vpshufb %ymm4, %ymm3, %ymm5
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,0,1]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,0,1,u,u,u,u,2,3,u,u,u,u,4,5,u,u,22,23,u,u,u,u,24,25,u,u,u,u,26,27]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7],ymm4[8],ymm3[9],ymm4[10,11],ymm3[12],ymm4[13,14],ymm3[15]
+; AVX2-FP-NEXT: vpshufb %ymm4, %ymm3, %ymm3
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2,3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8],ymm3[9],ymm5[10,11],ymm3[12],ymm5[13,14],ymm3[15]
; AVX2-FP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,1,1,0,2]
; AVX2-FP-NEXT: vpermd %ymm2, %ymm4, %ymm4
; AVX2-FP-NEXT: vpmovsxbw {{.*#+}} ymm5 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
@@ -533,10 +535,11 @@ define void @store_i16_stride3_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vmovdqa (%rsi), %xmm1
; AVX512-NEXT: vmovdqa (%rdx), %xmm2
; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3
-; AVX512-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[0,1,u,u,u,u,2,3,u,u,u,u,4,5,u,u,u,u,u,u,22,23,u,u,u,u,24,25,u,u,u,u]
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,0,1,6,7,2,3,2,3,8,9,4,5,4,5,16,17,6,7,22,23,18,19,8,9,24,25,20,21,10,11]
+; AVX512-NEXT: vpshufb %ymm4, %ymm3, %ymm5
; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,0,1]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,0,1,u,u,u,u,2,3,u,u,u,u,4,5,u,u,22,23,u,u,u,u,24,25,u,u,u,u,26,27]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7],ymm4[8],ymm3[9],ymm4[10,11],ymm3[12],ymm4[13,14],ymm3[15]
+; AVX512-NEXT: vpshufb %ymm4, %ymm3, %ymm3
+; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2,3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8],ymm3[9],ymm5[10,11],ymm3[12],ymm5[13,14],ymm3[15]
; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,1,1,0,2]
; AVX512-NEXT: vpermd %ymm2, %ymm4, %ymm4
; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm4
@@ -579,10 +582,11 @@ define void @store_i16_stride3_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vmovdqa (%rsi), %xmm1
; AVX512DQ-NEXT: vmovdqa (%rdx), %xmm2
; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[0,1,u,u,u,u,2,3,u,u,u,u,4,5,u,u,u,u,u,u,22,23,u,u,u,u,24,25,u,u,u,u]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,0,1,6,7,2,3,2,3,8,9,4,5,4,5,16,17,6,7,22,23,18,19,8,9,24,25,20,21,10,11]
+; AVX512DQ-NEXT: vpshufb %ymm4, %ymm3, %ymm5
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,0,1]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,0,1,u,u,u,u,2,3,u,u,u,u,4,5,u,u,22,23,u,u,u,u,24,25,u,u,u,u,26,27]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7],ymm4[8],ymm3[9],ymm4[10,11],ymm3[12],ymm4[13,14],ymm3[15]
+; AVX512DQ-NEXT: vpshufb %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2,3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8],ymm3[9],ymm5[10,11],ymm3[12],ymm5[13,14],ymm3[15]
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,1,1,0,2]
; AVX512DQ-NEXT: vpermd %ymm2, %ymm4, %ymm4
; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm4
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll
index f054c7edfff16..704c92924abfb 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll
@@ -218,10 +218,11 @@ define void @store_i16_stride4_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm0[0,1,8,9,u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u,22,23,30,31]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,8,9,0,1,8,9,2,3,10,11,2,3,10,11,20,21,28,29,20,21,28,29,22,23,30,31,22,23,30,31]
+; AVX2-NEXT: vpshufb %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,0,1,8,9,u,u,u,u,2,3,10,11,20,21,28,29,u,u,u,u,22,23,30,31,u,u,u,u]
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6],ymm1[7]
+; AVX2-NEXT: vpshufb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6],ymm2[7]
; AVX2-NEXT: vmovdqa %ymm0, (%r8)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
@@ -235,10 +236,11 @@ define void @store_i16_stride4_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
; AVX2-FP-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm1 = ymm0[0,1,8,9,u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u,22,23,30,31]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,8,9,0,1,8,9,2,3,10,11,2,3,10,11,20,21,28,29,20,21,28,29,22,23,30,31,22,23,30,31]
+; AVX2-FP-NEXT: vpshufb %ymm1, %ymm0, %ymm2
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,0,1,8,9,u,u,u,u,2,3,10,11,20,21,28,29,u,u,u,u,22,23,30,31,u,u,u,u]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6],ymm1[7]
+; AVX2-FP-NEXT: vpshufb %ymm1, %ymm0, %ymm0
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6],ymm2[7]
; AVX2-FP-NEXT: vmovdqa %ymm0, (%r8)
; AVX2-FP-NEXT: vzeroupper
; AVX2-FP-NEXT: retq
@@ -268,10 +270,11 @@ define void @store_i16_stride4_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm0[0,1,8,9,u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u,22,23,30,31]
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,8,9,0,1,8,9,2,3,10,11,2,3,10,11,20,21,28,29,20,21,28,29,22,23,30,31,22,23,30,31]
+; AVX512-NEXT: vpshufb %ymm1, %ymm0, %ymm2
; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,0,1,8,9,u,u,u,u,2,3,10,11,20,21,28,29,u,u,u,u,22,23,30,31,u,u,u,u]
-; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6],ymm1[7]
+; AVX512-NEXT: vpshufb %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6],ymm2[7]
; AVX512-NEXT: vmovdqa %ymm0, (%r8)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -301,10 +304,11 @@ define void @store_i16_stride4_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm0[0,1,8,9,u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u,22,23,30,31]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,8,9,0,1,8,9,2,3,10,11,2,3,10,11,20,21,28,29,20,21,28,29,22,23,30,31,22,23,30,31]
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm0, %ymm2
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,0,1,8,9,u,u,u,u,2,3,10,11,20,21,28,29,u,u,u,u,22,23,30,31,u,u,u,u]
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6],ymm1[7]
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6],ymm2[7]
; AVX512DQ-NEXT: vmovdqa %ymm0, (%r8)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
index c1e7f1e8c6c72..7d2f52d3c5830 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
@@ -336,8 +336,9 @@ define void @store_i16_stride5_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,1]
; AVX-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm2[1],xmm4[2,3,4,5],xmm2[6],xmm4[7]
-; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,0,1,8,9,u,u,u,u,u,u,2,3]
-; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,u,u,u,u,u,u,2,3,10,11,u,u]
+; AVX-NEXT: vmovdqa {{.*#+}} xmm5 = [0,1,8,9,0,1,8,9,u,u,2,3,10,11,2,3]
+; AVX-NEXT: vpshufb %xmm5, %xmm1, %xmm1
+; AVX-NEXT: vpshufb %xmm5, %xmm0, %xmm0
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6],xmm1[7]
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4],xmm0[5,6,7]
; AVX-NEXT: vmovdqa %xmm0, (%r9)
@@ -356,10 +357,11 @@ define void @store_i16_stride5_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm2
; AVX2-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
; AVX2-NEXT: vpbroadcastq %xmm3, %ymm3
-; AVX2-NEXT: vpshufb {{.*#+}} ymm4 = ymm2[0,1,8,9,u,u,u,u,u,u,2,3,10,11,u,u,26,27,u,u,u,u,u,u,20,21,28,29,u,u,u,u]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,8,9,0,1,8,9,u,u,2,3,10,11,2,3,10,11,u,u,4,5,12,13,4,5,12,13,u,u,6,7]
+; AVX2-NEXT: vpshufb %ymm4, %ymm2, %ymm5
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,0,1]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,0,1,8,9,u,u,u,u,u,u,2,3,u,u,u,u,20,21,28,29,u,u,u,u,u,u,22,23]
-; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3],ymm4[4,5,6],ymm2[7],ymm4[8,9],ymm2[10,11],ymm4[12,13,14],ymm2[15]
+; AVX2-NEXT: vpshufb %ymm4, %ymm2, %ymm2
+; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6],ymm2[7],ymm5[8,9],ymm2[10,11],ymm5[12,13,14],ymm2[15]
; AVX2-NEXT: vpmovsxbw {{.*#+}} ymm4 = [65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535]
; AVX2-NEXT: vpblendvb %ymm4, %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -383,10 +385,11 @@ define void @store_i16_stride5_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm2
; AVX2-FP-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
; AVX2-FP-NEXT: vpbroadcastq %xmm3, %ymm3
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm4 = ymm2[0,1,8,9,u,u,u,u,u,u,2,3,10,11,u,u,26,27,u,u,u,u,u,u,20,21,28,29,u,u,u,u]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,8,9,0,1,8,9,u,u,2,3,10,11,2,3,10,11,u,u,4,5,12,13,4,5,12,13,u,u,6,7]
+; AVX2-FP-NEXT: vpshufb %ymm4, %ymm2, %ymm5
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,0,1]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,0,1,8,9,u,u,u,u,u,u,2,3,u,u,u,u,20,21,28,29,u,u,u,u,u,u,22,23]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3],ymm4[4,5,6],ymm2[7],ymm4[8,9],ymm2[10,11],ymm4[12,13,14],ymm2[15]
+; AVX2-FP-NEXT: vpshufb %ymm4, %ymm2, %ymm2
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6],ymm2[7],ymm5[8,9],ymm2[10,11],ymm5[12,13,14],ymm2[15]
; AVX2-FP-NEXT: vpmovsxbw {{.*#+}} ymm4 = [65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535]
; AVX2-FP-NEXT: vpblendvb %ymm4, %ymm2, %ymm3, %ymm2
; AVX2-FP-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -409,10 +412,11 @@ define void @store_i16_stride5_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm2
; AVX2-FCP-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
; AVX2-FCP-NEXT: vpbroadcastq %xmm3, %ymm3
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm2[0,1,8,9,u,u,u,u,u,u,2,3,10,11,u,u,26,27,u,u,u,u,u,u,20,21,28,29,u,u,u,u]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,8,9,0,1,8,9,u,u,2,3,10,11,2,3,10,11,u,u,4,5,12,13,4,5,12,13,u,u,6,7]
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm5
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,0,1]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,0,1,8,9,u,u,u,u,u,u,2,3,u,u,u,u,20,21,28,29,u,u,u,u,u,u,22,23]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3],ymm4[4,5,6],ymm2[7],ymm4[8,9],ymm2[10,11],ymm4[12,13,14],ymm2[15]
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6],ymm2[7],ymm5[8,9],ymm2[10,11],ymm5[12,13,14],ymm2[15]
; AVX2-FCP-NEXT: vpmovsxbw {{.*#+}} ymm4 = [65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535]
; AVX2-FCP-NEXT: vpblendvb %ymm4, %ymm2, %ymm3, %ymm2
; AVX2-FCP-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -435,10 +439,11 @@ define void @store_i16_stride5_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm2
; AVX512-NEXT: vpbroadcastq %rax, %ymm3
-; AVX512-NEXT: vpshufb {{.*#+}} ymm4 = ymm2[0,1,8,9,u,u,u,u,u,u,2,3,10,11,u,u,26,27,u,u,u,u,u,u,20,21,28,29,u,u,u,u]
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,8,9,0,1,8,9,u,u,2,3,10,11,2,3,10,11,u,u,4,5,12,13,4,5,12,13,u,u,6,7]
+; AVX512-NEXT: vpshufb %ymm4, %ymm2, %ymm5
; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,0,1]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,0,1,8,9,u,u,u,u,u,u,2,3,u,u,u,u,20,21,28,29,u,u,u,u,u,u,22,23]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3],ymm4[4,5,6],ymm2[7],ymm4[8,9],ymm2[10,11],ymm4[12,13,14],ymm2[15]
+; AVX512-NEXT: vpshufb %ymm4, %ymm2, %ymm2
+; AVX512-NEXT: vpblendw {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6],ymm2[7],ymm5[8,9],ymm2[10,11],ymm5[12,13,14],ymm2[15]
; AVX512-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm2
; AVX512-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
@@ -462,10 +467,11 @@ define void @store_i16_stride5_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm2
; AVX512-FCP-NEXT: vpbroadcastq %rax, %ymm3
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm2[0,1,8,9,u,u,u,u,u,u,2,3,10,11,u,u,26,27,u,u,u,u,u,u,20,21,28,29,u,u,u,u]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,8,9,0,1,8,9,u,u,2,3,10,11,2,3,10,11,u,u,4,5,12,13,4,5,12,13,u,u,6,7]
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm5
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,0,1]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,0,1,8,9,u,u,u,u,u,u,2,3,u,u,u,u,20,21,28,29,u,u,u,u,u,u,22,23]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3],ymm4[4,5,6],ymm2[7],ymm4[8,9],ymm2[10,11],ymm4[12,13,14],ymm2[15]
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm2
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6],ymm2[7],ymm5[8,9],ymm2[10,11],ymm5[12,13,14],ymm2[15]
; AVX512-FCP-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm2
; AVX512-FCP-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,6,7,14,15,u,u,8,9,10,11,12,13,14,15]
@@ -488,10 +494,11 @@ define void @store_i16_stride5_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm2
; AVX512DQ-NEXT: vpbroadcastq %rax, %ymm3
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm4 = ymm2[0,1,8,9,u,u,u,u,u,u,2,3,10,11,u,u,26,27,u,u,u,u,u,u,20,21,28,29,u,u,u,u]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,8,9,0,1,8,9,u,u,2,3,10,11,2,3,10,11,u,u,4,5,12,13,4,5,12,13,u,u,6,7]
+; AVX512DQ-NEXT: vpshufb %ymm4, %ymm2, %ymm5
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,0,1]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,0,1,8,9,u,u,u,u,u,u,2,3,u,u,u,u,20,21,28,29,u,u,u,u,u,u,22,23]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3],ymm4[4,5,6],ymm2[7],ymm4[8,9],ymm2[10,11],ymm4[12,13,14],ymm2[15]
+; AVX512DQ-NEXT: vpshufb %ymm4, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6],ymm2[7],ymm5[8,9],ymm2[10,11],ymm5[12,13,14],ymm2[15]
; AVX512DQ-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm2
; AVX512DQ-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
@@ -515,10 +522,11 @@ define void @store_i16_stride5_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm2
; AVX512DQ-FCP-NEXT: vpbroadcastq %rax, %ymm3
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm2[0,1,8,9,u,u,u,u,u,u,2,3,10,11,u,u,26,27,u,u,u,u,u,u,20,21,28,29,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,8,9,0,1,8,9,u,u,2,3,10,11,2,3,10,11,u,u,4,5,12,13,4,5,12,13,u,u,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm5
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,0,1]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,0,1,8,9,u,u,u,u,u,u,2,3,u,u,u,u,20,21,28,29,u,u,u,u,u,u,22,23]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3],ymm4[4,5,6],ymm2[7],ymm4[8,9],ymm2[10,11],ymm4[12,13,14],ymm2[15]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm2
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6],ymm2[7],ymm5[8,9],ymm2[10,11],ymm5[12,13,14],ymm2[15]
; AVX512DQ-FCP-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm2
; AVX512DQ-FCP-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,6,7,14,15,u,u,8,9,10,11,12,13,14,15]
@@ -814,20 +822,22 @@ define void @store_i16_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-LABEL: store_i16_stride5_vf8:
; AVX2-FP: # %bb.0:
; AVX2-FP-NEXT: vmovdqa (%rdi), %xmm0
-; AVX2-FP-NEXT: vmovdqa (%rsi), %xmm1
-; AVX2-FP-NEXT: vmovdqa (%rdx), %xmm2
+; AVX2-FP-NEXT: vmovdqa (%rsi), %xmm2
+; AVX2-FP-NEXT: vmovdqa (%rdx), %xmm1
; AVX2-FP-NEXT: vmovdqa (%rcx), %xmm3
; AVX2-FP-NEXT: vmovdqa (%r8), %xmm4
-; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm5
-; AVX2-FP-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm6
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm7 = ymm5[u,u,u,u,10,11,u,u,8,9,u,u,u,u,12,13,u,u,u,u,26,27,u,u,24,25,u,u,u,u,28,29]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm8 = ymm5[2,3,0,1]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[6,7,10,11,u,u,6,7,u,u,8,9,12,13,u,u,22,23,26,27,u,u,22,23,u,u,24,25,28,29,u,u]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm7 = ymm8[0,1],ymm7[2],ymm8[3],ymm7[4],ymm8[5,6],ymm7[7],ymm8[8,9],ymm7[10],ymm8[11],ymm7[12],ymm8[13,14],ymm7[15]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm8 = ymm6[6,7,6,7,u,u,u,u,10,11,10,11,8,9,u,u,22,23,22,23,u,u,u,u,26,27,26,27,24,25,u,u]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm9 = ymm6[2,3,0,1]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm9 = ymm9[u,u,u,u,6,7,10,11,u,u,u,u,u,u,8,9,u,u,u,u,22,23,26,27,u,u,u,u,u,u,24,25]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3],ymm8[4,5,6],ymm9[7],ymm8[8,9],ymm9[10,11],ymm8[12,13,14],ymm9[15]
+; AVX2-FP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm5
+; AVX2-FP-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm6
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm7 = [6,7,10,11,10,11,6,7,8,9,8,9,12,13,12,13,22,23,26,27,26,27,22,23,24,25,24,25,28,29,28,29]
+; AVX2-FP-NEXT: vpshufb %ymm7, %ymm5, %ymm8
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm9 = ymm5[2,3,0,1]
+; AVX2-FP-NEXT: vpshufb %ymm7, %ymm9, %ymm7
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm7 = ymm7[0,1],ymm8[2],ymm7[3],ymm8[4],ymm7[5,6],ymm8[7],ymm7[8,9],ymm8[10],ymm7[11],ymm8[12],ymm7[13,14],ymm8[15]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm8 = [6,7,6,7,6,7,10,11,10,11,10,11,8,9,8,9,22,23,22,23,22,23,26,27,26,27,26,27,24,25,24,25]
+; AVX2-FP-NEXT: vpshufb %ymm8, %ymm6, %ymm9
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm10 = ymm6[2,3,0,1]
+; AVX2-FP-NEXT: vpshufb %ymm8, %ymm10, %ymm8
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm8 = ymm9[0,1],ymm8[2,3],ymm9[4,5,6],ymm8[7],ymm9[8,9],ymm8[10,11],ymm9[12,13,14],ymm8[15]
; AVX2-FP-NEXT: vpmovsxbw {{.*#+}} ymm9 = [65535,0,0,0,65535,65535,0,0,0,65535,65535,0,0,0,65535,65535]
; AVX2-FP-NEXT: vpblendvb %ymm9, %ymm7, %ymm8, %ymm7
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,1,1]
@@ -841,9 +851,9 @@ define void @store_i16_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vpbroadcastq (%r8), %ymm6
; AVX2-FP-NEXT: vpmovsxbw {{.*#+}} ymm7 = [65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535]
; AVX2-FP-NEXT: vpblendvb %ymm7, %ymm5, %ymm6, %ymm5
-; AVX2-FP-NEXT: vpsrlq $48, %xmm1, %xmm1
-; AVX2-FP-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
-; AVX2-FP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; AVX2-FP-NEXT: vpsrlq $48, %xmm2, %xmm2
+; AVX2-FP-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm2[1]
+; AVX2-FP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[8,9,10,11,u,u,u,u,u,u,12,13,14,15,u,u]
; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4],xmm1[5,6,7]
; AVX2-FP-NEXT: vpbroadcastd 12(%r8), %xmm1
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
index 824bd6e023c79..b33cc83ac3f79 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
@@ -100,10 +100,11 @@ define void @store_i16_stride6_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],mem[0],xmm2[1],mem[1]
; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm1 = ymm0[0,1,4,5,8,9,12,13,u,u,u,u,2,3,6,7,u,u,u,u,18,19,22,23,u,u,u,u,u,u,u,u]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,4,5,8,9,12,13,8,9,12,13,2,3,6,7,18,19,22,23,2,3,6,7,u,u,u,u,u,u,u,u]
+; AVX2-FP-NEXT: vpshufb %ymm1, %ymm0, %ymm2
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,8,9,12,13,u,u,u,u,18,19,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3],ymm0[4],ymm1[5,6,7]
+; AVX2-FP-NEXT: vpshufb %ymm1, %ymm0, %ymm0
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2],ymm2[3],ymm0[4],ymm2[5,6,7]
; AVX2-FP-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-FP-NEXT: vmovq %xmm1, 16(%rax)
; AVX2-FP-NEXT: vmovdqa %xmm0, (%rax)
@@ -121,10 +122,11 @@ define void @store_i16_stride6_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],mem[0],xmm2[1],mem[1]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm0[0,1,4,5,8,9,12,13,u,u,u,u,2,3,6,7,u,u,u,u,18,19,22,23,u,u,u,u,u,u,u,u]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,4,5,8,9,12,13,8,9,12,13,2,3,6,7,18,19,22,23,2,3,6,7,u,u,u,u,u,u,u,u]
+; AVX2-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm2
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,8,9,12,13,u,u,u,u,18,19,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3],ymm0[4],ymm1[5,6,7]
+; AVX2-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm0
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2],ymm2[3],ymm0[4],ymm2[5,6,7]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-FCP-NEXT: vmovq %xmm1, 16(%rax)
; AVX2-FCP-NEXT: vmovdqa %xmm0, (%rax)
@@ -164,10 +166,11 @@ define void @store_i16_stride6_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512-FCP-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],mem[0],xmm2[1],mem[1]
; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm0[0,1,4,5,8,9,12,13,u,u,u,u,2,3,6,7,u,u,u,u,18,19,22,23,u,u,u,u,u,u,u,u]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,4,5,8,9,12,13,8,9,12,13,2,3,6,7,18,19,22,23,2,3,6,7,u,u,u,u,u,u,u,u]
+; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm2
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,8,9,12,13,u,u,u,u,18,19,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3],ymm0[4],ymm1[5,6,7]
+; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm0
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2],ymm2[3],ymm0[4],ymm2[5,6,7]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-FCP-NEXT: vmovq %xmm1, 16(%rax)
; AVX512-FCP-NEXT: vmovdqa %xmm0, (%rax)
@@ -207,10 +210,11 @@ define void @store_i16_stride6_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512DQ-FCP-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],mem[0],xmm2[1],mem[1]
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm0[0,1,4,5,8,9,12,13,u,u,u,u,2,3,6,7,u,u,u,u,18,19,22,23,u,u,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,4,5,8,9,12,13,8,9,12,13,2,3,6,7,18,19,22,23,2,3,6,7,u,u,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm2
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,8,9,12,13,u,u,u,u,18,19,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3],ymm0[4],ymm1[5,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2],ymm2[3],ymm0[4],ymm2[5,6,7]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQ-FCP-NEXT: vmovq %xmm1, 16(%rax)
; AVX512DQ-FCP-NEXT: vmovdqa %xmm0, (%rax)
@@ -396,10 +400,11 @@ define void @store_i16_stride6_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm4 = mem[0],zero
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm5 = xmm4[0],xmm3[0]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm6 = ymm2[0,1,8,9,u,u,u,u,u,u,u,u,2,3,10,11,18,19,26,27,u,u,u,u,u,u,u,u,20,21,28,29]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,8,9,0,1,8,9,u,u,u,u,2,3,10,11,2,3,10,11,u,u,u,u,20,21,28,29,4,5,12,13]
+; AVX2-NEXT: vpshufb %ymm6, %ymm2, %ymm7
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,0,1]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u]
-; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0],ymm2[1],ymm6[2,3,4,5],ymm2[6],ymm6[7]
+; AVX2-NEXT: vpshufb %ymm6, %ymm2, %ymm2
+; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0],ymm2[1],ymm7[2,3,4,5],ymm2[6],ymm7[7]
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; AVX2-NEXT: vpbroadcastq %xmm3, %ymm3
; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
@@ -428,10 +433,11 @@ define void @store_i16_stride6_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
; AVX2-FP-NEXT: vmovq {{.*#+}} xmm4 = mem[0],zero
; AVX2-FP-NEXT: vpunpcklqdq {{.*#+}} xmm5 = xmm4[0],xmm3[0]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm6 = ymm2[0,1,8,9,u,u,u,u,u,u,u,u,2,3,10,11,18,19,26,27,u,u,u,u,u,u,u,u,20,21,28,29]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,8,9,0,1,8,9,u,u,u,u,2,3,10,11,2,3,10,11,u,u,u,u,20,21,28,29,4,5,12,13]
+; AVX2-FP-NEXT: vpshufb %ymm6, %ymm2, %ymm7
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,0,1]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0],ymm2[1],ymm6[2,3,4,5],ymm2[6],ymm6[7]
+; AVX2-FP-NEXT: vpshufb %ymm6, %ymm2, %ymm2
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0],ymm2[1],ymm7[2,3,4,5],ymm2[6],ymm7[7]
; AVX2-FP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; AVX2-FP-NEXT: vpbroadcastq %xmm3, %ymm3
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
@@ -458,12 +464,13 @@ define void @store_i16_stride6_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
; AVX2-FCP-NEXT: vmovq {{.*#+}} xmm4 = mem[0],zero
; AVX2-FCP-NEXT: vpunpcklqdq {{.*#+}} xmm5 = xmm4[0],xmm3[0]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm2[0,1,8,9,u,u,u,u,u,u,u,u,2,3,10,11,18,19,26,27,u,u,u,u,u,u,u,u,20,21,28,29]
-; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [4,6,1,3,4,6,1,3]
-; AVX2-FCP-NEXT: # ymm7 = mem[0,1,0,1]
-; AVX2-FCP-NEXT: vpermd %ymm2, %ymm7, %ymm2
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,0,1,4,5,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,28,29,u,u,u,u]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0],ymm2[1],ymm6[2,3,4,5],ymm2[6],ymm6[7]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,8,9,0,1,4,5,u,u,u,u,2,3,10,11,2,3,10,11,u,u,u,u,24,25,28,29,4,5,12,13]
+; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm2, %ymm7
+; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [4,6,1,3,4,6,1,3]
+; AVX2-FCP-NEXT: # ymm8 = mem[0,1,0,1]
+; AVX2-FCP-NEXT: vpermd %ymm2, %ymm8, %ymm2
+; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0],ymm2[1],ymm7[2,3,4,5],ymm2[6],ymm7[7]
; AVX2-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; AVX2-FCP-NEXT: vpbroadcastq %xmm3, %ymm3
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
@@ -490,10 +497,11 @@ define void @store_i16_stride6_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3
-; AVX512-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[0,1,8,9,u,u,u,u,u,u,u,u,2,3,10,11,18,19,26,27,u,u,u,u,u,u,u,u,20,21,28,29]
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,8,9,0,1,8,9,u,u,u,u,2,3,10,11,2,3,10,11,u,u,u,u,20,21,28,29,4,5,12,13]
+; AVX512-NEXT: vpshufb %ymm4, %ymm3, %ymm5
; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,0,1]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u]
-; AVX512-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3,4,5],ymm3[6],ymm4[7]
+; AVX512-NEXT: vpshufb %ymm4, %ymm3, %ymm3
+; AVX512-NEXT: vpblendd {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2,3,4,5],ymm3[6],ymm5[7]
; AVX512-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[0,2,2,3]
; AVX512-NEXT: vpshuflw {{.*#+}} xmm4 = xmm4[0,2,1,3,4,5,6,7]
; AVX512-NEXT: vpbroadcastq %xmm4, %ymm4
@@ -524,12 +532,13 @@ define void @store_i16_stride6_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
; AVX512-FCP-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm3[0],xmm2[0]
; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm5
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm5[0,1,8,9,u,u,u,u,u,u,u,u,2,3,10,11,18,19,26,27,u,u,u,u,u,u,u,u,20,21,28,29]
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [4,6,1,3,4,6,1,3]
-; AVX512-FCP-NEXT: # ymm7 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vpermd %ymm5, %ymm7, %ymm5
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,0,1,4,5,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,28,29,u,u,u,u]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2,3,4,5],ymm5[6],ymm6[7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,8,9,0,1,4,5,u,u,u,u,2,3,10,11,2,3,10,11,u,u,u,u,24,25,28,29,4,5,12,13]
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm5, %ymm7
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [4,6,1,3,4,6,1,3]
+; AVX512-FCP-NEXT: # ymm8 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpermd %ymm5, %ymm8, %ymm5
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm5, %ymm5
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm7[0],ymm5[1],ymm7[2,3,4,5],ymm5[6],ymm7[7]
; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,1,8,3,4,9,6,7]
; AVX512-FCP-NEXT: vpermi2d %ymm2, %ymm5, %ymm3
@@ -557,10 +566,11 @@ define void @store_i16_stride6_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[0,1,8,9,u,u,u,u,u,u,u,u,2,3,10,11,18,19,26,27,u,u,u,u,u,u,u,u,20,21,28,29]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,8,9,0,1,8,9,u,u,u,u,2,3,10,11,2,3,10,11,u,u,u,u,20,21,28,29,4,5,12,13]
+; AVX512DQ-NEXT: vpshufb %ymm4, %ymm3, %ymm5
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,0,1]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u]
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3,4,5],ymm3[6],ymm4[7]
+; AVX512DQ-NEXT: vpshufb %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2,3,4,5],ymm3[6],ymm5[7]
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[0,2,2,3]
; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm4 = xmm4[0,2,1,3,4,5,6,7]
; AVX512DQ-NEXT: vpbroadcastq %xmm4, %ymm4
@@ -591,12 +601,13 @@ define void @store_i16_stride6_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
; AVX512DQ-FCP-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm3[0],xmm2[0]
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm5
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm5[0,1,8,9,u,u,u,u,u,u,u,u,2,3,10,11,18,19,26,27,u,u,u,u,u,u,u,u,20,21,28,29]
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [4,6,1,3,4,6,1,3]
-; AVX512DQ-FCP-NEXT: # ymm7 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vpermd %ymm5, %ymm7, %ymm5
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,0,1,4,5,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,28,29,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2,3,4,5],ymm5[6],ymm6[7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,8,9,0,1,4,5,u,u,u,u,2,3,10,11,2,3,10,11,u,u,u,u,24,25,28,29,4,5,12,13]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm5, %ymm7
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [4,6,1,3,4,6,1,3]
+; AVX512DQ-FCP-NEXT: # ymm8 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpermd %ymm5, %ymm8, %ymm5
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm5, %ymm5
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm7[0],ymm5[1],ymm7[2,3,4,5],ymm5[6],ymm7[7]
; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,1,8,3,4,9,6,7]
; AVX512DQ-FCP-NEXT: vpermi2d %ymm2, %ymm5, %ymm3
@@ -854,25 +865,28 @@ define void @store_i16_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vinserti128 $1, (%rcx), %ymm1, %ymm1
; AVX2-NEXT: vinserti128 $1, (%r9), %ymm2, %ymm2
; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm1[0,2,0,2]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,18,19,26,27,u,u,u,u,u,u,u,u,20,21,28,29]
-; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm0[0,2,0,2]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[0,1,8,9,u,u,u,u,u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,8,9,0,1,8,9,4,5,12,13,2,3,10,11,18,19,26,27,24,25,30,31,20,21,28,29,20,21,28,29]
+; AVX2-NEXT: vpshufb %ymm4, %ymm3, %ymm3
+; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm0[0,2,0,2]
+; AVX2-NEXT: vpshufb %ymm4, %ymm5, %ymm4
; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7]
; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm2[0,2,0,2]
; AVX2-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,18,19,26,27,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm1[0,2,1,3]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u,u,u,u,u]
-; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm0[0,2,1,3]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u,u,u,u,u,18,19,26,27]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm5 = [0,1,8,9,6,7,14,15,6,7,14,15,2,3,10,11,16,17,24,25,16,17,24,25,24,25,26,27,18,19,26,27]
+; AVX2-NEXT: vpshufb %ymm5, %ymm4, %ymm4
+; AVX2-NEXT: vpermq {{.*#+}} ymm6 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpshufb %ymm5, %ymm6, %ymm5
; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm2[0,2,1,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[4,5,12,13,u,u,u,u,u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u]
; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0],ymm4[1,2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,3,1,3]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u,u,u,u,u]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm5 = [2,3,10,11,8,9,10,11,4,5,12,13,4,5,12,13,18,19,26,27,22,23,30,31,22,23,30,31,20,21,28,29]
+; AVX2-NEXT: vpshufb %ymm5, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[1,3,1,3]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[2,3,10,11,u,u,u,u,u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u]
+; AVX2-NEXT: vpshufb %ymm5, %ymm1, %ymm1
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm2[1,3,1,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u,u,u,u,u,22,23,30,31]
@@ -893,25 +907,28 @@ define void @store_i16_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vinserti128 $1, (%rcx), %ymm1, %ymm1
; AVX2-FP-NEXT: vinserti128 $1, (%r9), %ymm2, %ymm2
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm3 = ymm1[0,2,0,2]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,18,19,26,27,u,u,u,u,u,u,u,u,20,21,28,29]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm4 = ymm0[0,2,0,2]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[0,1,8,9,u,u,u,u,u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,8,9,0,1,8,9,4,5,12,13,2,3,10,11,18,19,26,27,24,25,30,31,20,21,28,29,20,21,28,29]
+; AVX2-FP-NEXT: vpshufb %ymm4, %ymm3, %ymm3
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm5 = ymm0[0,2,0,2]
+; AVX2-FP-NEXT: vpshufb %ymm4, %ymm5, %ymm4
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm4 = ymm2[0,2,0,2]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,18,19,26,27,u,u,u,u,u,u,u,u]
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm4 = ymm1[0,2,1,3]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u,u,u,u,u]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm5 = ymm0[0,2,1,3]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u,u,u,u,u,18,19,26,27]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm5 = [0,1,8,9,6,7,14,15,6,7,14,15,2,3,10,11,16,17,24,25,16,17,24,25,24,25,26,27,18,19,26,27]
+; AVX2-FP-NEXT: vpshufb %ymm5, %ymm4, %ymm4
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm6 = ymm0[0,2,1,3]
+; AVX2-FP-NEXT: vpshufb %ymm5, %ymm6, %ymm5
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm5 = ymm2[0,2,1,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[4,5,12,13,u,u,u,u,u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u]
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0],ymm4[1,2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,3,1,3]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u,u,u,u,u]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm5 = [2,3,10,11,8,9,10,11,4,5,12,13,4,5,12,13,18,19,26,27,22,23,30,31,22,23,30,31,20,21,28,29]
+; AVX2-FP-NEXT: vpshufb %ymm5, %ymm0, %ymm0
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[1,3,1,3]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[2,3,10,11,u,u,u,u,u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u]
+; AVX2-FP-NEXT: vpshufb %ymm5, %ymm1, %ymm1
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm1 = ymm2[1,3,1,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u,u,u,u,u,22,23,30,31]
@@ -934,9 +951,10 @@ define void @store_i16_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,4,1,5,0,4,1,5]
; AVX2-FCP-NEXT: # ymm3 = mem[0,1,0,1]
; AVX2-FCP-NEXT: vpermd %ymm1, %ymm3, %ymm3
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,0,1,4,5,u,u,u,u,u,u,u,u,18,19,22,23,u,u,u,u,u,u,u,u,24,25,28,29]
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm0[0,2,0,2]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[0,1,8,9,u,u,u,u,u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,8,9,0,1,4,5,4,5,12,13,2,3,10,11,18,19,22,23,24,25,30,31,20,21,28,29,24,25,28,29]
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm3
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm0[0,2,0,2]
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm5, %ymm4
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7]
; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [0,4,4,0,0,4,4,0]
; AVX2-FCP-NEXT: # ymm4 = mem[0,1,0,1]
@@ -946,9 +964,10 @@ define void @store_i16_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [2,6,1,5,2,6,1,5]
; AVX2-FCP-NEXT: # ymm4 = mem[0,1,0,1]
; AVX2-FCP-NEXT: vpermd %ymm1, %ymm4, %ymm4
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,10,11,14,15,u,u,u,u,u,u,u,u,16,17,20,21,u,u,u,u,u,u,u,u]
-; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm0[0,2,1,3]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u,u,u,u,u,18,19,26,27]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [0,1,8,9,6,7,14,15,10,11,14,15,2,3,10,11,16,17,24,25,16,17,20,21,24,25,26,27,18,19,26,27]
+; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm4, %ymm4
+; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm0[0,2,1,3]
+; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm6, %ymm5
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm2[0,2,1,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[4,5,12,13,u,u,u,u,u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u]
@@ -956,9 +975,10 @@ define void @store_i16_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [7,3,3,7,7,3,3,7]
; AVX2-FCP-NEXT: # ymm5 = mem[0,1,0,1]
; AVX2-FCP-NEXT: vpermd %ymm0, %ymm5, %ymm0
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,8,9,12,13,u,u,u,u,u,u,u,u,22,23,18,19,u,u,u,u,u,u,u,u]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [2,3,10,11,8,9,10,11,8,9,12,13,4,5,12,13,18,19,26,27,22,23,18,19,22,23,30,31,20,21,28,29]
+; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm0, %ymm0
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[1,3,1,3]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[2,3,10,11,u,u,u,u,u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u]
+; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm1, %ymm1
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm2[1,3,1,3]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u,u,u,u,u,22,23,30,31]
@@ -979,26 +999,29 @@ define void @store_i16_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vinserti128 $1, (%rcx), %ymm1, %ymm1
; AVX512-NEXT: vinserti32x4 $1, (%r9), %zmm2, %zmm2
; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm1[0,2,1,3]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u,u,u,u,u]
-; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm0[0,2,1,3]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u,u,u,u,u,18,19,26,27]
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,8,9,6,7,14,15,6,7,14,15,2,3,10,11,16,17,24,25,16,17,24,25,24,25,26,27,18,19,26,27]
+; AVX512-NEXT: vpshufb %ymm4, %ymm3, %ymm3
+; AVX512-NEXT: vpermq {{.*#+}} ymm5 = ymm0[0,2,1,3]
+; AVX512-NEXT: vpshufb %ymm4, %ymm5, %ymm4
; AVX512-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm2[0,2,1,3]
; AVX512-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[4,5,12,13,u,u,u,u,u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u]
; AVX512-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0],ymm3[1,2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7]
; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm1[0,2,0,2]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,18,19,26,27,u,u,u,u,u,u,u,u,20,21,28,29]
-; AVX512-NEXT: vpermq {{.*#+}} ymm5 = ymm0[0,2,0,2]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[0,1,8,9,u,u,u,u,u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u]
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm5 = [0,1,8,9,0,1,8,9,4,5,12,13,2,3,10,11,18,19,26,27,24,25,30,31,20,21,28,29,20,21,28,29]
+; AVX512-NEXT: vpshufb %ymm5, %ymm4, %ymm4
+; AVX512-NEXT: vpermq {{.*#+}} ymm6 = ymm0[0,2,0,2]
+; AVX512-NEXT: vpshufb %ymm5, %ymm6, %ymm5
; AVX512-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7]
; AVX512-NEXT: vpermq {{.*#+}} ymm5 = ymm2[0,2,0,2]
; AVX512-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,18,19,26,27,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm4, %zmm3
; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,3,1,3]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u,u,u,u,u]
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = [2,3,10,11,8,9,10,11,4,5,12,13,4,5,12,13,18,19,26,27,22,23,30,31,22,23,30,31,20,21,28,29]
+; AVX512-NEXT: vpshufb %ymm4, %ymm0, %ymm0
; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[1,3,1,3]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[2,3,10,11,u,u,u,u,u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u]
+; AVX512-NEXT: vpshufb %ymm4, %ymm1, %ymm1
; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm2[1,3,1,3]
; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u,u,u,u,u,22,23,30,31]
@@ -1020,9 +1043,10 @@ define void @store_i16_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [2,6,1,5,2,6,1,5]
; AVX512-FCP-NEXT: # ymm3 = mem[0,1,0,1]
; AVX512-FCP-NEXT: vpermd %ymm1, %ymm3, %ymm3
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,10,11,14,15,u,u,u,u,u,u,u,u,16,17,20,21,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm0[0,2,1,3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u,u,u,u,u,18,19,26,27]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,8,9,6,7,14,15,10,11,14,15,2,3,10,11,16,17,24,25,16,17,20,21,24,25,26,27,18,19,26,27]
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm3
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm0[0,2,1,3]
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm5, %ymm4
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm2[0,2,1,3]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[4,5,12,13,u,u,u,u,u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u]
@@ -1030,9 +1054,10 @@ define void @store_i16_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [0,4,1,5,0,4,1,5]
; AVX512-FCP-NEXT: # ymm4 = mem[0,1,0,1]
; AVX512-FCP-NEXT: vpermd %ymm1, %ymm4, %ymm4
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,0,1,4,5,u,u,u,u,u,u,u,u,18,19,22,23,u,u,u,u,u,u,u,u,24,25,28,29]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm0[0,2,0,2]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[0,1,8,9,u,u,u,u,u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [0,1,8,9,0,1,4,5,4,5,12,13,2,3,10,11,18,19,22,23,24,25,30,31,20,21,28,29,24,25,28,29]
+; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm4, %ymm4
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm0[0,2,0,2]
+; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm6, %ymm5
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7]
; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [0,4,4,0,0,4,4,0]
; AVX512-FCP-NEXT: # ymm5 = mem[0,1,0,1]
@@ -1043,9 +1068,10 @@ define void @store_i16_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [7,3,3,7,7,3,3,7]
; AVX512-FCP-NEXT: # ymm4 = mem[0,1,0,1]
; AVX512-FCP-NEXT: vpermd %ymm0, %ymm4, %ymm0
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,8,9,12,13,u,u,u,u,u,u,u,u,22,23,18,19,u,u,u,u,u,u,u,u]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [2,3,10,11,8,9,10,11,8,9,12,13,4,5,12,13,18,19,26,27,22,23,18,19,22,23,30,31,20,21,28,29]
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm0, %ymm0
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[1,3,1,3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[2,3,10,11,u,u,u,u,u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u]
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm1, %ymm1
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm2[1,3,1,3]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u,u,u,u,u,22,23,30,31]
@@ -1065,26 +1091,29 @@ define void @store_i16_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vinserti128 $1, (%rcx), %ymm1, %ymm1
; AVX512DQ-NEXT: vinserti32x4 $1, (%r9), %zmm2, %zmm2
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm1[0,2,1,3]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u,u,u,u,u]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm0[0,2,1,3]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u,u,u,u,u,18,19,26,27]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,8,9,6,7,14,15,6,7,14,15,2,3,10,11,16,17,24,25,16,17,24,25,24,25,26,27,18,19,26,27]
+; AVX512DQ-NEXT: vpshufb %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm5 = ymm0[0,2,1,3]
+; AVX512DQ-NEXT: vpshufb %ymm4, %ymm5, %ymm4
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm2[0,2,1,3]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[4,5,12,13,u,u,u,u,u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0],ymm3[1,2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm1[0,2,0,2]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,18,19,26,27,u,u,u,u,u,u,u,u,20,21,28,29]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm5 = ymm0[0,2,0,2]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[0,1,8,9,u,u,u,u,u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm5 = [0,1,8,9,0,1,8,9,4,5,12,13,2,3,10,11,18,19,26,27,24,25,30,31,20,21,28,29,20,21,28,29]
+; AVX512DQ-NEXT: vpshufb %ymm5, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm6 = ymm0[0,2,0,2]
+; AVX512DQ-NEXT: vpshufb %ymm5, %ymm6, %ymm5
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm5 = ymm2[0,2,0,2]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,18,19,26,27,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm4, %zmm3
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,3,1,3]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u,u,u,u,u]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [2,3,10,11,8,9,10,11,4,5,12,13,4,5,12,13,18,19,26,27,22,23,30,31,22,23,30,31,20,21,28,29]
+; AVX512DQ-NEXT: vpshufb %ymm4, %ymm0, %ymm0
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[1,3,1,3]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[2,3,10,11,u,u,u,u,u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u]
+; AVX512DQ-NEXT: vpshufb %ymm4, %ymm1, %ymm1
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm2[1,3,1,3]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u,u,u,u,u,22,23,30,31]
@@ -1106,9 +1135,10 @@ define void @store_i16_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [2,6,1,5,2,6,1,5]
; AVX512DQ-FCP-NEXT: # ymm3 = mem[0,1,0,1]
; AVX512DQ-FCP-NEXT: vpermd %ymm1, %ymm3, %ymm3
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,10,11,14,15,u,u,u,u,u,u,u,u,16,17,20,21,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm0[0,2,1,3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u,u,u,u,u,18,19,26,27]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,8,9,6,7,14,15,10,11,14,15,2,3,10,11,16,17,24,25,16,17,20,21,24,25,26,27,18,19,26,27]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm3
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm0[0,2,1,3]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm5, %ymm4
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm2[0,2,1,3]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[4,5,12,13,u,u,u,u,u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u]
@@ -1116,9 +1146,10 @@ define void @store_i16_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [0,4,1,5,0,4,1,5]
; AVX512DQ-FCP-NEXT: # ymm4 = mem[0,1,0,1]
; AVX512DQ-FCP-NEXT: vpermd %ymm1, %ymm4, %ymm4
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,0,1,4,5,u,u,u,u,u,u,u,u,18,19,22,23,u,u,u,u,u,u,u,u,24,25,28,29]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm0[0,2,0,2]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[0,1,8,9,u,u,u,u,u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [0,1,8,9,0,1,4,5,4,5,12,13,2,3,10,11,18,19,22,23,24,25,30,31,20,21,28,29,24,25,28,29]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm4, %ymm4
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm0[0,2,0,2]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm6, %ymm5
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7]
; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [0,4,4,0,0,4,4,0]
; AVX512DQ-FCP-NEXT: # ymm5 = mem[0,1,0,1]
@@ -1129,9 +1160,10 @@ define void @store_i16_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [7,3,3,7,7,3,3,7]
; AVX512DQ-FCP-NEXT: # ymm4 = mem[0,1,0,1]
; AVX512DQ-FCP-NEXT: vpermd %ymm0, %ymm4, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,8,9,12,13,u,u,u,u,u,u,u,u,22,23,18,19,u,u,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [2,3,10,11,8,9,10,11,8,9,12,13,4,5,12,13,18,19,26,27,22,23,18,19,22,23,30,31,20,21,28,29]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm0, %ymm0
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[1,3,1,3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[2,3,10,11,u,u,u,u,u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm1, %ymm1
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm2[1,3,1,3]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u,u,u,u,u,22,23,30,31]
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
index e2a33019fffee..208ee607909ed 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
@@ -66,11 +66,13 @@ define void @store_i16_stride7_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],mem[0],xmm2[1],mem[1]
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
-; AVX-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,2,3]
-; AVX-NEXT: vpshufb {{.*#+}} xmm3 = xmm1[u,u,u,u,u,u,u,u,0,1,4,5,8,9,u,u]
-; AVX-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4,5,6],xmm2[7]
-; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[6,7,10,11,14,15,u,u,u,u,u,u,12,13,14,15]
-; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,2,3,6,7,10,11,u,u,u,u]
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,0,1,4,5,8,9,2,3]
+; AVX-NEXT: vpshufb %xmm2, %xmm0, %xmm3
+; AVX-NEXT: vpshufb %xmm2, %xmm1, %xmm2
+; AVX-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6],xmm3[7]
+; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [6,7,10,11,14,15,2,3,6,7,10,11,12,13,14,15]
+; AVX-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vpshufb %xmm3, %xmm1, %xmm1
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3,4,5],xmm0[6,7]
; AVX-NEXT: vpextrd $2, %xmm1, 24(%rax)
; AVX-NEXT: vmovq %xmm0, 16(%rax)
@@ -1222,10 +1224,11 @@ define void @store_i16_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm7
; AVX2-FP-NEXT: vinserti128 $1, %xmm5, %ymm4, %ymm8
; AVX2-FP-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm9
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm6 = ymm8[4,5,u,u,u,u,u,u,u,u,u,u,u,u,6,7,22,23,u,u,u,u,u,u,u,u,u,u,u,u,24,25]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm10 = ymm8[2,3,0,1]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm10 = ymm10[u,u,4,5,u,u,u,u,u,u,u,u,8,9,u,u,u,u,20,21,u,u,u,u,u,u,u,u,24,25,u,u]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm6 = ymm6[0],ymm10[1],ymm6[2,3,4,5],ymm10[6],ymm6[7,8],ymm10[9],ymm6[10,11,12,13],ymm10[14],ymm6[15]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm6 = [4,5,4,5,u,u,u,u,u,u,u,u,8,9,6,7,6,7,20,21,u,u,u,u,u,u,u,u,24,25,8,9]
+; AVX2-FP-NEXT: vpshufb %ymm6, %ymm8, %ymm10
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm11 = ymm8[2,3,0,1]
+; AVX2-FP-NEXT: vpshufb %ymm6, %ymm11, %ymm6
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm6 = ymm10[0],ymm6[1],ymm10[2,3,4,5],ymm6[6],ymm10[7,8],ymm6[9],ymm10[10,11,12,13],ymm6[14],ymm10[15]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm10 = ymm7[0,2,1,3]
; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,u,u,u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u]
; AVX2-FP-NEXT: vpmovsxbw {{.*#+}} ymm11 = [65535,65535,0,0,0,0,0,65535,65535,0,0,0,0,0,65535,65535]
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-8.ll
index 2b268af107f6b..13c3c6a9939c1 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-8.ll
@@ -506,20 +506,24 @@ define void @store_i16_stride8_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FP-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm1
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm2 = ymm1[u,u,u,u,u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,26,27]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm3 = ymm1[2,3,0,1]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,18,19,26,27,u,u,u,u]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm4[3],ymm2[4,5],ymm4[6],ymm2[7]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm4 = ymm0[0,1,8,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,26,27,u,u,u,u,u,u,u,u]
-; AVX2-FP-NEXT: vpermq {{.*#+}} ymm5 = ymm0[2,3,0,1]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm6 = ymm5[u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,18,19,26,27,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0],ymm6[1],ymm4[2,3],ymm6[4],ymm4[5,6,7]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3],ymm4[4,5],ymm2[6,7]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,30,31]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,0,1,8,9,0,1,8,9,16,17,18,19,20,21,22,23,18,19,26,27,18,19,26,27]
+; AVX2-FP-NEXT: vpshufb %ymm2, %ymm1, %ymm3
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm4 = ymm1[2,3,0,1]
+; AVX2-FP-NEXT: vpshufb %ymm2, %ymm4, %ymm2
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3],ymm3[4,5],ymm2[6],ymm3[7]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,8,9,0,1,8,9,8,9,10,11,12,13,14,15,18,19,26,27,18,19,26,27,24,25,26,27,28,29,30,31]
+; AVX2-FP-NEXT: vpshufb %ymm3, %ymm0, %ymm5
+; AVX2-FP-NEXT: vpermq {{.*#+}} ymm6 = ymm0[2,3,0,1]
+; AVX2-FP-NEXT: vpshufb %ymm3, %ymm6, %ymm3
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2,3],ymm3[4],ymm5[5,6,7]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,2,3,4,5,6,7,4,5,12,13,4,5,12,13,16,17,18,19,20,21,22,23,22,23,30,31,22,23,30,31]
+; AVX2-FP-NEXT: vpshufb %ymm3, %ymm1, %ymm1
+; AVX2-FP-NEXT: vpshufb %ymm3, %ymm4, %ymm3
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm3[3],ymm1[4,5],ymm3[6],ymm1[7]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[4,5,12,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u,u,u,u,u]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm3 = [4,5,12,13,4,5,12,13,8,9,10,11,12,13,14,15,22,23,30,31,22,23,30,31,24,25,26,27,28,29,30,31]
+; AVX2-FP-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-FP-NEXT: vpshufb %ymm3, %ymm6, %ymm3
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2,3],ymm3[4],ymm0[5,6,7]
; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-FP-NEXT: vmovdqa %ymm0, 32(%rax)
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
index de34e48c01d7d..e43aa56c96c28 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
@@ -704,10 +704,11 @@ define void @store_i8_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX2-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm4 = mem[0],zero
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm4 = ymm2[0,8,u,u,u,u,1,9,u,u,u,u,2,10,u,u,u,u,u,u,19,27,u,u,u,u,20,28,u,u,u,u]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,8,0,8,u,u,1,9,1,9,u,u,2,10,2,10,u,u,3,11,3,11,u,u,4,12,4,12,u,u,5,13]
+; AVX2-NEXT: vpshufb %ymm4, %ymm2, %ymm5
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,0,1]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,0,8,u,u,u,u,1,9,u,u,u,u,2,10,u,u,19,27,u,u,u,u,20,28,u,u,u,u,21,29]
-; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2,3],ymm2[4],ymm4[5,6],ymm2[7],ymm4[8],ymm2[9],ymm4[10,11],ymm2[12],ymm4[13,14],ymm2[15]
+; AVX2-NEXT: vpshufb %ymm4, %ymm2, %ymm2
+; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = ymm5[0],ymm2[1],ymm5[2,3],ymm2[4],ymm5[5,6],ymm2[7],ymm5[8],ymm2[9],ymm5[10,11],ymm2[12],ymm5[13,14],ymm2[15]
; AVX2-NEXT: vpshufb {{.*#+}} xmm4 = xmm3[2,10,1,9,0,8,3,11,u,u,u,u,4,12,u,u]
; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
; AVX2-NEXT: vpmovsxbw {{.*#+}} ymm5 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
@@ -735,10 +736,11 @@ define void @store_i8_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX2-FP-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
; AVX2-FP-NEXT: vmovq {{.*#+}} xmm4 = mem[0],zero
; AVX2-FP-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm4 = ymm2[0,8,u,u,u,u,1,9,u,u,u,u,2,10,u,u,u,u,u,u,19,27,u,u,u,u,20,28,u,u,u,u]
+; AVX2-FP-NEXT: vmovdqa {{.*#+}} ymm4 = [0,8,0,8,u,u,1,9,1,9,u,u,2,10,2,10,u,u,3,11,3,11,u,u,4,12,4,12,u,u,5,13]
+; AVX2-FP-NEXT: vpshufb %ymm4, %ymm2, %ymm5
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,0,1]
-; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,0,8,u,u,u,u,1,9,u,u,u,u,2,10,u,u,19,27,u,u,u,u,20,28,u,u,u,u,21,29]
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2,3],ymm2[4],ymm4[5,6],ymm2[7],ymm4[8],ymm2[9],ymm4[10,11],ymm2[12],ymm4[13,14],ymm2[15]
+; AVX2-FP-NEXT: vpshufb %ymm4, %ymm2, %ymm2
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm2 = ymm5[0],ymm2[1],ymm5[2,3],ymm2[4],ymm5[5,6],ymm2[7],ymm5[8],ymm2[9],ymm5[10,11],ymm2[12],ymm5[13,14],ymm2[15]
; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm4 = xmm3[2,10,1,9,0,8,3,11,u,u,u,u,4,12,u,u]
; AVX2-FP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
; AVX2-FP-NEXT: vpmovsxbw {{.*#+}} ymm5 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
@@ -766,10 +768,11 @@ define void @store_i8_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX2-FCP-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
; AVX2-FCP-NEXT: vmovq {{.*#+}} xmm4 = mem[0],zero
; AVX2-FCP-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm2[0,8,u,u,u,u,1,9,u,u,u,u,2,10,u,u,u,u,u,u,19,27,u,u,u,u,20,28,u,u,u,u]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [0,8,0,8,u,u,1,9,1,9,u,u,2,10,2,10,u,u,3,11,3,11,u,u,4,12,4,12,u,u,5,13]
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm5
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,0,1]
-; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,0,8,u,u,u,u,1,9,u,u,u,u,2,10,u,u,19,27,u,u,u,u,20,28,u,u,u,u,21,29]
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2,3],ymm2[4],ymm4[5,6],ymm2[7],ymm4[8],ymm2[9],ymm4[10,11],ymm2[12],ymm4[13,14],ymm2[15]
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm5[0],ymm2[1],ymm5[2,3],ymm2[4],ymm5[5,6],ymm2[7],ymm5[8],ymm2[9],ymm5[10,11],ymm2[12],ymm5[13,14],ymm2[15]
; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm3[2,10,1,9,0,8,3,11,u,u,u,u,4,12,u,u]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
; AVX2-FCP-NEXT: vpmovsxbw {{.*#+}} ymm5 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
@@ -797,10 +800,11 @@ define void @store_i8_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3
-; AVX512-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[0,8,u,u,u,u,1,9,u,u,u,u,2,10,u,u,u,u,u,u,19,27,u,u,u,u,20,28,u,u,u,u]
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = [0,8,0,8,u,u,1,9,1,9,u,u,2,10,2,10,u,u,3,11,3,11,u,u,4,12,4,12,u,u,5,13]
+; AVX512-NEXT: vpshufb %ymm4, %ymm3, %ymm5
; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,0,1]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,0,8,u,u,u,u,1,9,u,u,u,u,2,10,u,u,19,27,u,u,u,u,20,28,u,u,u,u,21,29]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7],ymm4[8],ymm3[9],ymm4[10,11],ymm3[12],ymm4[13,14],ymm3[15]
+; AVX512-NEXT: vpshufb %ymm4, %ymm3, %ymm3
+; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2,3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8],ymm3[9],ymm5[10,11],ymm3[12],ymm5[13,14],ymm3[15]
; AVX512-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[2,10,1,9,0,8,3,11,u,u,u,u,4,12,u,u]
; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm4
@@ -828,10 +832,11 @@ define void @store_i8_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512-FCP-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
; AVX512-FCP-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[0,8,u,u,u,u,1,9,u,u,u,u,2,10,u,u,u,u,u,u,19,27,u,u,u,u,20,28,u,u,u,u]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [0,8,0,8,u,u,1,9,1,9,u,u,2,10,2,10,u,u,3,11,3,11,u,u,4,12,4,12,u,u,5,13]
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm5
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,0,1]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,0,8,u,u,u,u,1,9,u,u,u,u,2,10,u,u,19,27,u,u,u,u,20,28,u,u,u,u,21,29]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7],ymm4[8],ymm3[9],ymm4[10,11],ymm3[12],ymm4[13,14],ymm3[15]
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm3
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2,3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8],ymm3[9],ymm5[10,11],ymm3[12],ymm5[13,14],ymm3[15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[2,10,1,9,0,8,3,11,u,u,u,u,4,12,u,u]
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm4
@@ -859,10 +864,11 @@ define void @store_i8_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512DQ-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[0,8,u,u,u,u,1,9,u,u,u,u,2,10,u,u,u,u,u,u,19,27,u,u,u,u,20,28,u,u,u,u]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [0,8,0,8,u,u,1,9,1,9,u,u,2,10,2,10,u,u,3,11,3,11,u,u,4,12,4,12,u,u,5,13]
+; AVX512DQ-NEXT: vpshufb %ymm4, %ymm3, %ymm5
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,0,1]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,0,8,u,u,u,u,1,9,u,u,u,u,2,10,u,u,19,27,u,u,u,u,20,28,u,u,u,u,21,29]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7],ymm4[8],ymm3[9],ymm4[10,11],ymm3[12],ymm4[13,14],ymm3[15]
+; AVX512DQ-NEXT: vpshufb %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2,3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8],ymm3[9],ymm5[10,11],ymm3[12],ymm5[13,14],ymm3[15]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[2,10,1,9,0,8,3,11,u,u,u,u,4,12,u,u]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm4
@@ -890,10 +896,11 @@ define void @store_i8_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512DQ-FCP-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
; AVX512DQ-FCP-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[0,8,u,u,u,u,1,9,u,u,u,u,2,10,u,u,u,u,u,u,19,27,u,u,u,u,20,28,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [0,8,0,8,u,u,1,9,1,9,u,u,2,10,2,10,u,u,3,11,3,11,u,u,4,12,4,12,u,u,5,13]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm5
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,0,1]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,0,8,u,u,u,u,1,9,u,u,u,u,2,10,u,u,19,27,u,u,u,u,20,28,u,u,u,u,21,29]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7],ymm4[8],ymm3[9],ymm4[10,11],ymm3[12],ymm4[13,14],ymm3[15]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm3
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2,3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8],ymm3[9],ymm5[10,11],ymm3[12],ymm5[13,14],ymm3[15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[2,10,1,9,0,8,3,11,u,u,u,u,4,12,u,u]
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm4
@@ -921,10 +928,11 @@ define void @store_i8_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512BW-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
; AVX512BW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[0,8,u,u,u,u,1,9,u,u,u,u,2,10,u,u,u,u,u,u,19,27,u,u,u,u,20,28,u,u,u,u]
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm4 = [0,8,0,8,u,u,1,9,1,9,u,u,2,10,2,10,u,u,3,11,3,11,u,u,4,12,4,12,u,u,5,13]
+; AVX512BW-NEXT: vpshufb %ymm4, %ymm3, %ymm5
; AVX512BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,0,1]
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,0,8,u,u,u,u,1,9,u,u,u,u,2,10,u,u,19,27,u,u,u,u,20,28,u,u,u,u,21,29]
-; AVX512BW-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7],ymm4[8],ymm3[9],ymm4[10,11],ymm3[12],ymm4[13,14],ymm3[15]
+; AVX512BW-NEXT: vpshufb %ymm4, %ymm3, %ymm3
+; AVX512BW-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2,3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8],ymm3[9],ymm5[10,11],ymm3[12],ymm5[13,14],ymm3[15]
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[2,10,1,9,0,8,3,11,u,u,u,u,4,12,u,u]
; AVX512BW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
; AVX512BW-NEXT: movw $18724, %cx # imm = 0x4924
@@ -954,10 +962,11 @@ define void @store_i8_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
; AVX512BW-FCP-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
; AVX512BW-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[0,8,u,u,u,u,1,9,u,u,u,u,2,10,u,u,u,u,u,u,19,27,u,u,u,u,20,28,u,u,u,u]
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [0,8,0,8,u,u,1,9,1,9,u,u,2,10,2,10,u,u,3,11,3,11,u,u,4,12,4,12,u,u,5,13]
+; AVX512BW-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm5
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,0,1]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,0,8,u,u,u,u,1,9,u,u,u,u,2,10,u,u,19,27,u,u,u,u,20,28,u,u,u,u,21,29]
-; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7],ymm4[8],ymm3[9],ymm4[10,11],ymm3[12],ymm4[13,14],ymm3[15]
+; AVX512BW-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm3
+; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2,3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8],ymm3[9],ymm5[10,11],ymm3[12],ymm5[13,14],ymm3[15]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[2,10,1,9,0,8,3,11,u,u,u,u,4,12,u,u]
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
; AVX512BW-FCP-NEXT: movw $18724, %cx # imm = 0x4924
@@ -987,10 +996,11 @@ define void @store_i8_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
; AVX512DQ-BW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[0,8,u,u,u,u,1,9,u,u,u,u,2,10,u,u,u,u,u,u,19,27,u,u,u,u,20,28,u,u,u,u]
+; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} ymm4 = [0,8,0,8,u,u,1,9,1,9,u,u,2,10,2,10,u,u,3,11,3,11,u,u,4,12,4,12,u,u,5,13]
+; AVX512DQ-BW-NEXT: vpshufb %ymm4, %ymm3, %ymm5
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,0,1]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,0,8,u,u,u,u,1,9,u,u,u,u,2,10,u,u,19,27,u,u,u,u,20,28,u,u,u,u,21,29]
-; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7],ymm4[8],ymm3[9],ymm4[10,11],ymm3[12],ymm4[13,14],ymm3[15]
+; AVX512DQ-BW-NEXT: vpshufb %ymm4, %ymm3, %ymm3
+; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2,3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8],ymm3[9],ymm5[10,11],ymm3[12],ymm5[13,14],ymm3[15]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[2,10,1,9,0,8,3,11,u,u,u,u,4,12,u,u]
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
; AVX512DQ-BW-NEXT: movw $18724, %cx # imm = 0x4924
@@ -1020,10 +1030,11 @@ define void @store_i8_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
; AVX512DQ-BW-FCP-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[0,8,u,u,u,u,1,9,u,u,u,u,2,10,u,u,u,u,u,u,19,27,u,u,u,u,20,28,u,u,u,u]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [0,8,0,8,u,u,1,9,1,9,u,u,2,10,2,10,u,u,3,11,3,11,u,u,4,12,4,12,u,u,5,13]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm5
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,0,1]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,0,8,u,u,u,u,1,9,u,u,u,u,2,10,u,u,19,27,u,u,u,u,20,28,u,u,u,u,21,29]
-; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7],ymm4[8],ymm3[9],ymm4[10,11],ymm3[12],ymm4[13,14],ymm3[15]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm3
+; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2,3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8],ymm3[9],ymm5[10,11],ymm3[12],ymm5[13,14],ymm3[15]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[2,10,1,9,0,8,3,11,u,u,u,u,4,12,u,u]
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
; AVX512DQ-BW-FCP-NEXT: movw $18724, %cx # imm = 0x4924
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll
index 2b4d0b1409a79..c1e61c6685715 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll
@@ -991,20 +991,24 @@ define void @store_i8_stride8_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm1
-; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm1[u,u,u,u,4,12],zero,zero,ymm1[u,u,u,u,5,13],zero,zero,ymm1[u,u,u,u],zero,zero,ymm1[22,30,u,u,u,u],zero,zero,ymm1[23,31]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm0[4,12],zero,zero,ymm0[u,u,u,u,5,13],zero,zero,ymm0[u,u,u,u],zero,zero,ymm0[22,30,u,u,u,u],zero,zero,ymm0[23,31,u,u,u,u]
-; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3],ymm3[4],ymm2[5],ymm3[6],ymm2[7]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm1[u,u,u,u,0,8],zero,zero,ymm1[u,u,u,u,1,9],zero,zero,ymm1[u,u,u,u],zero,zero,ymm1[18,26,u,u,u,u],zero,zero,ymm1[19,27]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm4 = ymm0[0,8],zero,zero,ymm0[u,u,u,u,1,9],zero,zero,ymm0[u,u,u,u],zero,zero,ymm0[18,26,u,u,u,u],zero,zero,ymm0[19,27,u,u,u,u]
-; AVX512-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2],ymm3[3],ymm4[4],ymm3[5],ymm4[6],ymm3[7]
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [4,12,128,128,4,12,128,128,5,13,128,128,5,13,128,128,128,128,22,30,128,128,22,30,128,128,23,31,128,128,23,31]
+; AVX512-NEXT: vpshufb %ymm2, %ymm1, %ymm3
+; AVX512-NEXT: vpshufb %ymm2, %ymm0, %ymm2
+; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2],ymm3[3],ymm2[4],ymm3[5],ymm2[6],ymm3[7]
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [0,8,128,128,0,8,128,128,1,9,128,128,1,9,128,128,128,128,18,26,128,128,18,26,128,128,19,27,128,128,19,27]
+; AVX512-NEXT: vpshufb %ymm3, %ymm1, %ymm4
+; AVX512-NEXT: vpshufb %ymm3, %ymm0, %ymm3
+; AVX512-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4],ymm4[5],ymm3[6],ymm4[7]
; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,0,1]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm1[u,u,u,u],zero,zero,ymm1[4,12,u,u,u,u],zero,zero,ymm1[5,13,u,u,u,u,22,30],zero,zero,ymm1[u,u,u,u,23,31],zero,zero
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [128,128,4,12,128,128,4,12,128,128,5,13,128,128,5,13,22,30,128,128,22,30,128,128,23,31,128,128,23,31,128,128]
+; AVX512-NEXT: vpshufb %ymm3, %ymm1, %ymm4
; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm4 = zero,zero,ymm0[4,12,u,u,u,u],zero,zero,ymm0[5,13,u,u,u,u,22,30],zero,zero,ymm0[u,u,u,u,23,31],zero,zero,ymm0[u,u,u,u]
-; AVX512-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2],ymm3[3],ymm4[4],ymm3[5],ymm4[6],ymm3[7]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u],zero,zero,ymm1[0,8,u,u,u,u],zero,zero,ymm1[1,9,u,u,u,u,18,26],zero,zero,ymm1[u,u,u,u,19,27],zero,zero
-; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,ymm0[0,8,u,u,u,u],zero,zero,ymm0[1,9,u,u,u,u,18,26],zero,zero,ymm0[u,u,u,u,19,27],zero,zero,ymm0[u,u,u,u]
+; AVX512-NEXT: vpshufb %ymm3, %ymm0, %ymm3
+; AVX512-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4],ymm4[5],ymm3[6],ymm4[7]
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = [128,128,0,8,128,128,0,8,128,128,1,9,128,128,1,9,18,26,128,128,18,26,128,128,19,27,128,128,19,27,128,128]
+; AVX512-NEXT: vpshufb %ymm4, %ymm1, %ymm1
+; AVX512-NEXT: vpshufb %ymm4, %ymm0, %ymm0
; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0
; AVX512-NEXT: vpord %zmm0, %zmm2, %zmm0
@@ -1071,20 +1075,24 @@ define void @store_i8_stride8_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm1
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm1[u,u,u,u,4,12],zero,zero,ymm1[u,u,u,u,5,13],zero,zero,ymm1[u,u,u,u],zero,zero,ymm1[22,30,u,u,u,u],zero,zero,ymm1[23,31]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm0[4,12],zero,zero,ymm0[u,u,u,u,5,13],zero,zero,ymm0[u,u,u,u],zero,zero,ymm0[22,30,u,u,u,u],zero,zero,ymm0[23,31,u,u,u,u]
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3],ymm3[4],ymm2[5],ymm3[6],ymm2[7]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm1[u,u,u,u,0,8],zero,zero,ymm1[u,u,u,u,1,9],zero,zero,ymm1[u,u,u,u],zero,zero,ymm1[18,26,u,u,u,u],zero,zero,ymm1[19,27]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm4 = ymm0[0,8],zero,zero,ymm0[u,u,u,u,1,9],zero,zero,ymm0[u,u,u,u],zero,zero,ymm0[18,26,u,u,u,u],zero,zero,ymm0[19,27,u,u,u,u]
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2],ymm3[3],ymm4[4],ymm3[5],ymm4[6],ymm3[7]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [4,12,128,128,4,12,128,128,5,13,128,128,5,13,128,128,128,128,22,30,128,128,22,30,128,128,23,31,128,128,23,31]
+; AVX512DQ-NEXT: vpshufb %ymm2, %ymm1, %ymm3
+; AVX512DQ-NEXT: vpshufb %ymm2, %ymm0, %ymm2
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2],ymm3[3],ymm2[4],ymm3[5],ymm2[6],ymm3[7]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [0,8,128,128,0,8,128,128,1,9,128,128,1,9,128,128,128,128,18,26,128,128,18,26,128,128,19,27,128,128,19,27]
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm1, %ymm4
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm0, %ymm3
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4],ymm4[5],ymm3[6],ymm4[7]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,0,1]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm1[u,u,u,u],zero,zero,ymm1[4,12,u,u,u,u],zero,zero,ymm1[5,13,u,u,u,u,22,30],zero,zero,ymm1[u,u,u,u,23,31],zero,zero
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [128,128,4,12,128,128,4,12,128,128,5,13,128,128,5,13,22,30,128,128,22,30,128,128,23,31,128,128,23,31,128,128]
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm1, %ymm4
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm4 = zero,zero,ymm0[4,12,u,u,u,u],zero,zero,ymm0[5,13,u,u,u,u,22,30],zero,zero,ymm0[u,u,u,u,23,31],zero,zero,ymm0[u,u,u,u]
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2],ymm3[3],ymm4[4],ymm3[5],ymm4[6],ymm3[7]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u],zero,zero,ymm1[0,8,u,u,u,u],zero,zero,ymm1[1,9,u,u,u,u,18,26],zero,zero,ymm1[u,u,u,u,19,27],zero,zero
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,ymm0[0,8,u,u,u,u],zero,zero,ymm0[1,9,u,u,u,u,18,26],zero,zero,ymm0[u,u,u,u,19,27],zero,zero,ymm0[u,u,u,u]
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm0, %ymm3
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4],ymm4[5],ymm3[6],ymm4[7]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [128,128,0,8,128,128,0,8,128,128,1,9,128,128,1,9,18,26,128,128,18,26,128,128,19,27,128,128,19,27,128,128]
+; AVX512DQ-NEXT: vpshufb %ymm4, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpshufb %ymm4, %ymm0, %ymm0
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0
; AVX512DQ-NEXT: vpord %zmm0, %zmm2, %zmm0
@@ -2076,42 +2084,40 @@ define void @store_i8_stride8_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm3
; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm2, %zmm2
; AVX512BW-NEXT: vpermq {{.*#+}} zmm4 = zmm2[0,2,0,2,4,6,4,6]
-; AVX512BW-NEXT: vpmovsxwd {{.*#+}} zmm5 = [0,2048,0,2305,0,2562,0,2819,0,3076,0,3333,0,3590,0,3847]
-; AVX512BW-NEXT: vpshufb %zmm5, %zmm4, %zmm4
+; AVX512BW-NEXT: vpshufb {{.*#+}} zmm4 = zmm4[u,u,u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u]
; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm3[4,5,6,7,4,5,6,7]
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm6 = zmm3[0,2,0,2,4,6,4,6]
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm7 = [u,u,u,u,u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,2,10,u,u,u,u,u,u,3,11,u,u,u,u,u,u,4,12,u,u,u,u,u,u,5,13,u,u,u,u,u,u,6,14,u,u,u,u,u,u,7,15]
-; AVX512BW-NEXT: vpshufb %zmm7, %zmm6, %zmm6
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm5 = zmm3[0,2,0,2,4,6,4,6]
+; AVX512BW-NEXT: vpshufb {{.*#+}} zmm5 = zmm5[u,u,u,u,u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63]
; AVX512BW-NEXT: movl $-2004318072, %ecx # imm = 0x88888888
; AVX512BW-NEXT: kmovd %ecx, %k1
-; AVX512BW-NEXT: vmovdqu16 %zmm6, %zmm4 {%k1}
+; AVX512BW-NEXT: vmovdqu16 %zmm5, %zmm4 {%k1}
; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm6 = zmm0[0,2,0,2,4,6,4,6]
-; AVX512BW-NEXT: vpmovsxwq {{.*#+}} zmm8 = [2048,2305,2562,2819,3076,3333,3590,3847]
-; AVX512BW-NEXT: vpshufb %zmm8, %zmm6, %zmm6
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm5 = zmm0[0,2,0,2,4,6,4,6]
+; AVX512BW-NEXT: vpshufb {{.*#+}} zmm5 = zmm5[0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u,u,u,u,u]
; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm1[4,5,6,7,4,5,6,7]
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm9 = zmm1[0,2,0,2,4,6,4,6]
-; AVX512BW-NEXT: vpmovsxdq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10
-; AVX512BW-NEXT: vpshufb %zmm10, %zmm9, %zmm9
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm6 = zmm1[0,2,0,2,4,6,4,6]
+; AVX512BW-NEXT: vpshufb {{.*#+}} zmm6 = zmm6[u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u,u,u]
; AVX512BW-NEXT: movl $572662306, %ecx # imm = 0x22222222
; AVX512BW-NEXT: kmovd %ecx, %k2
-; AVX512BW-NEXT: vmovdqu16 %zmm9, %zmm6 {%k2}
+; AVX512BW-NEXT: vmovdqu16 %zmm6, %zmm5 {%k2}
; AVX512BW-NEXT: movw $-21846, %cx # imm = 0xAAAA
; AVX512BW-NEXT: kmovd %ecx, %k3
-; AVX512BW-NEXT: vmovdqa32 %zmm4, %zmm6 {%k3}
+; AVX512BW-NEXT: vmovdqa32 %zmm4, %zmm5 {%k3}
; AVX512BW-NEXT: vpermq {{.*#+}} zmm2 = zmm2[1,3,1,3,5,7,5,7]
-; AVX512BW-NEXT: vpshufb %zmm5, %zmm2, %zmm2
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = [u,u,u,u,0,8,0,8,u,u,u,u,1,9,1,9,u,u,u,u,2,10,2,10,u,u,u,u,3,11,3,11,u,u,u,u,4,12,4,12,u,u,u,u,5,13,5,13,u,u,u,u,6,14,6,14,u,u,u,u,7,15,7,15]
+; AVX512BW-NEXT: vpshufb %zmm4, %zmm2, %zmm2
; AVX512BW-NEXT: vpermq {{.*#+}} zmm3 = zmm3[1,3,1,3,5,7,5,7]
-; AVX512BW-NEXT: vpshufb %zmm7, %zmm3, %zmm3
+; AVX512BW-NEXT: vpshufb %zmm4, %zmm3, %zmm3
; AVX512BW-NEXT: vmovdqu16 %zmm3, %zmm2 {%k1}
; AVX512BW-NEXT: vpermq {{.*#+}} zmm0 = zmm0[1,3,1,3,5,7,5,7]
-; AVX512BW-NEXT: vpshufb %zmm8, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovsxdq {{.*#+}} zmm3 = [134219776,151062785,167905794,184748803,201591812,218434821,235277830,252120839]
+; AVX512BW-NEXT: vpshufb %zmm3, %zmm0, %zmm0
; AVX512BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[1,3,1,3,5,7,5,7]
-; AVX512BW-NEXT: vpshufb %zmm10, %zmm1, %zmm1
+; AVX512BW-NEXT: vpshufb %zmm3, %zmm1, %zmm1
; AVX512BW-NEXT: vmovdqu16 %zmm1, %zmm0 {%k2}
; AVX512BW-NEXT: vmovdqa32 %zmm2, %zmm0 {%k3}
; AVX512BW-NEXT: vmovdqa64 %zmm0, 64(%rax)
-; AVX512BW-NEXT: vmovdqa64 %zmm6, (%rax)
+; AVX512BW-NEXT: vmovdqa64 %zmm5, (%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -2132,43 +2138,41 @@ define void @store_i8_stride8_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm5
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm6 = [0,2,0,2,12,14,12,14]
; AVX512BW-FCP-NEXT: vpermt2q %zmm5, %zmm6, %zmm3
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm7 = [u,u,u,u,u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,2,10,u,u,u,u,u,u,3,11,u,u,u,u,u,u,4,12,u,u,u,u,u,u,5,13,u,u,u,u,u,u,6,14,u,u,u,u,u,u,7,15]
-; AVX512BW-FCP-NEXT: vpshufb %zmm7, %zmm3, %zmm3
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm3 = zmm3[u,u,u,u,u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63]
; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm2, %zmm2
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm8 = zmm2[0,2,0,2,4,6,4,6]
-; AVX512BW-FCP-NEXT: vpmovsxwd {{.*#+}} zmm9 = [0,2048,0,2305,0,2562,0,2819,0,3076,0,3333,0,3590,0,3847]
-; AVX512BW-FCP-NEXT: vpshufb %zmm9, %zmm8, %zmm8
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm7 = zmm2[0,2,0,2,4,6,4,6]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm7 = zmm7[u,u,u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u]
; AVX512BW-FCP-NEXT: movl $-2004318072, %ecx # imm = 0x88888888
; AVX512BW-FCP-NEXT: kmovd %ecx, %k1
-; AVX512BW-FCP-NEXT: vmovdqu16 %zmm3, %zmm8 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqu16 %zmm3, %zmm7 {%k1}
; AVX512BW-FCP-NEXT: vpermt2q %zmm4, %zmm6, %zmm1
-; AVX512BW-FCP-NEXT: vpmovsxdq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3
-; AVX512BW-FCP-NEXT: vpshufb %zmm3, %zmm1, %zmm1
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm1 = zmm1[u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u,u,u]
; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm6 = zmm0[0,2,0,2,4,6,4,6]
-; AVX512BW-FCP-NEXT: vpmovsxwq {{.*#+}} zmm10 = [2048,2305,2562,2819,3076,3333,3590,3847]
-; AVX512BW-FCP-NEXT: vpshufb %zmm10, %zmm6, %zmm6
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm0[0,2,0,2,4,6,4,6]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm3 = zmm3[0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u,u,u,u,u]
; AVX512BW-FCP-NEXT: movl $572662306, %ecx # imm = 0x22222222
; AVX512BW-FCP-NEXT: kmovd %ecx, %k2
-; AVX512BW-FCP-NEXT: vmovdqu16 %zmm1, %zmm6 {%k2}
+; AVX512BW-FCP-NEXT: vmovdqu16 %zmm1, %zmm3 {%k2}
; AVX512BW-FCP-NEXT: movw $-21846, %cx # imm = 0xAAAA
; AVX512BW-FCP-NEXT: kmovd %ecx, %k3
-; AVX512BW-FCP-NEXT: vmovdqa32 %zmm8, %zmm6 {%k3}
+; AVX512BW-FCP-NEXT: vmovdqa32 %zmm7, %zmm3 {%k3}
; AVX512BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [5,7,5,7,5,7,5,7]
; AVX512BW-FCP-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-FCP-NEXT: vpermq %zmm5, %zmm1, %zmm5
-; AVX512BW-FCP-NEXT: vpshufb %zmm7, %zmm5, %zmm5
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm6 = [u,u,u,u,0,8,0,8,u,u,u,u,1,9,1,9,u,u,u,u,2,10,2,10,u,u,u,u,3,11,3,11,u,u,u,u,4,12,4,12,u,u,u,u,5,13,5,13,u,u,u,u,6,14,6,14,u,u,u,u,7,15,7,15]
+; AVX512BW-FCP-NEXT: vpshufb %zmm6, %zmm5, %zmm5
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm2 = zmm2[1,3,1,3,5,7,5,7]
-; AVX512BW-FCP-NEXT: vpshufb %zmm9, %zmm2, %zmm2
+; AVX512BW-FCP-NEXT: vpshufb %zmm6, %zmm2, %zmm2
; AVX512BW-FCP-NEXT: vmovdqu16 %zmm5, %zmm2 {%k1}
; AVX512BW-FCP-NEXT: vpermq %zmm4, %zmm1, %zmm1
-; AVX512BW-FCP-NEXT: vpshufb %zmm3, %zmm1, %zmm1
+; AVX512BW-FCP-NEXT: vpmovsxdq {{.*#+}} zmm4 = [134219776,151062785,167905794,184748803,201591812,218434821,235277830,252120839]
+; AVX512BW-FCP-NEXT: vpshufb %zmm4, %zmm1, %zmm1
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[1,3,1,3,5,7,5,7]
-; AVX512BW-FCP-NEXT: vpshufb %zmm10, %zmm0, %zmm0
+; AVX512BW-FCP-NEXT: vpshufb %zmm4, %zmm0, %zmm0
; AVX512BW-FCP-NEXT: vmovdqu16 %zmm1, %zmm0 {%k2}
; AVX512BW-FCP-NEXT: vmovdqa32 %zmm2, %zmm0 {%k3}
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, 64(%rax)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm6, (%rax)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm3, (%rax)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
@@ -2189,42 +2193,40 @@ define void @store_i8_stride8_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm3
; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm2, %zmm2, %zmm2
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm4 = zmm2[0,2,0,2,4,6,4,6]
-; AVX512DQ-BW-NEXT: vpmovsxwd {{.*#+}} zmm5 = [0,2048,0,2305,0,2562,0,2819,0,3076,0,3333,0,3590,0,3847]
-; AVX512DQ-BW-NEXT: vpshufb %zmm5, %zmm4, %zmm4
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm4 = zmm4[u,u,u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u]
; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm3[4,5,6,7,4,5,6,7]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm6 = zmm3[0,2,0,2,4,6,4,6]
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm7 = [u,u,u,u,u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,2,10,u,u,u,u,u,u,3,11,u,u,u,u,u,u,4,12,u,u,u,u,u,u,5,13,u,u,u,u,u,u,6,14,u,u,u,u,u,u,7,15]
-; AVX512DQ-BW-NEXT: vpshufb %zmm7, %zmm6, %zmm6
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm5 = zmm3[0,2,0,2,4,6,4,6]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm5 = zmm5[u,u,u,u,u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63]
; AVX512DQ-BW-NEXT: movl $-2004318072, %ecx # imm = 0x88888888
; AVX512DQ-BW-NEXT: kmovd %ecx, %k1
-; AVX512DQ-BW-NEXT: vmovdqu16 %zmm6, %zmm4 {%k1}
+; AVX512DQ-BW-NEXT: vmovdqu16 %zmm5, %zmm4 {%k1}
; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm6 = zmm0[0,2,0,2,4,6,4,6]
-; AVX512DQ-BW-NEXT: vpmovsxwq {{.*#+}} zmm8 = [2048,2305,2562,2819,3076,3333,3590,3847]
-; AVX512DQ-BW-NEXT: vpshufb %zmm8, %zmm6, %zmm6
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm5 = zmm0[0,2,0,2,4,6,4,6]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm5 = zmm5[0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u,u,u,u,u]
; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm1[4,5,6,7,4,5,6,7]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm9 = zmm1[0,2,0,2,4,6,4,6]
-; AVX512DQ-BW-NEXT: vpmovsxdq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10
-; AVX512DQ-BW-NEXT: vpshufb %zmm10, %zmm9, %zmm9
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm6 = zmm1[0,2,0,2,4,6,4,6]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm6 = zmm6[u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u,u,u]
; AVX512DQ-BW-NEXT: movl $572662306, %ecx # imm = 0x22222222
; AVX512DQ-BW-NEXT: kmovd %ecx, %k2
-; AVX512DQ-BW-NEXT: vmovdqu16 %zmm9, %zmm6 {%k2}
+; AVX512DQ-BW-NEXT: vmovdqu16 %zmm6, %zmm5 {%k2}
; AVX512DQ-BW-NEXT: movw $-21846, %cx # imm = 0xAAAA
; AVX512DQ-BW-NEXT: kmovd %ecx, %k3
-; AVX512DQ-BW-NEXT: vmovdqa32 %zmm4, %zmm6 {%k3}
+; AVX512DQ-BW-NEXT: vmovdqa32 %zmm4, %zmm5 {%k3}
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm2 = zmm2[1,3,1,3,5,7,5,7]
-; AVX512DQ-BW-NEXT: vpshufb %zmm5, %zmm2, %zmm2
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = [u,u,u,u,0,8,0,8,u,u,u,u,1,9,1,9,u,u,u,u,2,10,2,10,u,u,u,u,3,11,3,11,u,u,u,u,4,12,4,12,u,u,u,u,5,13,5,13,u,u,u,u,6,14,6,14,u,u,u,u,7,15,7,15]
+; AVX512DQ-BW-NEXT: vpshufb %zmm4, %zmm2, %zmm2
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm3 = zmm3[1,3,1,3,5,7,5,7]
-; AVX512DQ-BW-NEXT: vpshufb %zmm7, %zmm3, %zmm3
+; AVX512DQ-BW-NEXT: vpshufb %zmm4, %zmm3, %zmm3
; AVX512DQ-BW-NEXT: vmovdqu16 %zmm3, %zmm2 {%k1}
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm0 = zmm0[1,3,1,3,5,7,5,7]
-; AVX512DQ-BW-NEXT: vpshufb %zmm8, %zmm0, %zmm0
+; AVX512DQ-BW-NEXT: vpmovsxdq {{.*#+}} zmm3 = [134219776,151062785,167905794,184748803,201591812,218434821,235277830,252120839]
+; AVX512DQ-BW-NEXT: vpshufb %zmm3, %zmm0, %zmm0
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[1,3,1,3,5,7,5,7]
-; AVX512DQ-BW-NEXT: vpshufb %zmm10, %zmm1, %zmm1
+; AVX512DQ-BW-NEXT: vpshufb %zmm3, %zmm1, %zmm1
; AVX512DQ-BW-NEXT: vmovdqu16 %zmm1, %zmm0 {%k2}
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm2, %zmm0 {%k3}
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, 64(%rax)
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm6, (%rax)
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm5, (%rax)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
@@ -2245,43 +2247,41 @@ define void @store_i8_stride8_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm5
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm6 = [0,2,0,2,12,14,12,14]
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm5, %zmm6, %zmm3
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm7 = [u,u,u,u,u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,2,10,u,u,u,u,u,u,3,11,u,u,u,u,u,u,4,12,u,u,u,u,u,u,5,13,u,u,u,u,u,u,6,14,u,u,u,u,u,u,7,15]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm7, %zmm3, %zmm3
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm3 = zmm3[u,u,u,u,u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63]
; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm2, %zmm2
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm8 = zmm2[0,2,0,2,4,6,4,6]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxwd {{.*#+}} zmm9 = [0,2048,0,2305,0,2562,0,2819,0,3076,0,3333,0,3590,0,3847]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm9, %zmm8, %zmm8
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm7 = zmm2[0,2,0,2,4,6,4,6]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm7 = zmm7[u,u,u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u]
; AVX512DQ-BW-FCP-NEXT: movl $-2004318072, %ecx # imm = 0x88888888
; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm3, %zmm8 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm3, %zmm7 {%k1}
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm4, %zmm6, %zmm1
-; AVX512DQ-BW-FCP-NEXT: vpmovsxdq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3
-; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm3, %zmm1, %zmm1
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm1 = zmm1[u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u,u,u]
; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm6 = zmm0[0,2,0,2,4,6,4,6]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxwq {{.*#+}} zmm10 = [2048,2305,2562,2819,3076,3333,3590,3847]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm10, %zmm6, %zmm6
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm0[0,2,0,2,4,6,4,6]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm3 = zmm3[0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u,u,u,u,u]
; AVX512DQ-BW-FCP-NEXT: movl $572662306, %ecx # imm = 0x22222222
; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k2
-; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm1, %zmm6 {%k2}
+; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm1, %zmm3 {%k2}
; AVX512DQ-BW-FCP-NEXT: movw $-21846, %cx # imm = 0xAAAA
; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k3
-; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm8, %zmm6 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm7, %zmm3 {%k3}
; AVX512DQ-BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [5,7,5,7,5,7,5,7]
; AVX512DQ-BW-FCP-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT: vpermq %zmm5, %zmm1, %zmm5
-; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm7, %zmm5, %zmm5
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm6 = [u,u,u,u,0,8,0,8,u,u,u,u,1,9,1,9,u,u,u,u,2,10,2,10,u,u,u,u,3,11,3,11,u,u,u,u,4,12,4,12,u,u,u,u,5,13,5,13,u,u,u,u,6,14,6,14,u,u,u,u,7,15,7,15]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm6, %zmm5, %zmm5
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm2 = zmm2[1,3,1,3,5,7,5,7]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm9, %zmm2, %zmm2
+; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm6, %zmm2, %zmm2
; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm5, %zmm2 {%k1}
; AVX512DQ-BW-FCP-NEXT: vpermq %zmm4, %zmm1, %zmm1
-; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm3, %zmm1, %zmm1
+; AVX512DQ-BW-FCP-NEXT: vpmovsxdq {{.*#+}} zmm4 = [134219776,151062785,167905794,184748803,201591812,218434821,235277830,252120839]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm4, %zmm1, %zmm1
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[1,3,1,3,5,7,5,7]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm10, %zmm0, %zmm0
+; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm4, %zmm0, %zmm0
; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm1, %zmm0 {%k2}
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm2, %zmm0 {%k3}
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, 64(%rax)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm6, (%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm3, (%rax)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%in.vec0 = load <16 x i8>, ptr %in.vecptr0, align 64
@@ -3774,9 +3774,9 @@ define void @store_i8_stride8_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3],ymm2[4,5,6],ymm0[7],ymm2[8,9,10],ymm0[11],ymm2[12,13,14],ymm0[15]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm23
; AVX512-FCP-NEXT: vmovdqa (%rcx), %xmm1
-; AVX512-FCP-NEXT: vmovdqa (%rdx), %xmm10
-; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm10[8],xmm1[8],xmm10[9],xmm1[9],xmm10[10],xmm1[10],xmm10[11],xmm1[11],xmm10[12],xmm1[12],xmm10[13],xmm1[13],xmm10[14],xmm1[14],xmm10[15],xmm1[15]
-; AVX512-FCP-NEXT: vmovdqa64 %xmm1, %xmm30
+; AVX512-FCP-NEXT: vmovdqa (%rdx), %xmm14
+; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm14[8],xmm1[8],xmm14[9],xmm1[9],xmm14[10],xmm1[10],xmm14[11],xmm1[11],xmm14[12],xmm1[12],xmm14[13],xmm1[13],xmm14[14],xmm1[14],xmm14[15],xmm1[15]
+; AVX512-FCP-NEXT: vmovdqa64 %xmm1, %xmm31
; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512-FCP-NEXT: vmovdqa (%rsi), %xmm12
; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm13
@@ -3784,82 +3784,85 @@ define void @store_i8_stride8_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm1, %ymm2
; AVX512-FCP-NEXT: vpmovsxwq {{.*#+}} ymm3 = [2312,2826,3340,3854]
; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm2
-; AVX512-FCP-NEXT: vmovdqa %ymm3, %ymm14
+; AVX512-FCP-NEXT: vmovdqa64 %ymm3, %ymm29
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [8,9,8,9,4,5,6,7,12,13,10,11,12,13,10,11,8,9,12,13,4,5,6,7,12,13,14,15,12,13,14,15]
; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm0, %ymm3
; AVX512-FCP-NEXT: vmovdqa64 %ymm4, %ymm28
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3,4],ymm3[5],ymm2[6,7,8],ymm3[9],ymm2[10,11,12],ymm3[13],ymm2[14,15]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,0,1,4,5,2,3,4,5,2,3,12,13,14,15,0,1,4,5,4,5,6,7,4,5,6,7,12,13,14,15]
; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vmovdqa64 %ymm3, %ymm29
+; AVX512-FCP-NEXT: vmovdqa64 %ymm3, %ymm30
; AVX512-FCP-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; AVX512-FCP-NEXT: vpmovsxwq {{.*#+}} xmm9 = [1284,1798]
-; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm1, %xmm1
+; AVX512-FCP-NEXT: vpmovsxwq {{.*#+}} xmm4 = [1284,1798]
+; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm1, %xmm1
+; AVX512-FCP-NEXT: vmovdqa %xmm4, %xmm10
; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm3, %ymm1
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7,8],ymm0[9],ymm1[10,11,12],ymm0[13],ymm1[14,15]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm19
-; AVX512-FCP-NEXT: vmovdqa 16(%r10), %xmm8
+; AVX512-FCP-NEXT: vmovdqa 16(%r10), %xmm9
; AVX512-FCP-NEXT: vmovdqa 16(%rax), %xmm11
-; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm11[0],xmm8[0],xmm11[1],xmm8[1],xmm11[2],xmm8[2],xmm11[3],xmm8[3],xmm11[4],xmm8[4],xmm11[5],xmm8[5],xmm11[6],xmm8[6],xmm11[7],xmm8[7]
+; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm11[0],xmm9[0],xmm11[1],xmm9[1],xmm11[2],xmm9[2],xmm11[3],xmm9[3],xmm11[4],xmm9[4],xmm11[5],xmm9[5],xmm11[6],xmm9[6],xmm11[7],xmm9[7]
; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vmovdqa 16(%r9), %xmm7
-; AVX512-FCP-NEXT: vmovdqa 16(%r8), %xmm6
-; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
+; AVX512-FCP-NEXT: vmovdqa 16(%r9), %xmm8
+; AVX512-FCP-NEXT: vmovdqa 16(%r8), %xmm7
+; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3],xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm1, %ymm1
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm0[u,u,u,u,u,u,8,9,u,u,u,u,u,u,10,11,u,u,u,u,u,u,28,29,u,u,u,u,u,u,30,31]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm1[0,1,2,3,8,9,u,u,8,9,10,11,10,11,u,u,16,17,18,19,28,29,u,u,28,29,26,27,30,31,u,u]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm5[0,1,2],ymm4[3],ymm5[4,5,6],ymm4[7],ymm5[8,9,10],ymm4[11],ymm5[12,13,14],ymm4[15]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,0,1,u,u,u,u,u,u,2,3,u,u,u,u,u,u,20,21,u,u,u,u,u,u,22,23]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[0,1,2,3,0,1,u,u,8,9,10,11,2,3,u,u,20,21,18,19,20,21,u,u,24,25,26,27,22,23,u,u]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,8,9,8,9,8,9,10,11,10,11,10,11,0,1,2,3,12,13,12,13,12,13,10,11,14,15,14,15]
+; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm0, %ymm3
+; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm1, %ymm2
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3],ymm2[4,5,6],ymm3[7],ymm2[8,9,10],ymm3[11],ymm2[12,13,14],ymm3[15]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,2,3,0,1,0,1,8,9,10,11,2,3,2,3,4,5,2,3,4,5,4,5,8,9,10,11,6,7,6,7]
+; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm1, %ymm1
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7],ymm1[8,9,10],ymm0[11],ymm1[12,13,14],ymm0[15]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm17
-; AVX512-FCP-NEXT: vmovdqa 16(%rsi), %xmm5
-; AVX512-FCP-NEXT: vmovdqa 16(%rdi), %xmm4
-; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm17
+; AVX512-FCP-NEXT: vmovdqa 16(%rcx), %xmm6
+; AVX512-FCP-NEXT: vmovdqa 16(%rdx), %xmm4
+; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm1
-; AVX512-FCP-NEXT: vpshufb %ymm14, %ymm1, %ymm1
-; AVX512-FCP-NEXT: vmovdqa64 %ymm14, %ymm16
-; AVX512-FCP-NEXT: vmovdqa 16(%rcx), %xmm3
-; AVX512-FCP-NEXT: vmovdqa 16(%rdx), %xmm2
-; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm15 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm15, %ymm15, %ymm15
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm14 = ymm15[u,u,8,9,u,u,u,u,u,u,10,11,u,u,u,u,u,u,28,29,u,u,u,u,u,u,30,31,u,u,u,u]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm14[1],ymm1[2,3,4],ymm14[5],ymm1[6,7,8],ymm14[9],ymm1[10,11,12],ymm14[13],ymm1[14,15]
-; AVX512-FCP-NEXT: vpmovzxwq {{.*#+}} xmm14 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
-; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm0, %xmm0
-; AVX512-FCP-NEXT: vmovdqa64 %xmm9, %xmm31
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm14, %ymm0
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm14 = ymm15[u,u,0,1,u,u,u,u,u,u,2,3,u,u,u,u,u,u,20,21,u,u,u,u,u,u,22,23,u,u,u,u]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm14[1],ymm0[2,3,4],ymm14[5],ymm0[6,7,8],ymm14[9],ymm0[10,11,12],ymm14[13],ymm0[14,15]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm18
-; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm11[8],xmm8[8],xmm11[9],xmm8[9],xmm11[10],xmm8[10],xmm11[11],xmm8[11],xmm11[12],xmm8[12],xmm11[13],xmm8[13],xmm11[14],xmm8[14],xmm11[15],xmm8[15]
-; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
+; AVX512-FCP-NEXT: vmovdqa 16(%rsi), %xmm5
+; AVX512-FCP-NEXT: vmovdqa 16(%rdi), %xmm3
+; AVX512-FCP-NEXT: vpmovsxdq {{.*#+}} ymm2 = [151521544,185207562,218893580,252579598]
+; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm15
+; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm15, %ymm15
+; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm1, %ymm2
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm15[0],ymm2[1],ymm15[2,3,4],ymm2[5],ymm15[6,7,8],ymm2[9],ymm15[10,11,12],ymm2[13],ymm15[14,15]
+; AVX512-FCP-NEXT: vpmovzxwq {{.*#+}} xmm15 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm0, %xmm0
+; AVX512-FCP-NEXT: vmovdqa64 %xmm10, %xmm16
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm15, %ymm0
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,0,1,u,u,u,u,u,u,2,3,u,u,u,u,u,u,20,21,u,u,u,u,u,u,22,23,u,u,u,u]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7,8],ymm1[9],ymm0[10,11,12],ymm1[13],ymm0[14,15]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm18
+; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm11[8],xmm9[8],xmm11[9],xmm9[9],xmm11[10],xmm9[10],xmm11[11],xmm9[11],xmm11[12],xmm9[12],xmm11[13],xmm9[13],xmm11[14],xmm9[14],xmm11[15],xmm9[15]
+; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm7[8],xmm8[8],xmm7[9],xmm8[9],xmm7[10],xmm8[10],xmm7[11],xmm8[11],xmm7[12],xmm8[12],xmm7[13],xmm8[13],xmm7[14],xmm8[14],xmm7[15],xmm8[15]
; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm1, %ymm1
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [0,1,2,3,8,9,8,9,8,9,8,9,12,13,10,11,0,1,2,3,8,9,12,13,8,9,12,13,12,13,14,15]
-; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm0, %ymm6
-; AVX512-FCP-NEXT: vmovdqa64 %ymm20, %ymm11
-; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm1, %ymm7
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm7[0,1,2],ymm6[3],ymm7[4,5,6],ymm6[7],ymm7[8,9,10],ymm6[11],ymm7[12,13,14],ymm6[15]
+; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm0, %ymm2
+; AVX512-FCP-NEXT: vmovdqa64 %ymm20, %ymm9
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm1, %ymm7
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm7[0,1,2],ymm2[3],ymm7[4,5,6],ymm2[7],ymm7[8,9,10],ymm2[11],ymm7[12,13,14],ymm2[15]
; AVX512-FCP-NEXT: vmovdqa64 %ymm26, %ymm7
; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vmovdqa64 %ymm27, %ymm14
-; AVX512-FCP-NEXT: vpshufb %ymm14, %ymm1, %ymm1
+; AVX512-FCP-NEXT: vmovdqa64 %ymm27, %ymm11
+; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm1, %ymm1
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7],ymm1[8,9,10],ymm0[11],ymm1[12,13,14],ymm0[15]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm0, %zmm20
-; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
-; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm20
+; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm4[8],xmm6[8],xmm4[9],xmm6[9],xmm4[10],xmm6[10],xmm4[11],xmm6[11],xmm4[12],xmm6[12],xmm4[13],xmm6[13],xmm4[14],xmm6[14],xmm4[15],xmm6[15]
+; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm3[8],xmm5[8],xmm3[9],xmm5[9],xmm3[10],xmm5[10],xmm3[11],xmm5[11],xmm3[12],xmm5[12],xmm3[13],xmm5[13],xmm3[14],xmm5[14],xmm3[15],xmm5[15]
; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm1, %ymm1
; AVX512-FCP-NEXT: vinserti128 $1, %xmm2, %ymm2, %ymm3
-; AVX512-FCP-NEXT: vmovdqa64 %ymm16, %ymm15
+; AVX512-FCP-NEXT: vmovdqa64 %ymm29, %ymm15
; AVX512-FCP-NEXT: vpshufb %ymm15, %ymm3, %ymm3
; AVX512-FCP-NEXT: vmovdqa64 %ymm28, %ymm6
; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm1, %ymm4
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3,4],ymm4[5],ymm3[6,7,8],ymm4[9],ymm3[10,11,12],ymm4[13],ymm3[14,15]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm29, %ymm9
-; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm1, %ymm1
+; AVX512-FCP-NEXT: vmovdqa64 %ymm30, %ymm10
+; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm1, %ymm1
; AVX512-FCP-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
-; AVX512-FCP-NEXT: vmovdqa64 %xmm31, %xmm0
+; AVX512-FCP-NEXT: vmovdqa64 %xmm16, %xmm0
; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm2, %xmm2
; AVX512-FCP-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7,8],ymm1[9],ymm2[10,11,12],ymm1[13],ymm2[14,15]
@@ -3873,21 +3876,21 @@ define void @store_i8_stride8_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vinserti128 $1, %xmm2, %ymm2, %ymm2
; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm2, %ymm4
; AVX512-FCP-NEXT: vinserti128 $1, %xmm3, %ymm3, %ymm3
-; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm5
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm3, %ymm5
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm5[0,1,2],ymm4[3],ymm5[4,5,6],ymm4[7],ymm5[8,9,10],ymm4[11],ymm5[12,13,14],ymm4[15]
; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm2, %ymm2
-; AVX512-FCP-NEXT: vpshufb %ymm14, %ymm3, %ymm3
+; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm3
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3],ymm3[4,5,6],ymm2[7],ymm3[8,9,10],ymm2[11],ymm3[12,13,14],ymm2[15]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm2, %zmm2
-; AVX512-FCP-NEXT: vmovdqa64 %xmm30, %xmm3
-; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm10[0],xmm3[0],xmm10[1],xmm3[1],xmm10[2],xmm3[2],xmm10[3],xmm3[3],xmm10[4],xmm3[4],xmm10[5],xmm3[5],xmm10[6],xmm3[6],xmm10[7],xmm3[7]
+; AVX512-FCP-NEXT: vmovdqa64 %xmm31, %xmm3
+; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm14[0],xmm3[0],xmm14[1],xmm3[1],xmm14[2],xmm3[2],xmm14[3],xmm3[3],xmm14[4],xmm3[4],xmm14[5],xmm3[5],xmm14[6],xmm3[6],xmm14[7],xmm3[7]
; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3],xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
; AVX512-FCP-NEXT: vinserti128 $1, %xmm4, %ymm4, %ymm5
; AVX512-FCP-NEXT: vpshufb %ymm15, %ymm5, %ymm5
; AVX512-FCP-NEXT: vinserti128 $1, %xmm3, %ymm3, %ymm3
; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm3, %ymm6
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm5[0],ymm6[1],ymm5[2,3,4],ymm6[5],ymm5[6,7,8],ymm6[9],ymm5[10,11,12],ymm6[13],ymm5[14,15]
-; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm3, %ymm3
+; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm3, %ymm3
; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm4, %xmm6
; AVX512-FCP-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; AVX512-FCP-NEXT: vinserti128 $1, %xmm6, %ymm4, %ymm4
@@ -4131,9 +4134,8 @@ define void @store_i8_stride8_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm2, %xmm19
; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm1, %xmm20
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [8,9,8,9,4,5,6,7,12,13,10,11,12,13,10,11,8,9,12,13,4,5,6,7,12,13,14,15,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm0, %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm2, %ymm18
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [8,9,8,9,4,5,6,7,12,13,10,11,12,13,10,11,8,9,12,13,4,5,6,7,12,13,14,15,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm0, %ymm1
; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %xmm3
; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm4
; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
@@ -4142,107 +4144,108 @@ define void @store_i8_stride8_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm2, %ymm3
; AVX512DQ-FCP-NEXT: vpmovsxwq {{.*#+}} ymm4 = [2312,2826,3340,3854]
; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm3
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm4, %ymm5
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm4, %ymm24
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3,4],ymm1[5],ymm3[6,7,8],ymm1[9],ymm3[10,11,12],ymm1[13],ymm3[14,15]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,0,1,4,5,2,3,4,5,2,3,12,13,14,15,0,1,4,5,4,5,6,7,4,5,6,7,12,13,14,15]
; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm0, %ymm0
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm3, %ymm23
; AVX512DQ-FCP-NEXT: vpmovsxwq {{.*#+}} xmm4 = [1284,1798]
; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm2, %xmm3
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm4, %xmm10
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm4, %xmm5
; AVX512DQ-FCP-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3,4],ymm0[5],ymm2[6,7,8],ymm0[9],ymm2[10,11,12],ymm0[13],ymm2[14,15]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm17
; AVX512DQ-FCP-NEXT: vmovdqa (%r10), %xmm1
-; AVX512DQ-FCP-NEXT: vmovdqa (%rax), %xmm11
-; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm11[8],xmm1[8],xmm11[9],xmm1[9],xmm11[10],xmm1[10],xmm11[11],xmm1[11],xmm11[12],xmm1[12],xmm11[13],xmm1[13],xmm11[14],xmm1[14],xmm11[15],xmm1[15]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm1, %xmm28
+; AVX512DQ-FCP-NEXT: vmovdqa (%rax), %xmm14
+; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm14[8],xmm1[8],xmm14[9],xmm1[9],xmm14[10],xmm1[10],xmm14[11],xmm1[11],xmm14[12],xmm1[12],xmm14[13],xmm1[13],xmm14[14],xmm1[14],xmm14[15],xmm1[15]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm1, %xmm29
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,8,9,8,9,8,9,8,9,12,13,10,11,0,1,2,3,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm0, %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm2, %ymm24
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm2, %ymm25
; AVX512DQ-FCP-NEXT: vmovdqa (%r9), %xmm12
; AVX512DQ-FCP-NEXT: vmovdqa (%r8), %xmm13
; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm13[8],xmm12[8],xmm13[9],xmm12[9],xmm13[10],xmm12[10],xmm13[11],xmm12[11],xmm13[12],xmm12[12],xmm13[13],xmm12[13],xmm13[14],xmm12[14],xmm13[15],xmm12[15]
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm2, %ymm2
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,2,3,8,9,10,11,8,9,10,11,10,11,14,15,0,1,2,3,12,13,10,11,12,13,10,11,14,15,14,15]
; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm3
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm4, %ymm25
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm4, %ymm26
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3],ymm3[4,5,6],ymm1[7],ymm3[8,9,10],ymm1[11],ymm3[12,13,14],ymm1[15]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,0,1,0,1,0,1,8,9,10,11,4,5,2,3,0,1,4,5,0,1,4,5,8,9,10,11,4,5,6,7]
; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm3, %ymm26
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm3, %ymm27
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,2,3,0,1,2,3,8,9,10,11,2,3,6,7,4,5,2,3,4,5,2,3,8,9,10,11,6,7,6,7]
; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm3, %ymm27
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm3, %ymm28
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3],ymm2[4,5,6],ymm0[7],ymm2[8,9,10],ymm0[11],ymm2[12,13,14],ymm0[15]
; AVX512DQ-FCP-NEXT: movw $-21846, %r11w # imm = 0xAAAA
; AVX512DQ-FCP-NEXT: kmovw %r11d, %k1
; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm17 {%k1}
+; AVX512DQ-FCP-NEXT: vmovdqa 16(%rcx), %xmm10
+; AVX512DQ-FCP-NEXT: vmovdqa 16(%rdx), %xmm8
+; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm8[0],xmm10[0],xmm8[1],xmm10[1],xmm8[2],xmm10[2],xmm8[3],xmm10[3],xmm8[4],xmm10[4],xmm8[5],xmm10[5],xmm8[6],xmm10[6],xmm8[7],xmm10[7]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512DQ-FCP-NEXT: vmovdqa 16(%rsi), %xmm9
-; AVX512DQ-FCP-NEXT: vmovdqa 16(%rdi), %xmm8
-; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3],xmm8[4],xmm9[4],xmm8[5],xmm9[5],xmm8[6],xmm9[6],xmm8[7],xmm9[7]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm2, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm0, %ymm3
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm5, %ymm30
-; AVX512DQ-FCP-NEXT: vmovdqa 16(%rcx), %xmm7
-; AVX512DQ-FCP-NEXT: vmovdqa 16(%rdx), %xmm6
-; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm4, %ymm4, %ymm4
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm4[u,u,8,9,u,u,u,u,u,u,10,11,u,u,u,u,u,u,28,29,u,u,u,u,u,u,30,31,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm5[1],ymm3[2,3,4],ymm5[5],ymm3[6,7,8],ymm5[9],ymm3[10,11,12],ymm5[13],ymm3[14,15]
-; AVX512DQ-FCP-NEXT: vpmovzxwq {{.*#+}} xmm5 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
-; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm2, %xmm2
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm10, %xmm29
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm5, %ymm2
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,0,1,u,u,u,u,u,u,2,3,u,u,u,u,u,u,20,21,u,u,u,u,u,u,22,23,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2,3,4],ymm4[5],ymm2[6,7,8],ymm4[9],ymm2[10,11,12],ymm4[13],ymm2[14,15]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm16
-; AVX512DQ-FCP-NEXT: vmovdqa 16(%r10), %xmm5
+; AVX512DQ-FCP-NEXT: vmovdqa 16(%rdi), %xmm7
+; AVX512DQ-FCP-NEXT: vpmovsxdq {{.*#+}} ymm1 = [151521544,185207562,218893580,252579598]
+; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3],xmm7[4],xmm9[4],xmm7[5],xmm9[5],xmm7[6],xmm9[6],xmm7[7],xmm9[7]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm2, %ymm3
+; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm3, %ymm3
+; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm0, %ymm1
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3,4],ymm1[5],ymm3[6,7,8],ymm1[9],ymm3[10,11,12],ymm1[13],ymm3[14,15]
+; AVX512DQ-FCP-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm2, %xmm2
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm5, %xmm30
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,0,1,u,u,u,u,u,u,2,3,u,u,u,u,u,u,20,21,u,u,u,u,u,u,22,23,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3,4],ymm0[5],ymm2[6,7,8],ymm0[9],ymm2[10,11,12],ymm0[13],ymm2[14,15]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm16
+; AVX512DQ-FCP-NEXT: vmovdqa 16(%r10), %xmm6
; AVX512DQ-FCP-NEXT: vmovdqa 16(%rax), %xmm4
-; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm2, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqa 16(%r9), %xmm3
-; AVX512DQ-FCP-NEXT: vmovdqa 16(%r8), %xmm2
-; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm15 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqa 16(%r9), %xmm5
+; AVX512DQ-FCP-NEXT: vmovdqa 16(%r8), %xmm3
+; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm15 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm15, %ymm15, %ymm15
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm0[u,u,u,u,u,u,8,9,u,u,u,u,u,u,10,11,u,u,u,u,u,u,28,29,u,u,u,u,u,u,30,31]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm14 = ymm15[0,1,2,3,8,9,u,u,8,9,10,11,10,11,u,u,16,17,18,19,28,29,u,u,28,29,26,27,30,31,u,u]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm14[0,1,2],ymm1[3],ymm14[4,5,6],ymm1[7],ymm14[8,9,10],ymm1[11],ymm14[12,13,14],ymm1[15]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,0,1,u,u,u,u,u,u,2,3,u,u,u,u,u,u,20,21,u,u,u,u,u,u,22,23]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm14 = ymm15[0,1,2,3,0,1,u,u,8,9,10,11,2,3,u,u,20,21,18,19,20,21,u,u,24,25,26,27,22,23,u,u]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm14[0,1,2],ymm0[3],ymm14[4,5,6],ymm0[7],ymm14[8,9,10],ymm0[11],ymm14[12,13,14],ymm0[15]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,8,9,8,9,8,9,10,11,10,11,10,11,0,1,2,3,12,13,12,13,12,13,10,11,14,15,14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm0, %ymm1
+; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm15, %ymm2
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7],ymm2[8,9,10],ymm1[11],ymm2[12,13,14],ymm1[15]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,0,1,0,1,8,9,10,11,2,3,2,3,4,5,2,3,4,5,4,5,8,9,10,11,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm15, %ymm2
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3],ymm2[4,5,6],ymm0[7],ymm2[8,9,10],ymm0[11],ymm2[12,13,14],ymm0[15]
; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm16 {%k1}
-; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
-; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm8[8],xmm9[8],xmm8[9],xmm9[9],xmm8[10],xmm9[10],xmm8[11],xmm9[11],xmm8[12],xmm9[12],xmm8[13],xmm9[13],xmm8[14],xmm9[14],xmm8[15],xmm9[15]
+; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm8[8],xmm10[8],xmm8[9],xmm10[9],xmm8[10],xmm10[10],xmm8[11],xmm10[11],xmm8[12],xmm10[12],xmm8[13],xmm10[13],xmm8[14],xmm10[14],xmm8[15],xmm10[15]
+; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm7[8],xmm9[8],xmm7[9],xmm9[9],xmm7[10],xmm9[10],xmm7[11],xmm9[11],xmm7[12],xmm9[12],xmm7[13],xmm9[13],xmm7[14],xmm9[14],xmm7[15],xmm9[15]
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm1, %ymm1, %ymm6
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm30, %ymm14
-; AVX512DQ-FCP-NEXT: vpshufb %ymm14, %ymm6, %ymm6
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm18, %ymm10
-; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm0, %ymm7
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3,4],ymm7[5],ymm6[6,7,8],ymm7[9],ymm6[10,11,12],ymm7[13],ymm6[14,15]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm1, %ymm1, %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm24, %ymm10
+; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm2, %ymm2
+; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm0, %ymm7
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm7[1],ymm2[2,3,4],ymm7[5],ymm2[6,7,8],ymm7[9],ymm2[10,11,12],ymm7[13],ymm2[14,15]
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm23, %ymm9
; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm0, %ymm0
; AVX512DQ-FCP-NEXT: vpmovzxwq {{.*#+}} xmm7 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm29, %xmm15
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm30, %xmm15
; AVX512DQ-FCP-NEXT: vpshufb %xmm15, %xmm1, %xmm1
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm1, %ymm7, %ymm1
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7,8],ymm0[9],ymm1[10,11,12],ymm0[13],ymm1[14,15]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm0, %zmm0
-; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
-; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm4[8],xmm6[8],xmm4[9],xmm6[9],xmm4[10],xmm6[10],xmm4[11],xmm6[11],xmm4[12],xmm6[12],xmm4[13],xmm6[13],xmm4[14],xmm6[14],xmm4[15],xmm6[15]
+; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm3[8],xmm5[8],xmm3[9],xmm5[9],xmm3[10],xmm5[10],xmm3[11],xmm5[11],xmm3[12],xmm5[12],xmm3[13],xmm5[13],xmm3[14],xmm5[14],xmm3[15],xmm5[15]
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm1, %ymm1, %ymm1
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm2, %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm24, %ymm5
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm25, %ymm5
; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm1, %ymm3
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm25, %ymm6
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm26, %ymm6
; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm2, %ymm4
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3],ymm4[4,5,6],ymm3[7],ymm4[8,9,10],ymm3[11],ymm4[12,13,14],ymm3[15]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm26, %ymm7
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm27, %ymm7
; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm27, %ymm8
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm28, %ymm8
; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm2, %ymm2
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7],ymm2[8,9,10],ymm1[11],ymm2[12,13,14],ymm1[15]
; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm3, %zmm1, %zmm0 {%k1}
@@ -4253,9 +4256,9 @@ define void @store_i8_stride8_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm22, %xmm3
; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm2, %ymm3
-; AVX512DQ-FCP-NEXT: vpshufb %ymm14, %ymm3, %ymm3
+; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm3, %ymm3
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm1, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm1, %ymm4
+; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm1, %ymm4
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3,4],ymm4[5],ymm3[6,7,8],ymm4[9],ymm3[10,11,12],ymm4[13],ymm3[14,15]
; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm1, %ymm1
; AVX512DQ-FCP-NEXT: vpshufb %xmm15, %xmm2, %xmm4
@@ -4263,8 +4266,8 @@ define void @store_i8_stride8_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm4, %ymm2, %ymm2
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7,8],ymm1[9],ymm2[10,11,12],ymm1[13],ymm2[14,15]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm28, %xmm2
-; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm11[0],xmm2[0],xmm11[1],xmm2[1],xmm11[2],xmm2[2],xmm11[3],xmm2[3],xmm11[4],xmm2[4],xmm11[5],xmm2[5],xmm11[6],xmm2[6],xmm11[7],xmm2[7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm29, %xmm2
+; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm14[0],xmm2[0],xmm14[1],xmm2[1],xmm14[2],xmm2[2],xmm14[3],xmm2[3],xmm14[4],xmm2[4],xmm14[5],xmm2[5],xmm14[6],xmm2[6],xmm14[7],xmm2[7]
; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3],xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm2, %ymm2
; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm2, %ymm4
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll
index 9a6d8c3366d98..5dd16c7b25790 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll
@@ -5502,9 +5502,10 @@ define <16 x i16> @shuffle_v16i16_01_00_17_16_03_02_19_26_09_08_25_24_11_10_27_2
;
; AVX2-FAST-LABEL: shuffle_v16i16_01_00_17_16_03_02_19_26_09_08_25_24_11_10_27_26:
; AVX2-FAST: # %bb.0:
-; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[2,3,0,1,u,u,u,u,6,7,4,5,u,u,u,u,18,19,16,17,u,u,u,u,22,23,20,21,u,u,u,u]
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [2,3,0,1,2,3,0,1,6,7,4,5,6,7,12,13,18,19,16,17,2,3,0,1,22,23,20,21,6,7,4,5]
+; AVX2-FAST-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
-; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,2,3,0,1,u,u,u,u,6,7,12,13,u,u,u,u,18,19,16,17,u,u,u,u,22,23,20,21]
+; AVX2-FAST-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
; AVX2-FAST-NEXT: retq
;
@@ -7469,14 +7470,15 @@ define <16 x i16> @PR24935(<16 x i16> %a, <16 x i16> %b) {
;
; AVX2-FAST-PERLANE-LABEL: PR24935:
; AVX2-FAST-PERLANE: # %bb.0:
+; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm2 = [u,u,2,3,2,3,u,u,10,11,u,u,6,7,u,u,2,3,18,19,18,19,u,u,26,27,8,9,0,1,u,u]
+; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm2, %ymm0, %ymm3
+; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
+; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm2, %ymm0, %ymm0
+; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm3[1,2],ymm0[3],ymm3[4],ymm0[5,6,7,8],ymm3[9,10],ymm0[11],ymm3[12],ymm0[13,14,15]
; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,ymm1[8,9],zero,zero,zero,zero,ymm1[14,15,12,13,0,1,24,25,24,25],zero,zero,ymm1[24,25,16,17,30,31,28,29,16,17]
; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,0,1]
; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[6,7,4,5],zero,zero,ymm1[10,11,4,5],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[16,17],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-FAST-PERLANE-NEXT: vpor %ymm2, %ymm1, %ymm1
-; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm2 = ymm0[u,u,2,3,2,3,u,u,10,11,u,u,u,u,u,u,u,u,18,19,18,19,u,u,26,27,u,u,u,u,u,u]
-; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
-; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,6,7,u,u,18,19,u,u,u,u,u,u,u,u,24,25,16,17,u,u]
-; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1,2],ymm0[3],ymm2[4],ymm0[5,6,7,8],ymm2[9,10],ymm0[11],ymm2[12],ymm0[13,14,15]
; AVX2-FAST-PERLANE-NEXT: vpmovsxbw {{.*#+}} ymm2 = [65535,65535,0,65535,65535,65535,0,65535,0,0,65535,65535,0,0,0,65535]
; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
; AVX2-FAST-PERLANE-NEXT: retq
@@ -7524,9 +7526,10 @@ define <16 x i16> @PR34369(<16 x i16> %vec, <16 x i16> %mask) {
; AVX1-LABEL: PR34369:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[u,u,u,u,u,u,10,11,u,u,u,u,u,u,4,5]
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[6,7,0,1,0,1,u,u,10,11,4,5,4,5,u,u]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[3],xmm0[4,5,6],xmm3[7]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [6,7,0,1,0,1,10,11,10,11,4,5,4,5,4,5]
+; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm4
+; AVX1-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[3],xmm0[4,5,6],xmm4[7]
; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[14,15,0,1,12,13,0,1,2,3,4,5,8,9,8,9]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v32.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v32.ll
index dbcb49507ea19..56170c5c7e699 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-256-v32.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v32.ll
@@ -2630,15 +2630,17 @@ define <32 x i8> @shuffle_v32i8_39_38_37_36_35_34_33_32_07_06_05_04_03_02_01_00_
;
; AVX2-LABEL: shuffle_v32i8_39_38_37_36_35_34_33_32_07_06_05_04_03_02_01_00_55_54_53_52_51_50_49_48_23_22_21_20_19_18_17_16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,7,6,5,4,3,2,1,0,u,u,u,u,u,u,u,u,23,22,21,20,19,18,17,16]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[7,6,5,4,3,2,1,0,u,u,u,u,u,u,u,u,23,22,21,20,19,18,17,16,u,u,u,u,u,u,u,u]
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [7,6,5,4,3,2,1,0,7,6,5,4,3,2,1,0,7,6,5,4,3,2,1,0,7,6,5,4,3,2,1,0]
+; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT: retq
;
; AVX512VLBW-LABEL: shuffle_v32i8_39_38_37_36_35_34_33_32_07_06_05_04_03_02_01_00_55_54_53_52_51_50_49_48_23_22_21_20_19_18_17_16:
; AVX512VLBW: # %bb.0:
-; AVX512VLBW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,7,6,5,4,3,2,1,0,u,u,u,u,u,u,u,u,23,22,21,20,19,18,17,16]
-; AVX512VLBW-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[7,6,5,4,3,2,1,0,u,u,u,u,u,u,u,u,23,22,21,20,19,18,17,16,u,u,u,u,u,u,u,u]
+; AVX512VLBW-NEXT: vpbroadcastq {{.*#+}} ymm2 = [7,6,5,4,3,2,1,0,7,6,5,4,3,2,1,0,7,6,5,4,3,2,1,0,7,6,5,4,3,2,1,0]
+; AVX512VLBW-NEXT: vpshufb %ymm2, %ymm0, %ymm0
+; AVX512VLBW-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX512VLBW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX512VLBW-NEXT: retq
;
@@ -2661,8 +2663,9 @@ define <32 x i8> @shuffle_v32i8_39_38_37_36_35_34_33_32_07_06_05_04_03_02_01_00_
;
; XOPAVX2-LABEL: shuffle_v32i8_39_38_37_36_35_34_33_32_07_06_05_04_03_02_01_00_55_54_53_52_51_50_49_48_23_22_21_20_19_18_17_16:
; XOPAVX2: # %bb.0:
-; XOPAVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,7,6,5,4,3,2,1,0,u,u,u,u,u,u,u,u,23,22,21,20,19,18,17,16]
-; XOPAVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[7,6,5,4,3,2,1,0,u,u,u,u,u,u,u,u,23,22,21,20,19,18,17,16,u,u,u,u,u,u,u,u]
+; XOPAVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [7,6,5,4,3,2,1,0,7,6,5,4,3,2,1,0,7,6,5,4,3,2,1,0,7,6,5,4,3,2,1,0]
+; XOPAVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
+; XOPAVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; XOPAVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; XOPAVX2-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 39, i32 38, i32 37, i32 36, i32 35, i32 34, i32 33, i32 32, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 55, i32 54, i32 53, i32 52, i32 51, i32 50, i32 49, i32 48, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16>
@@ -4837,15 +4840,17 @@ define <32 x i8> @shuffle_v32i8_00_02_04_06_08_10_12_14_32_34_36_38_40_42_44_46_
;
; AVX2-LABEL: shuffle_v32i8_00_02_04_06_08_10_12_14_32_34_36_38_40_42_44_46_16_18_20_22_24_26_28_30_48_50_52_54_56_58_60_62:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30,u,u,u,u,u,u,u,u]
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14]
+; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-NEXT: retq
;
; AVX512VLBW-LABEL: shuffle_v32i8_00_02_04_06_08_10_12_14_32_34_36_38_40_42_44_46_16_18_20_22_24_26_28_30_48_50_52_54_56_58_60_62:
; AVX512VLBW: # %bb.0:
-; AVX512VLBW-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30]
-; AVX512VLBW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30,u,u,u,u,u,u,u,u]
+; AVX512VLBW-NEXT: vpbroadcastq {{.*#+}} ymm2 = [0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14]
+; AVX512VLBW-NEXT: vpshufb %ymm2, %ymm1, %ymm1
+; AVX512VLBW-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX512VLBW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX512VLBW-NEXT: retq
;
@@ -4867,8 +4872,9 @@ define <32 x i8> @shuffle_v32i8_00_02_04_06_08_10_12_14_32_34_36_38_40_42_44_46_
;
; XOPAVX2-LABEL: shuffle_v32i8_00_02_04_06_08_10_12_14_32_34_36_38_40_42_44_46_16_18_20_22_24_26_28_30_48_50_52_54_56_58_60_62:
; XOPAVX2: # %bb.0:
-; XOPAVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30]
-; XOPAVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30,u,u,u,u,u,u,u,u]
+; XOPAVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14]
+; XOPAVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
+; XOPAVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; XOPAVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; XOPAVX2-NEXT: retq
%1 = shufflevector <32 x i8> %a0, <32 x i8> %a1, <32 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
@@ -4892,8 +4898,9 @@ define <32 x i8> @shuffle_v32i8_00_02_04_06_08_10_12_14_16_18_20_22_24_26_28_30_
;
; AVX2-LABEL: shuffle_v32i8_00_02_04_06_08_10_12_14_16_18_20_22_24_26_28_30_32_34_36_38_40_42_44_46_48_50_52_54_56_58_60_62:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30,u,u,u,u,u,u,u,u]
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14]
+; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: retq
@@ -4917,8 +4924,9 @@ define <32 x i8> @shuffle_v32i8_00_02_04_06_08_10_12_14_16_18_20_22_24_26_28_30_
;
; XOPAVX2-LABEL: shuffle_v32i8_00_02_04_06_08_10_12_14_16_18_20_22_24_26_28_30_32_34_36_38_40_42_44_46_48_50_52_54_56_58_60_62:
; XOPAVX2: # %bb.0:
-; XOPAVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30]
-; XOPAVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30,u,u,u,u,u,u,u,u]
+; XOPAVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14]
+; XOPAVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
+; XOPAVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; XOPAVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; XOPAVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; XOPAVX2-NEXT: retq
>From 32655bde2c8f72177ac3226fc934fa757094cab0 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Tue, 16 Jul 2024 11:31:32 +0100
Subject: [PATCH 2/2] Add bool arg name comments
---
llvm/lib/Target/X86/X86ISelLowering.cpp | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 7f44fb0fa266d..32a96a48dcfbf 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -41038,7 +41038,7 @@ static SDValue combineTargetShuffle(SDValue N, const SDLoc &DL,
RHS.getOpcode() == X86ISD::PSHUFB &&
LHS.getOperand(1) != RHS.getOperand(1) &&
(LHS.getOperand(1).hasOneUse() || RHS.getOperand(1).hasOneUse()) &&
- getTargetShuffleMask(N, false, Ops, Mask)) {
+ getTargetShuffleMask(N, /*AllowSentinelZero=*/false, Ops, Mask)) {
assert(Ops.size() == 2 && LHS == peekThroughOneUseBitcasts(Ops[0]) &&
RHS == peekThroughOneUseBitcasts(Ops[1]) &&
"BLENDI decode mismatch");
@@ -41047,8 +41047,8 @@ static SDValue combineTargetShuffle(SDValue N, const SDLoc &DL,
SDValue MaskRHS = RHS.getOperand(1);
llvm::narrowShuffleMaskElts(EltBits / 8, Mask, ByteMask);
if (SDValue NewMask = combineX86ShufflesConstants(
- ShufVT, {MaskLHS, MaskRHS}, ByteMask, true, DAG, DL,
- Subtarget)) {
+ ShufVT, {MaskLHS, MaskRHS}, ByteMask,
+ /*HasVariableMask=*/true, DAG, DL, Subtarget)) {
SDValue NewLHS = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT,
LHS.getOperand(0), NewMask);
SDValue NewRHS = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT,