[llvm] [X86] combineX86ShufflesRecursively - iteratively peek through bitcasts to free subvector widening/narrowing sources. (PR #134701)
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Tue Apr 8 02:09:30 PDT 2025
https://github.com/RKSimon updated https://github.com/llvm/llvm-project/pull/134701
From 8c906a69fccac6b70202fc69f6676708a436e21c Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Mon, 7 Apr 2025 19:20:39 +0100
Subject: [PATCH] [X86] combineX86ShufflesRecursively - iteratively peek
through bitcasts to free subvector widening/narrowing sources.
Generalizes the existing code to repeatedly peek through mixed bitcast/insert_subvector/extract_subvector chains to find the source of the shuffle operand.
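
To illustrate the shape of the transform, below is a minimal standalone sketch of the peeling loop, using a hypothetical Node struct in place of SDValue/ISD nodes (the names, fields and helper are illustrative only, not LLVM API; the mask-undef bookkeeping for widened operands is elided):

#include <cassert>
#include <cstdio>
#include <vector>

// Hypothetical stand-ins for SDValue and the ISD opcodes involved.
enum Opcode { OTHER, BITCAST, INSERT_SUBVECTOR, EXTRACT_SUBVECTOR };

struct Node {
  Opcode Opc = OTHER;
  unsigned SizeInBits = 0;      // value-type size of this node
  std::vector<Node *> Ops;      // operands
  bool InsertIntoUndef = false; // INSERT_SUBVECTOR: base operand is undef
  bool IndexIsZero = false;     // insert/extract index is constant 0
};

// Iteratively peel free bitcast / widening-insert / narrowing-extract
// wrappers until a non-wrapper source is reached, mirroring the loop
// structure of the patch.
static Node *peekThroughFreeWrappers(Node *N, unsigned RootSizeInBits) {
  while (true) {
    if (N->Opc == BITCAST) {
      N = N->Ops[0]; // free: no lane movement
      continue;
    }
    if (N->Opc == INSERT_SUBVECTOR && N->InsertIntoUndef && N->IndexIsZero) {
      N = N->Ops[1]; // widening: keep only the inserted subvector
      continue;      // (the real code marks out-of-bounds mask indices undef)
    }
    if (N->Opc == EXTRACT_SUBVECTOR && N->IndexIsZero &&
        (RootSizeInBits % N->Ops[0]->SizeInBits) == 0) {
      N = N->Ops[0]; // narrowing: use the wider source directly
      continue;
    }
    break; // no more free wrappers
  }
  return N;
}

int main() {
  // src -> extract_subvector 0 -> bitcast -> insert_subvector undef, _, 0
  Node Src{OTHER, 128, {}, false, false};
  Node Ext{EXTRACT_SUBVECTOR, 64, {&Src}, false, true};
  Node Cast{BITCAST, 64, {&Ext}, false, false};
  Node Widen{INSERT_SUBVECTOR, 256, {nullptr, &Cast}, true, true};
  Node *Found = peekThroughFreeWrappers(&Widen, /*RootSizeInBits=*/256);
  assert(Found == &Src);
  std::printf("peeled to a %u-bit source\n", Found->SizeInBits);
}

The point of the iterative form is visible in the example chain above: a widening insert_subvector can now be discovered even when it is hidden behind a bitcast, rather than only when it is the outermost node.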
---
llvm/lib/Target/X86/X86ISelLowering.cpp | 51 +-
.../any_extend_vector_inreg_of_broadcast.ll | 13 +-
.../vector-interleaved-load-i16-stride-3.ll | 16 +-
.../vector-interleaved-store-i16-stride-6.ll | 380 +-
.../vector-interleaved-store-i16-stride-7.ll | 5578 +++++++++--------
.../vector-interleaved-store-i8-stride-4.ll | 88 +-
.../vector-interleaved-store-i8-stride-5.ll | 1194 ++--
.../vector-interleaved-store-i8-stride-6.ll | 916 ++-
.../vector-interleaved-store-i8-stride-7.ll | 1940 +++---
.../CodeGen/X86/vector-shuffle-256-v32.ll | 474 +-
.../CodeGen/X86/x86-interleaved-access.ll | 22 +-
11 files changed, 5212 insertions(+), 5460 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index bac5684733e60..d86eec1584274 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -41115,30 +41115,37 @@ static SDValue combineX86ShufflesRecursively(
}
}
+ // Peek through any free bitcasts to insert_subvector vector widenings or
+ // extract_subvector nodes back to root size.
+ // TODO: Can resolveTargetShuffleInputsAndMask do some of this?
for (auto [I, Op] : enumerate(Ops)) {
- // Peek through vector widenings + set out of bounds mask indices to undef.
- // TODO: Can resolveTargetShuffleInputsAndMask do some of this?
- if (Op.getOpcode() == ISD::INSERT_SUBVECTOR && Op.getOperand(0).isUndef() &&
- isNullConstant(Op.getOperand(2))) {
- Op = Op.getOperand(1);
- unsigned Scale = RootSizeInBits / Op.getValueSizeInBits();
- int Lo = I * Mask.size();
- int Hi = (I + 1) * Mask.size();
- int NewHi = Lo + (Mask.size() / Scale);
- for (int &M : Mask) {
- if (Lo <= M && NewHi <= M && M < Hi)
- M = SM_SentinelUndef;
- }
- }
-
- // Peek through any free bitcasts/extract_subvector nodes back to root size.
SDValue BC = Op;
- if (BC.getOpcode() == ISD::BITCAST && BC.hasOneUse())
- BC = peekThroughOneUseBitcasts(BC);
- while (BC.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
- (RootSizeInBits % BC.getOperand(0).getValueSizeInBits()) == 0 &&
- isNullConstant(BC.getOperand(1))) {
- Op = BC = BC.getOperand(0);
+ while (1) {
+ if (BC.getOpcode() == ISD::BITCAST && BC.hasOneUse()) {
+ BC = BC.getOperand(0);
+ continue;
+ }
+ if (BC.getOpcode() == ISD::INSERT_SUBVECTOR &&
+ BC.getOperand(0).isUndef() && isNullConstant(BC.getOperand(2))) {
+ // Set out of bounds mask indices to undef.
+ Op = BC = BC.getOperand(1);
+ unsigned Scale = RootSizeInBits / Op.getValueSizeInBits();
+ int Lo = I * Mask.size();
+ int Hi = (I + 1) * Mask.size();
+ int NewHi = Lo + (Mask.size() / Scale);
+ for (int &M : Mask) {
+ if (Lo <= M && NewHi <= M && M < Hi)
+ M = SM_SentinelUndef;
+ }
+ continue;
+ }
+ if (BC.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
+ (RootSizeInBits % BC.getOperand(0).getValueSizeInBits()) == 0 &&
+ isNullConstant(BC.getOperand(1))) {
+ Op = BC = BC.getOperand(0);
+ continue;
+ }
+ break;
}
}
diff --git a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
index b075d48627b18..1fada58f05ba9 100644
--- a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
+++ b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
@@ -4708,18 +4708,15 @@ define void @vec384_i64_widen_to_i128_factor2_broadcast_to_v3i128_factor3(ptr %i
; AVX-NEXT: vpaddb 16(%rsi), %xmm1, %xmm1
; AVX-NEXT: vpaddb 48(%rsi), %xmm2, %xmm2
; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
-; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm3
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
-; AVX-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX-NEXT: vpaddb 16(%rdx), %xmm3, %xmm3
+; AVX-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; AVX-NEXT: vpaddb (%rdx), %xmm2, %xmm2
; AVX-NEXT: vpaddb 48(%rdx), %xmm1, %xmm1
-; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm0
-; AVX-NEXT: vmovdqa %xmm0, 32(%rcx)
+; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm3
+; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0
+; AVX-NEXT: vmovdqa %xmm0, 16(%rcx)
+; AVX-NEXT: vmovdqa %xmm3, 32(%rcx)
; AVX-NEXT: vmovdqa %xmm1, 48(%rcx)
; AVX-NEXT: vmovdqa %xmm2, (%rcx)
-; AVX-NEXT: vmovdqa %xmm3, 16(%rcx)
-; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX2-LABEL: vec384_i64_widen_to_i128_factor2_broadcast_to_v3i128_factor3:
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
index a39bc6b668669..da902b3aed5ab 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
@@ -1836,7 +1836,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vmovdqa {{.*#+}} ymm11 = [2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13,18,19,24,25,30,31,20,21,26,27,16,17,22,23,28,29]
; AVX512-NEXT: vpshufb %ymm11, %ymm10, %ymm10
; AVX512-NEXT: vpblendw {{.*#+}} xmm12 = xmm2[0,1],xmm1[2],xmm2[3,4],xmm1[5],xmm2[6,7]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[2,3,8,9,14,15,4,5,10,11,10,11,10,11,10,11]
+; AVX512-NEXT: vpshufb %xmm11, %xmm12, %xmm12
; AVX512-NEXT: vpblendw {{.*#+}} xmm12 = xmm12[0,1,2,3,4],xmm10[5,6,7]
; AVX512-NEXT: vpblendd {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm12 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
@@ -1858,7 +1858,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vmovdqa {{.*#+}} ymm6 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,16,17,22,23,28,29,18,19,24,25,30,31]
; AVX512-NEXT: vpshufb %ymm6, %ymm5, %ymm5
; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3,4],xmm2[5],xmm1[6,7]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
+; AVX512-NEXT: vpshufb %xmm6, %xmm1, %xmm1
; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
; AVX512-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm5[4,5,6,7]
; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm8 ^ (ymm0 & (ymm9 ^ ymm8))
@@ -1914,7 +1914,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13,18,19,24,25,30,31,20,21,26,27,16,17,22,23,28,29]
; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm10, %ymm10
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm2[0,1],xmm1[2],xmm2[3,4],xmm1[5],xmm2[6,7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[2,3,8,9,14,15,4,5,10,11,10,11,10,11,10,11]
+; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm12, %xmm12
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm12[0,1,2,3,4],xmm10[5,6,7]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
@@ -1936,7 +1936,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,16,17,22,23,28,29,18,19,24,25,30,31]
; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm5, %ymm5
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3,4],xmm2[5],xmm1[6,7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
+; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm1, %xmm1
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm5[4,5,6,7]
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm8 ^ (ymm0 & (ymm9 ^ ymm8))
@@ -1992,7 +1992,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm11 = [2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13,18,19,24,25,30,31,20,21,26,27,16,17,22,23,28,29]
; AVX512DQ-NEXT: vpshufb %ymm11, %ymm10, %ymm10
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm12 = xmm2[0,1],xmm1[2],xmm2[3,4],xmm1[5],xmm2[6,7]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[2,3,8,9,14,15,4,5,10,11,10,11,10,11,10,11]
+; AVX512DQ-NEXT: vpshufb %xmm11, %xmm12, %xmm12
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm12 = xmm12[0,1,2,3,4],xmm10[5,6,7]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm12 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
@@ -2014,7 +2014,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm6 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,16,17,22,23,28,29,18,19,24,25,30,31]
; AVX512DQ-NEXT: vpshufb %ymm6, %ymm5, %ymm5
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3,4],xmm2[5],xmm1[6,7]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
+; AVX512DQ-NEXT: vpshufb %xmm6, %xmm1, %xmm1
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm5[4,5,6,7]
; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm8 ^ (ymm0 & (ymm9 ^ ymm8))
@@ -2070,7 +2070,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13,18,19,24,25,30,31,20,21,26,27,16,17,22,23,28,29]
; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm10, %ymm10
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm2[0,1],xmm1[2],xmm2[3,4],xmm1[5],xmm2[6,7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[2,3,8,9,14,15,4,5,10,11,10,11,10,11,10,11]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm11, %xmm12, %xmm12
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm12[0,1,2,3,4],xmm10[5,6,7]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
@@ -2092,7 +2092,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,16,17,22,23,28,29,18,19,24,25,30,31]
; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm5, %ymm5
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3,4],xmm2[5],xmm1[6,7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm1, %xmm1
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm5[4,5,6,7]
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm8 ^ (ymm0 & (ymm9 ^ ymm8))
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
index 25bad7578c111..ca8fcf2ee0f2c 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
@@ -4230,91 +4230,91 @@ define void @store_i16_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %xmm12
; AVX512-FCP-NEXT: vpshufb %xmm2, %xmm12, %xmm4
; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm20 = [0,0,2,1,8,9,8,9]
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm4 = [8,8,0,9,0,1,0,1]
; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm12[0],xmm13[0],xmm12[1],xmm13[1],xmm12[2],xmm13[2],xmm12[3],xmm13[3]
-; AVX512-FCP-NEXT: vpermt2q %zmm1, %zmm20, %zmm5
+; AVX512-FCP-NEXT: vpermt2q %zmm5, %zmm4, %zmm1
; AVX512-FCP-NEXT: movw $9362, %ax # imm = 0x2492
; AVX512-FCP-NEXT: kmovw %eax, %k2
-; AVX512-FCP-NEXT: vmovdqa32 %zmm3, %zmm5 {%k2}
-; AVX512-FCP-NEXT: vmovdqa %ymm5, %ymm1
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [16,9,10,17,12,13,18,15]
+; AVX512-FCP-NEXT: vmovdqa32 %zmm3, %zmm1 {%k2}
+; AVX512-FCP-NEXT: vmovdqa %ymm1, %ymm3
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm20 = [16,9,10,17,12,13,18,15]
; AVX512-FCP-NEXT: vmovdqa 32(%r8), %xmm10
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm10[2,1,3,3,4,5,6,7]
-; AVX512-FCP-NEXT: vpermt2d %zmm3, %zmm6, %zmm5
-; AVX512-FCP-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm10[0],zero,xmm10[1],zero,xmm10[2],zero,xmm10[3],zero
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm6 = xmm10[2,1,3,3,4,5,6,7]
+; AVX512-FCP-NEXT: vpermt2d %zmm6, %zmm20, %zmm1
+; AVX512-FCP-NEXT: vpmovzxwd {{.*#+}} xmm6 = xmm10[0],zero,xmm10[1],zero,xmm10[2],zero,xmm10[3],zero
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm19 = [0,1,8,3,4,9,6,7]
-; AVX512-FCP-NEXT: vpermt2d %ymm3, %ymm19, %ymm1
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm1, %zmm17
-; AVX512-FCP-NEXT: vmovdqa (%rcx), %xmm8
+; AVX512-FCP-NEXT: vpermt2d %ymm6, %ymm19, %ymm3
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm17
+; AVX512-FCP-NEXT: vmovdqa (%rcx), %xmm9
; AVX512-FCP-NEXT: vmovdqa (%rdx), %xmm7
-; AVX512-FCP-NEXT: vpsrldq {{.*#+}} xmm1 = xmm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512-FCP-NEXT: vpsrldq {{.*#+}} xmm1 = xmm9[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
; AVX512-FCP-NEXT: vpsrldq {{.*#+}} xmm3 = xmm7[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3]
; AVX512-FCP-NEXT: vpermt2d %zmm1, %zmm0, %zmm3
-; AVX512-FCP-NEXT: vmovdqa (%rsi), %xmm1
-; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm0
-; AVX512-FCP-NEXT: vpshufb %xmm2, %xmm1, %xmm5
-; AVX512-FCP-NEXT: vpshufb %xmm2, %xmm0, %xmm2
-; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512-FCP-NEXT: vpermt2q %zmm2, %zmm20, %zmm4
-; AVX512-FCP-NEXT: vmovdqa32 %zmm3, %zmm4 {%k2}
+; AVX512-FCP-NEXT: vmovdqa (%rsi), %xmm5
+; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm1
+; AVX512-FCP-NEXT: vpshufb %xmm2, %xmm5, %xmm8
+; AVX512-FCP-NEXT: vpshufb %xmm2, %xmm1, %xmm2
+; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm8[4],xmm2[5],xmm8[5],xmm2[6],xmm8[6],xmm2[7],xmm8[7]
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; AVX512-FCP-NEXT: vpermt2q %zmm8, %zmm4, %zmm2
+; AVX512-FCP-NEXT: vmovdqa32 %zmm3, %zmm2 {%k2}
; AVX512-FCP-NEXT: vmovdqa (%r8), %xmm3
-; AVX512-FCP-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; AVX512-FCP-NEXT: vpermi2d %ymm2, %ymm4, %ymm19
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm3[2,1,3,3,4,5,6,7]
-; AVX512-FCP-NEXT: vpermt2d %zmm2, %zmm6, %zmm4
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm20 = [0,0,0,0,8,8,10,9]
-; AVX512-FCP-NEXT: vmovdqa 32(%r9), %xmm9
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
-; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm9, %xmm2
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm9[0,0,2,1,4,5,6,7]
-; AVX512-FCP-NEXT: vpermt2q %zmm2, %zmm20, %zmm5
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm19, %zmm19
+; AVX512-FCP-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; AVX512-FCP-NEXT: vpermi2d %ymm4, %ymm2, %ymm19
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm3[2,1,3,3,4,5,6,7]
+; AVX512-FCP-NEXT: vpermt2d %zmm4, %zmm20, %zmm2
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm20 = [0,0,0,0,8,8,0,9]
+; AVX512-FCP-NEXT: vmovdqa 32(%r9), %xmm8
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
+; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm8, %xmm0
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm6 = xmm8[0,0,2,1,4,5,6,7]
+; AVX512-FCP-NEXT: vpermt2q %zmm0, %zmm20, %zmm6
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm19, %zmm19
; AVX512-FCP-NEXT: vmovdqa (%r9), %xmm2
-; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm2, %xmm6
+; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm2, %xmm0
; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm2[0,0,2,1,4,5,6,7]
-; AVX512-FCP-NEXT: vpermt2q %zmm6, %zmm20, %zmm4
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} ymm6 = ymm14[0],ymm15[0],ymm14[1],ymm15[1],ymm14[2],ymm15[2],ymm14[3],ymm15[3],ymm14[8],ymm15[8],ymm14[9],ymm15[9],ymm14[10],ymm15[10],ymm14[11],ymm15[11]
-; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX512-FCP-NEXT: vpermt2q %zmm0, %zmm20, %zmm4
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm14[0],ymm15[0],ymm14[1],ymm15[1],ymm14[2],ymm15[2],ymm14[3],ymm15[3],ymm14[8],ymm15[8],ymm14[9],ymm15[9],ymm14[10],ymm15[10],ymm14[11],ymm15[11]
+; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm26 = [1,1,1,1,10,10,10,11]
-; AVX512-FCP-NEXT: vpermt2q %zmm6, %zmm26, %zmm0
-; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm28, %ymm6
+; AVX512-FCP-NEXT: vpermt2q %zmm0, %zmm26, %zmm1
+; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm7[4],xmm9[4],xmm7[5],xmm9[5],xmm7[6],xmm9[6],xmm7[7],xmm9[7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm28, %ymm5
; AVX512-FCP-NEXT: vmovdqa64 %ymm29, %ymm7
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} ymm6 = ymm7[0],ymm6[0],ymm7[1],ymm6[1],ymm7[2],ymm6[2],ymm7[3],ymm6[3],ymm7[8],ymm6[8],ymm7[9],ymm6[9],ymm7[10],ymm6[10],ymm7[11],ymm6[11]
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [17,18,17,18,0,0,19,19,5,4,2,2,5,4,6,6]
-; AVX512-FCP-NEXT: vpermt2d %zmm1, %zmm7, %zmm6
-; AVX512-FCP-NEXT: vmovdqa32 %zmm0, %zmm6 {%k1}
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} ymm7 = ymm7[0],ymm5[0],ymm7[1],ymm5[1],ymm7[2],ymm5[2],ymm7[3],ymm5[3],ymm7[8],ymm5[8],ymm7[9],ymm5[9],ymm7[10],ymm5[10],ymm7[11],ymm5[11]
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm9 = [17,18,17,18,0,0,19,19,5,4,2,2,5,4,6,6]
+; AVX512-FCP-NEXT: vpermt2d %zmm0, %zmm9, %zmm7
+; AVX512-FCP-NEXT: vmovdqa32 %zmm1, %zmm7 {%k1}
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm3, %xmm1
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,9,2,3,8,5,6,11]
-; AVX512-FCP-NEXT: vmovdqa %ymm6, %ymm8
-; AVX512-FCP-NEXT: vpermt2d %ymm1, %ymm3, %ymm8
+; AVX512-FCP-NEXT: vmovdqa %ymm7, %ymm14
+; AVX512-FCP-NEXT: vpermt2d %ymm1, %ymm3, %ymm14
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm15 = [8,9,20,11,12,21,14,15]
; AVX512-FCP-NEXT: vmovdqa64 %ymm30, %ymm1
; AVX512-FCP-NEXT: vpshuflw {{.*#+}} ymm1 = ymm1[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512-FCP-NEXT: vpermt2d %zmm1, %zmm15, %zmm6
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm8, %zmm6
+; AVX512-FCP-NEXT: vpermt2d %zmm1, %zmm15, %zmm7
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm14, %zmm7
; AVX512-FCP-NEXT: vmovdqa64 %ymm16, %ymm1
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} ymm8 = ymm1[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} ymm14 = ymm1[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15]
; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm2, %xmm2
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm20 = [0,1,0,1,10,10,10,10]
-; AVX512-FCP-NEXT: vpermt2q %zmm8, %zmm20, %zmm2
-; AVX512-FCP-NEXT: vmovdqa64 %ymm23, %ymm8
-; AVX512-FCP-NEXT: vmovdqa64 %ymm25, %ymm14
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} ymm8 = ymm8[0],ymm14[0],ymm8[1],ymm14[1],ymm8[2],ymm14[2],ymm8[3],ymm14[3],ymm8[8],ymm14[8],ymm8[9],ymm14[9],ymm8[10],ymm14[10],ymm8[11],ymm14[11]
+; AVX512-FCP-NEXT: vpermt2q %zmm14, %zmm20, %zmm2
+; AVX512-FCP-NEXT: vmovdqa64 %ymm23, %ymm14
+; AVX512-FCP-NEXT: vmovdqa64 %ymm25, %ymm5
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} ymm14 = ymm14[0],ymm5[0],ymm14[1],ymm5[1],ymm14[2],ymm5[2],ymm14[3],ymm5[3],ymm14[8],ymm5[8],ymm14[9],ymm5[9],ymm14[10],ymm5[10],ymm14[11],ymm5[11]
; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm12 = xmm12[4],xmm13[4],xmm12[5],xmm13[5],xmm12[6],xmm13[6],xmm12[7],xmm13[7]
-; AVX512-FCP-NEXT: vpermt2q %zmm8, %zmm26, %zmm12
-; AVX512-FCP-NEXT: vmovdqa64 %xmm31, %xmm8
+; AVX512-FCP-NEXT: vpermt2q %zmm14, %zmm26, %zmm12
+; AVX512-FCP-NEXT: vmovdqa64 %xmm31, %xmm5
; AVX512-FCP-NEXT: vmovdqa64 %xmm18, %xmm13
-; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm8 = xmm8[4],xmm13[4],xmm8[5],xmm13[5],xmm8[6],xmm13[6],xmm8[7],xmm13[7]
+; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm5[4],xmm13[4],xmm5[5],xmm13[5],xmm5[6],xmm13[6],xmm5[7],xmm13[7]
; AVX512-FCP-NEXT: vmovdqa64 %ymm22, %ymm14
; AVX512-FCP-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm13 # 32-byte Folded Reload
; AVX512-FCP-NEXT: # ymm13 = ymm14[0],mem[0],ymm14[1],mem[1],ymm14[2],mem[2],ymm14[3],mem[3],ymm14[8],mem[8],ymm14[9],mem[9],ymm14[10],mem[10],ymm14[11],mem[11]
-; AVX512-FCP-NEXT: vpermt2d %zmm8, %zmm7, %zmm13
+; AVX512-FCP-NEXT: vpermt2d %zmm5, %zmm9, %zmm13
; AVX512-FCP-NEXT: vmovdqa32 %zmm12, %zmm13 {%k1}
; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm10, %xmm0
; AVX512-FCP-NEXT: vpermi2d %ymm0, %ymm13, %ymm3
@@ -4322,7 +4322,7 @@ define void @store_i16_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-FCP-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
; AVX512-FCP-NEXT: vpermt2d %zmm0, %zmm15, %zmm13
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm3, %zmm0
-; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm9, %xmm1
+; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm8, %xmm1
; AVX512-FCP-NEXT: vmovdqa64 %ymm27, %ymm3
; AVX512-FCP-NEXT: vpshuflw {{.*#+}} ymm3 = ymm3[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
; AVX512-FCP-NEXT: vpermt2q %zmm3, %zmm20, %zmm1
@@ -4330,13 +4330,13 @@ define void @store_i16_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 ^ (zmm3 & (zmm1 ^ zmm0))
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-FCP-NEXT: vmovdqa64 %zmm1, 256(%rax)
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (zmm3 & (zmm2 ^ zmm6))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (zmm3 & (zmm2 ^ zmm7))
; AVX512-FCP-NEXT: vmovdqa64 %zmm2, 64(%rax)
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535]
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 ^ (zmm0 & (zmm4 ^ zmm19))
; AVX512-FCP-NEXT: vmovdqa64 %zmm4, (%rax)
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 ^ (zmm0 & (zmm5 ^ zmm17))
-; AVX512-FCP-NEXT: vmovdqa64 %zmm5, 192(%rax)
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm6 ^ (zmm0 & (zmm6 ^ zmm17))
+; AVX512-FCP-NEXT: vmovdqa64 %zmm6, 192(%rax)
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0]
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm11 = zmm11 ^ (zmm0 & (zmm11 ^ zmm21))
; AVX512-FCP-NEXT: vmovdqa64 %zmm11, 128(%rax)
@@ -4666,91 +4666,91 @@ define void @store_i16_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %xmm12
; AVX512DQ-FCP-NEXT: vpshufb %xmm2, %xmm12, %xmm4
; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm20 = [0,0,2,1,8,9,8,9]
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm4 = [8,8,0,9,0,1,0,1]
; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm12[0],xmm13[0],xmm12[1],xmm13[1],xmm12[2],xmm13[2],xmm12[3],xmm13[3]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm1, %zmm20, %zmm5
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm5, %zmm4, %zmm1
; AVX512DQ-FCP-NEXT: movw $9362, %ax # imm = 0x2492
; AVX512DQ-FCP-NEXT: kmovw %eax, %k2
-; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm3, %zmm5 {%k2}
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm5, %ymm1
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [16,9,10,17,12,13,18,15]
+; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm3, %zmm1 {%k2}
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm1, %ymm3
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm20 = [16,9,10,17,12,13,18,15]
; AVX512DQ-FCP-NEXT: vmovdqa 32(%r8), %xmm10
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm10[2,1,3,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpermt2d %zmm3, %zmm6, %zmm5
-; AVX512DQ-FCP-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm10[0],zero,xmm10[1],zero,xmm10[2],zero,xmm10[3],zero
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm6 = xmm10[2,1,3,3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpermt2d %zmm6, %zmm20, %zmm1
+; AVX512DQ-FCP-NEXT: vpmovzxwd {{.*#+}} xmm6 = xmm10[0],zero,xmm10[1],zero,xmm10[2],zero,xmm10[3],zero
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm19 = [0,1,8,3,4,9,6,7]
-; AVX512DQ-FCP-NEXT: vpermt2d %ymm3, %ymm19, %ymm1
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm1, %zmm17
-; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %xmm8
+; AVX512DQ-FCP-NEXT: vpermt2d %ymm6, %ymm19, %ymm3
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm17
+; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %xmm9
; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %xmm7
-; AVX512DQ-FCP-NEXT: vpsrldq {{.*#+}} xmm1 = xmm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FCP-NEXT: vpsrldq {{.*#+}} xmm1 = xmm9[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
; AVX512DQ-FCP-NEXT: vpsrldq {{.*#+}} xmm3 = xmm7[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3]
; AVX512DQ-FCP-NEXT: vpermt2d %zmm1, %zmm0, %zmm3
-; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %xmm1
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm0
-; AVX512DQ-FCP-NEXT: vpshufb %xmm2, %xmm1, %xmm5
-; AVX512DQ-FCP-NEXT: vpshufb %xmm2, %xmm0, %xmm2
-; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm2, %zmm20, %zmm4
-; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm3, %zmm4 {%k2}
+; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %xmm5
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm1
+; AVX512DQ-FCP-NEXT: vpshufb %xmm2, %xmm5, %xmm8
+; AVX512DQ-FCP-NEXT: vpshufb %xmm2, %xmm1, %xmm2
+; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm8[4],xmm2[5],xmm8[5],xmm2[6],xmm8[6],xmm2[7],xmm8[7]
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm8, %zmm4, %zmm2
+; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm3, %zmm2 {%k2}
; AVX512DQ-FCP-NEXT: vmovdqa (%r8), %xmm3
-; AVX512DQ-FCP-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; AVX512DQ-FCP-NEXT: vpermi2d %ymm2, %ymm4, %ymm19
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm3[2,1,3,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpermt2d %zmm2, %zmm6, %zmm4
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm20 = [0,0,0,0,8,8,10,9]
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%r9), %xmm9
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm9, %xmm2
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm9[0,0,2,1,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm2, %zmm20, %zmm5
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm19, %zmm19
+; AVX512DQ-FCP-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; AVX512DQ-FCP-NEXT: vpermi2d %ymm4, %ymm2, %ymm19
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm3[2,1,3,3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpermt2d %zmm4, %zmm20, %zmm2
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm20 = [0,0,0,0,8,8,0,9]
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%r9), %xmm8
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm8, %xmm0
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm6 = xmm8[0,0,2,1,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm0, %zmm20, %zmm6
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm19, %zmm19
; AVX512DQ-FCP-NEXT: vmovdqa (%r9), %xmm2
-; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm2, %xmm6
+; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm2, %xmm0
; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm2[0,0,2,1,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm6, %zmm20, %zmm4
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} ymm6 = ymm14[0],ymm15[0],ymm14[1],ymm15[1],ymm14[2],ymm15[2],ymm14[3],ymm15[3],ymm14[8],ymm15[8],ymm14[9],ymm15[9],ymm14[10],ymm15[10],ymm14[11],ymm15[11]
-; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm0, %zmm20, %zmm4
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm14[0],ymm15[0],ymm14[1],ymm15[1],ymm14[2],ymm15[2],ymm14[3],ymm15[3],ymm14[8],ymm15[8],ymm14[9],ymm15[9],ymm14[10],ymm15[10],ymm14[11],ymm15[11]
+; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm26 = [1,1,1,1,10,10,10,11]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm6, %zmm26, %zmm0
-; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm28, %ymm6
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm0, %zmm26, %zmm1
+; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm7[4],xmm9[4],xmm7[5],xmm9[5],xmm7[6],xmm9[6],xmm7[7],xmm9[7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm28, %ymm5
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm29, %ymm7
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} ymm6 = ymm7[0],ymm6[0],ymm7[1],ymm6[1],ymm7[2],ymm6[2],ymm7[3],ymm6[3],ymm7[8],ymm6[8],ymm7[9],ymm6[9],ymm7[10],ymm6[10],ymm7[11],ymm6[11]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [17,18,17,18,0,0,19,19,5,4,2,2,5,4,6,6]
-; AVX512DQ-FCP-NEXT: vpermt2d %zmm1, %zmm7, %zmm6
-; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm0, %zmm6 {%k1}
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} ymm7 = ymm7[0],ymm5[0],ymm7[1],ymm5[1],ymm7[2],ymm5[2],ymm7[3],ymm5[3],ymm7[8],ymm5[8],ymm7[9],ymm5[9],ymm7[10],ymm5[10],ymm7[11],ymm5[11]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm9 = [17,18,17,18,0,0,19,19,5,4,2,2,5,4,6,6]
+; AVX512DQ-FCP-NEXT: vpermt2d %zmm0, %zmm9, %zmm7
+; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm1, %zmm7 {%k1}
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm3, %xmm1
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,9,2,3,8,5,6,11]
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm6, %ymm8
-; AVX512DQ-FCP-NEXT: vpermt2d %ymm1, %ymm3, %ymm8
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm7, %ymm14
+; AVX512DQ-FCP-NEXT: vpermt2d %ymm1, %ymm3, %ymm14
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm15 = [8,9,20,11,12,21,14,15]
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm30, %ymm1
; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} ymm1 = ymm1[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpermt2d %zmm1, %zmm15, %zmm6
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm8, %zmm6
+; AVX512DQ-FCP-NEXT: vpermt2d %zmm1, %zmm15, %zmm7
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm14, %zmm7
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm16, %ymm1
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} ymm8 = ymm1[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} ymm14 = ymm1[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15]
; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm2, %xmm2
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm20 = [0,1,0,1,10,10,10,10]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm8, %zmm20, %zmm2
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm23, %ymm8
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm25, %ymm14
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} ymm8 = ymm8[0],ymm14[0],ymm8[1],ymm14[1],ymm8[2],ymm14[2],ymm8[3],ymm14[3],ymm8[8],ymm14[8],ymm8[9],ymm14[9],ymm8[10],ymm14[10],ymm8[11],ymm14[11]
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm14, %zmm20, %zmm2
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm23, %ymm14
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm25, %ymm5
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} ymm14 = ymm14[0],ymm5[0],ymm14[1],ymm5[1],ymm14[2],ymm5[2],ymm14[3],ymm5[3],ymm14[8],ymm5[8],ymm14[9],ymm5[9],ymm14[10],ymm5[10],ymm14[11],ymm5[11]
; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm12 = xmm12[4],xmm13[4],xmm12[5],xmm13[5],xmm12[6],xmm13[6],xmm12[7],xmm13[7]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm8, %zmm26, %zmm12
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm31, %xmm8
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm14, %zmm26, %zmm12
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm31, %xmm5
; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm18, %xmm13
-; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm8 = xmm8[4],xmm13[4],xmm8[5],xmm13[5],xmm8[6],xmm13[6],xmm8[7],xmm13[7]
+; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm5[4],xmm13[4],xmm5[5],xmm13[5],xmm5[6],xmm13[6],xmm5[7],xmm13[7]
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm22, %ymm14
; AVX512DQ-FCP-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm13 # 32-byte Folded Reload
; AVX512DQ-FCP-NEXT: # ymm13 = ymm14[0],mem[0],ymm14[1],mem[1],ymm14[2],mem[2],ymm14[3],mem[3],ymm14[8],mem[8],ymm14[9],mem[9],ymm14[10],mem[10],ymm14[11],mem[11]
-; AVX512DQ-FCP-NEXT: vpermt2d %zmm8, %zmm7, %zmm13
+; AVX512DQ-FCP-NEXT: vpermt2d %zmm5, %zmm9, %zmm13
; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm12, %zmm13 {%k1}
; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm10, %xmm0
; AVX512DQ-FCP-NEXT: vpermi2d %ymm0, %ymm13, %ymm3
@@ -4758,7 +4758,7 @@ define void @store_i16_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
; AVX512DQ-FCP-NEXT: vpermt2d %zmm0, %zmm15, %zmm13
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm3, %zmm0
-; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm9, %xmm1
+; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm8, %xmm1
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm27, %ymm3
; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} ymm3 = ymm3[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
; AVX512DQ-FCP-NEXT: vpermt2q %zmm3, %zmm20, %zmm1
@@ -4766,13 +4766,13 @@ define void @store_i16_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 ^ (zmm3 & (zmm1 ^ zmm0))
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm1, 256(%rax)
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (zmm3 & (zmm2 ^ zmm6))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (zmm3 & (zmm2 ^ zmm7))
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm2, 64(%rax)
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535]
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 ^ (zmm0 & (zmm4 ^ zmm19))
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, (%rax)
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 ^ (zmm0 & (zmm5 ^ zmm17))
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm5, 192(%rax)
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm6 ^ (zmm0 & (zmm6 ^ zmm17))
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm6, 192(%rax)
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0]
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm11 = zmm11 ^ (zmm0 & (zmm11 ^ zmm21))
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm11, 128(%rax)
@@ -8848,24 +8848,24 @@ define void @store_i16_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-FCP-NEXT: vmovdqa %xmm5, (%rsp) # 16-byte Spill
; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm5, %xmm3
; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm27 = [0,0,2,1,8,9,8,9]
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm27 = [8,8,0,9,0,1,0,1]
; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; AVX512-FCP-NEXT: vpermt2q %zmm1, %zmm27, %zmm3
+; AVX512-FCP-NEXT: vpermt2q %zmm3, %zmm27, %zmm1
; AVX512-FCP-NEXT: movw $9362, %ax # imm = 0x2492
; AVX512-FCP-NEXT: kmovw %eax, %k2
-; AVX512-FCP-NEXT: vmovdqa32 %zmm2, %zmm3 {%k2}
-; AVX512-FCP-NEXT: vmovdqa %ymm3, %ymm1
+; AVX512-FCP-NEXT: vmovdqa32 %zmm2, %zmm1 {%k2}
+; AVX512-FCP-NEXT: vmovdqa %ymm1, %ymm2
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm28 = [16,9,10,17,12,13,18,15]
; AVX512-FCP-NEXT: vmovdqa 96(%r8), %xmm4
; AVX512-FCP-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm4[2,1,3,3,4,5,6,7]
-; AVX512-FCP-NEXT: vpermt2d %zmm2, %zmm28, %zmm3
-; AVX512-FCP-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm4[2,1,3,3,4,5,6,7]
+; AVX512-FCP-NEXT: vpermt2d %zmm3, %zmm28, %zmm1
+; AVX512-FCP-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm29 = [0,1,8,3,4,9,6,7]
-; AVX512-FCP-NEXT: vpermt2d %ymm2, %ymm29, %ymm1
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512-FCP-NEXT: vpermt2d %ymm3, %ymm29, %ymm2
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
; AVX512-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm30 = [0,0,0,0,8,8,10,9]
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm30 = [0,0,0,0,8,8,0,9]
; AVX512-FCP-NEXT: vmovdqa 96(%r9), %xmm2
; AVX512-FCP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
@@ -8890,16 +8890,16 @@ define void @store_i16_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm5, %xmm3
; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; AVX512-FCP-NEXT: vpermt2q %zmm1, %zmm27, %zmm3
-; AVX512-FCP-NEXT: vmovdqa32 %zmm2, %zmm3 {%k2}
-; AVX512-FCP-NEXT: vmovdqa %ymm3, %ymm1
+; AVX512-FCP-NEXT: vpermt2q %zmm3, %zmm27, %zmm1
+; AVX512-FCP-NEXT: vmovdqa32 %zmm2, %zmm1 {%k2}
+; AVX512-FCP-NEXT: vmovdqa %ymm1, %ymm2
; AVX512-FCP-NEXT: vmovdqa 64(%r8), %xmm4
; AVX512-FCP-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm4[2,1,3,3,4,5,6,7]
-; AVX512-FCP-NEXT: vpermt2d %zmm2, %zmm28, %zmm3
-; AVX512-FCP-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
-; AVX512-FCP-NEXT: vpermt2d %ymm2, %ymm29, %ymm1
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm4[2,1,3,3,4,5,6,7]
+; AVX512-FCP-NEXT: vpermt2d %zmm3, %zmm28, %zmm1
+; AVX512-FCP-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
+; AVX512-FCP-NEXT: vpermt2d %ymm3, %ymm29, %ymm2
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
; AVX512-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT: vmovdqa 64(%r9), %xmm2
; AVX512-FCP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -8984,16 +8984,16 @@ define void @store_i16_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; AVX512-FCP-NEXT: vmovdqa64 %xmm4, %xmm18
; AVX512-FCP-NEXT: vmovdqa64 %xmm3, %xmm17
-; AVX512-FCP-NEXT: vpermt2q %zmm0, %zmm27, %zmm2
-; AVX512-FCP-NEXT: vmovdqa32 %zmm1, %zmm2 {%k2}
-; AVX512-FCP-NEXT: vmovdqa %ymm2, %ymm0
+; AVX512-FCP-NEXT: vpermt2q %zmm2, %zmm27, %zmm0
+; AVX512-FCP-NEXT: vmovdqa32 %zmm1, %zmm0 {%k2}
+; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm1
; AVX512-FCP-NEXT: vmovdqa 32(%r8), %xmm3
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm1 = xmm3[2,1,3,3,4,5,6,7]
-; AVX512-FCP-NEXT: vpermt2d %zmm1, %zmm28, %zmm2
-; AVX512-FCP-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm3[2,1,3,3,4,5,6,7]
+; AVX512-FCP-NEXT: vpermt2d %zmm2, %zmm28, %zmm0
+; AVX512-FCP-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
; AVX512-FCP-NEXT: vmovdqa64 %xmm3, %xmm16
-; AVX512-FCP-NEXT: vpermt2d %ymm1, %ymm29, %ymm0
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm22
+; AVX512-FCP-NEXT: vpermt2d %ymm2, %ymm29, %ymm1
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm22
; AVX512-FCP-NEXT: vmovdqa (%rcx), %xmm6
; AVX512-FCP-NEXT: vmovdqa (%rdx), %xmm5
; AVX512-FCP-NEXT: vpsrldq {{.*#+}} xmm0 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
@@ -9007,19 +9007,19 @@ define void @store_i16_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm1, %xmm7
; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm7[4],xmm3[4],xmm7[5],xmm3[5],xmm7[6],xmm3[6],xmm7[7],xmm3[7]
; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; AVX512-FCP-NEXT: vpermt2q %zmm3, %zmm27, %zmm7
-; AVX512-FCP-NEXT: vmovdqa32 %zmm2, %zmm7 {%k2}
+; AVX512-FCP-NEXT: vpermt2q %zmm7, %zmm27, %zmm3
+; AVX512-FCP-NEXT: vmovdqa32 %zmm2, %zmm3 {%k2}
; AVX512-FCP-NEXT: vmovdqa (%r8), %xmm2
-; AVX512-FCP-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; AVX512-FCP-NEXT: vpermi2d %ymm3, %ymm7, %ymm29
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm2[2,1,3,3,4,5,6,7]
-; AVX512-FCP-NEXT: vpermt2d %zmm3, %zmm28, %zmm7
+; AVX512-FCP-NEXT: vpmovzxwd {{.*#+}} xmm7 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; AVX512-FCP-NEXT: vpermi2d %ymm7, %ymm3, %ymm29
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm2[2,1,3,3,4,5,6,7]
+; AVX512-FCP-NEXT: vpermt2d %zmm7, %zmm28, %zmm3
; AVX512-FCP-NEXT: vmovdqa 32(%r9), %xmm15
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
-; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm15, %xmm3
+; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm15, %xmm7
; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm8 = xmm15[0,0,2,1,4,5,6,7]
-; AVX512-FCP-NEXT: vpermt2q %zmm3, %zmm30, %zmm8
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm29, %zmm23
+; AVX512-FCP-NEXT: vpermt2q %zmm7, %zmm30, %zmm8
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm29, %zmm23
; AVX512-FCP-NEXT: vmovdqa (%r9), %xmm3
; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm3, %xmm0
; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm3[0,0,2,1,4,5,6,7]
@@ -9757,24 +9757,24 @@ define void @store_i16_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-FCP-NEXT: vmovdqa %xmm5, (%rsp) # 16-byte Spill
; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm5, %xmm3
; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm27 = [0,0,2,1,8,9,8,9]
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm27 = [8,8,0,9,0,1,0,1]
; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm1, %zmm27, %zmm3
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm3, %zmm27, %zmm1
; AVX512DQ-FCP-NEXT: movw $9362, %ax # imm = 0x2492
; AVX512DQ-FCP-NEXT: kmovw %eax, %k2
-; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm2, %zmm3 {%k2}
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm3, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm2, %zmm1 {%k2}
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm1, %ymm2
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm28 = [16,9,10,17,12,13,18,15]
; AVX512DQ-FCP-NEXT: vmovdqa 96(%r8), %xmm4
; AVX512DQ-FCP-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm4[2,1,3,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpermt2d %zmm2, %zmm28, %zmm3
-; AVX512DQ-FCP-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm4[2,1,3,3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpermt2d %zmm3, %zmm28, %zmm1
+; AVX512DQ-FCP-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm29 = [0,1,8,3,4,9,6,7]
-; AVX512DQ-FCP-NEXT: vpermt2d %ymm2, %ymm29, %ymm1
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512DQ-FCP-NEXT: vpermt2d %ymm3, %ymm29, %ymm2
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm30 = [0,0,0,0,8,8,10,9]
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm30 = [0,0,0,0,8,8,0,9]
; AVX512DQ-FCP-NEXT: vmovdqa 96(%r9), %xmm2
; AVX512DQ-FCP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
@@ -9799,16 +9799,16 @@ define void @store_i16_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm5, %xmm3
; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm1, %zmm27, %zmm3
-; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm2, %zmm3 {%k2}
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm3, %ymm1
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm3, %zmm27, %zmm1
+; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm2, %zmm1 {%k2}
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm1, %ymm2
; AVX512DQ-FCP-NEXT: vmovdqa 64(%r8), %xmm4
; AVX512DQ-FCP-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm4[2,1,3,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpermt2d %zmm2, %zmm28, %zmm3
-; AVX512DQ-FCP-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
-; AVX512DQ-FCP-NEXT: vpermt2d %ymm2, %ymm29, %ymm1
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm4[2,1,3,3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpermt2d %zmm3, %zmm28, %zmm1
+; AVX512DQ-FCP-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
+; AVX512DQ-FCP-NEXT: vpermt2d %ymm3, %ymm29, %ymm2
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT: vmovdqa 64(%r9), %xmm2
; AVX512DQ-FCP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -9893,16 +9893,16 @@ define void @store_i16_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm4, %xmm18
; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm3, %xmm17
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm0, %zmm27, %zmm2
-; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm1, %zmm2 {%k2}
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm2, %ymm0
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm2, %zmm27, %zmm0
+; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm1, %zmm0 {%k2}
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm1
; AVX512DQ-FCP-NEXT: vmovdqa 32(%r8), %xmm3
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm1 = xmm3[2,1,3,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpermt2d %zmm1, %zmm28, %zmm2
-; AVX512DQ-FCP-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm3[2,1,3,3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpermt2d %zmm2, %zmm28, %zmm0
+; AVX512DQ-FCP-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm3, %xmm16
-; AVX512DQ-FCP-NEXT: vpermt2d %ymm1, %ymm29, %ymm0
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm22
+; AVX512DQ-FCP-NEXT: vpermt2d %ymm2, %ymm29, %ymm1
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm22
; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %xmm6
; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %xmm5
; AVX512DQ-FCP-NEXT: vpsrldq {{.*#+}} xmm0 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
@@ -9916,19 +9916,19 @@ define void @store_i16_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm1, %xmm7
; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm7[4],xmm3[4],xmm7[5],xmm3[5],xmm7[6],xmm3[6],xmm7[7],xmm3[7]
; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm3, %zmm27, %zmm7
-; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm2, %zmm7 {%k2}
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm7, %zmm27, %zmm3
+; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm2, %zmm3 {%k2}
; AVX512DQ-FCP-NEXT: vmovdqa (%r8), %xmm2
-; AVX512DQ-FCP-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; AVX512DQ-FCP-NEXT: vpermi2d %ymm3, %ymm7, %ymm29
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm2[2,1,3,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpermt2d %zmm3, %zmm28, %zmm7
+; AVX512DQ-FCP-NEXT: vpmovzxwd {{.*#+}} xmm7 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; AVX512DQ-FCP-NEXT: vpermi2d %ymm7, %ymm3, %ymm29
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm2[2,1,3,3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpermt2d %zmm7, %zmm28, %zmm3
; AVX512DQ-FCP-NEXT: vmovdqa 32(%r9), %xmm15
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm15, %xmm3
+; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm15, %xmm7
; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm8 = xmm15[0,0,2,1,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm3, %zmm30, %zmm8
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm29, %zmm23
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm7, %zmm30, %zmm8
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm29, %zmm23
; AVX512DQ-FCP-NEXT: vmovdqa (%r9), %xmm3
; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm3, %xmm0
; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm3[0,0,2,1,4,5,6,7]
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
index 5aa7c055d408e..7b619344e83f6 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
@@ -2864,8 +2864,8 @@ define void @store_i16_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-NEXT: vpblendw {{.*#+}} xmm11 = xmm12[0,1],xmm11[2],xmm12[3,4],xmm11[5],xmm12[6,7]
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm12 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3]
; AVX512-NEXT: vpshuflw {{.*#+}} xmm12 = xmm12[0,1,3,2,4,5,6,7]
-; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm18 = [0,16,0,0,17,17,0,0,0,0,0,1,0,0,2,0]
-; AVX512-NEXT: vpermi2d %zmm12, %zmm11, %zmm18
+; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm18 = [0,0,0,0,1,1,0,0,0,0,16,17,0,0,18,0]
+; AVX512-NEXT: vpermi2d %zmm11, %zmm12, %zmm18
; AVX512-NEXT: vpshufd {{.*#+}} ymm11 = ymm8[2,2,2,2,6,6,6,6]
; AVX512-NEXT: vpshufhw {{.*#+}} ymm12 = ymm9[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
; AVX512-NEXT: vpshufd {{.*#+}} ymm12 = ymm12[2,2,2,2,6,6,6,6]
@@ -3126,8 +3126,8 @@ define void @store_i16_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm11 = xmm12[0,1],xmm11[2],xmm12[3,4],xmm11[5],xmm12[6,7]
; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm12 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3]
; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm12 = xmm12[0,1,3,2,4,5,6,7]
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm18 = [0,16,0,0,17,17,0,0,0,0,0,1,0,0,2,0]
-; AVX512DQ-NEXT: vpermi2d %zmm12, %zmm11, %zmm18
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm18 = [0,0,0,0,1,1,0,0,0,0,16,17,0,0,18,0]
+; AVX512DQ-NEXT: vpermi2d %zmm11, %zmm12, %zmm18
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm11 = ymm8[2,2,2,2,6,6,6,6]
; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm12 = ymm9[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm12 = ymm12[2,2,2,2,6,6,6,6]
@@ -5859,32 +5859,34 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
;
; AVX512-LABEL: store_i16_stride7_vf32:
; AVX512: # %bb.0:
-; AVX512-NEXT: subq $680, %rsp # imm = 0x2A8
-; AVX512-NEXT: vmovdqa (%rcx), %ymm1
+; AVX512-NEXT: subq $664, %rsp # imm = 0x298
+; AVX512-NEXT: vmovdqa (%rcx), %ymm9
; AVX512-NEXT: vmovdqa {{.*#+}} ymm13 = [128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128]
-; AVX512-NEXT: vpshufb %ymm13, %ymm1, %ymm0
-; AVX512-NEXT: vmovdqa64 %ymm1, %ymm27
-; AVX512-NEXT: vmovdqa (%rdx), %ymm8
+; AVX512-NEXT: vpshufb %ymm13, %ymm9, %ymm0
+; AVX512-NEXT: vmovdqa (%rdx), %ymm2
; AVX512-NEXT: vmovdqa {{.*#+}} ymm14 = [u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u]
-; AVX512-NEXT: vpshufb %ymm14, %ymm8, %ymm1
+; AVX512-NEXT: vpshufb %ymm14, %ymm2, %ymm1
+; AVX512-NEXT: vmovdqa64 %ymm2, %ymm21
; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vmovdqa (%rsi), %ymm9
+; AVX512-NEXT: vmovdqa (%rsi), %ymm11
; AVX512-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512-NEXT: vpshufb %ymm12, %ymm9, %ymm0
-; AVX512-NEXT: vmovdqa (%rdi), %ymm11
+; AVX512-NEXT: vpshufb %ymm12, %ymm11, %ymm0
+; AVX512-NEXT: vmovdqa64 %ymm11, %ymm17
+; AVX512-NEXT: vmovdqa (%rdi), %ymm2
; AVX512-NEXT: vmovdqa {{.*#+}} ymm15 = [12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19]
-; AVX512-NEXT: vpshufb %ymm15, %ymm11, %ymm1
+; AVX512-NEXT: vpshufb %ymm15, %ymm2, %ymm1
+; AVX512-NEXT: vmovdqa64 %ymm2, %ymm26
; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-NEXT: vmovdqa (%r9), %ymm1
; AVX512-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
; AVX512-NEXT: vpshufb %ymm0, %ymm1, %ymm2
-; AVX512-NEXT: vmovdqa64 %ymm1, %ymm16
+; AVX512-NEXT: vmovdqa64 %ymm1, %ymm19
; AVX512-NEXT: vmovdqa (%r8), %ymm4
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u]
; AVX512-NEXT: vpshufb %ymm1, %ymm4, %ymm3
-; AVX512-NEXT: vmovdqa64 %ymm4, %ymm17
+; AVX512-NEXT: vmovdqa64 %ymm4, %ymm20
; AVX512-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX512-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-NEXT: vmovdqa 32(%r9), %xmm2
@@ -5898,8 +5900,8 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512-NEXT: vmovdqa 32(%rsi), %ymm10
-; AVX512-NEXT: vpshufb %ymm12, %ymm10, %ymm3
+; AVX512-NEXT: vmovdqa 32(%rsi), %ymm11
+; AVX512-NEXT: vpshufb %ymm12, %ymm11, %ymm3
; AVX512-NEXT: vpshufb %ymm15, %ymm2, %ymm4
; AVX512-NEXT: vpor %ymm3, %ymm4, %ymm3
; AVX512-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -5923,73 +5925,74 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0],xmm4[1],xmm5[2,3],xmm4[4],xmm5[5,6],xmm4[7]
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
; AVX512-NEXT: vpshuflw {{.*#+}} xmm5 = xmm5[0,1,3,2,4,5,6,7]
-; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm26 = [0,16,0,0,17,17,0,0,0,0,0,1,2,0,0,3]
-; AVX512-NEXT: vpermi2d %zmm5, %zmm4, %zmm26
+; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm6 = [0,16,0,0,17,17,0,0,0,0,0,1,2,0,0,3]
+; AVX512-NEXT: vpermi2d %zmm5, %zmm4, %zmm6
+; AVX512-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: vmovdqa (%r9), %xmm4
; AVX512-NEXT: vmovdqa (%r8), %xmm5
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
; AVX512-NEXT: vpshufhw {{.*#+}} xmm7 = xmm6[0,1,2,3,4,5,7,6]
; AVX512-NEXT: vpshuflw {{.*#+}} xmm6 = xmm6[0,1,3,2,4,5,6,7]
-; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm25 = [0,0,0,1,0,1,1,0,0,18,19,0,19,19,0,0]
-; AVX512-NEXT: vpermi2d %zmm7, %zmm6, %zmm25
+; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm24 = [0,0,0,1,0,1,1,0,0,18,19,0,19,19,0,0]
+; AVX512-NEXT: vpermi2d %zmm7, %zmm6, %zmm24
; AVX512-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vmovdqa64 %xmm0, %xmm27
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-NEXT: vpbroadcastd 8(%rax), %ymm0
; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512-NEXT: vmovdqa (%rax), %ymm3
-; AVX512-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm7 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
-; AVX512-NEXT: vpshufb %ymm7, %ymm3, %ymm6
-; AVX512-NEXT: vmovdqa64 %ymm7, %ymm21
+; AVX512-NEXT: vmovdqa (%rax), %ymm6
+; AVX512-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
+; AVX512-NEXT: vpshufb %ymm3, %ymm6, %ymm6
; AVX512-NEXT: vinserti64x4 $1, %ymm6, %zmm0, %zmm0
; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
; AVX512-NEXT: vpshufb {{.*#+}} ymm4 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27]
; AVX512-NEXT: vpshufd {{.*#+}} ymm5 = ymm15[2,2,2,2,6,6,6,6]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7,8,9],ymm5[10],ymm4[11,12],ymm5[13],ymm4[14,15]
-; AVX512-NEXT: vmovdqa64 %ymm3, %ymm28
+; AVX512-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7,8,9],ymm5[10],ymm4[11,12],ymm5[13],ymm4[14,15]
+; AVX512-NEXT: vmovdqa64 %ymm4, %ymm28
; AVX512-NEXT: vpshufhw {{.*#+}} ymm4 = ymm12[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
; AVX512-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,2,2,2,6,6,6,6]
; AVX512-NEXT: vpshufd {{.*#+}} ymm5 = ymm15[3,3,3,3,7,7,7,7]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6,7,8],ymm4[9],ymm5[10,11],ymm4[12],ymm5[13,14,15]
-; AVX512-NEXT: vmovdqa64 %ymm3, %ymm29
-; AVX512-NEXT: vpshufhw {{.*#+}} ymm4 = ymm10[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm8 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6,7,8],ymm4[9],ymm5[10,11],ymm4[12],ymm5[13,14,15]
+; AVX512-NEXT: vmovdqa64 %ymm8, %ymm29
+; AVX512-NEXT: vpshufhw {{.*#+}} ymm4 = ymm11[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
; AVX512-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,2,2,2,6,6,6,6]
; AVX512-NEXT: vpshufd {{.*#+}} ymm5 = ymm2[2,2,2,2,6,6,6,6]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6,7,8],ymm4[9],ymm5[10,11],ymm4[12],ymm5[13,14,15]
-; AVX512-NEXT: vmovdqa64 %ymm3, %ymm30
-; AVX512-NEXT: vpshufhw {{.*#+}} ymm4 = ymm10[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6,7,8],ymm4[9],ymm5[10,11],ymm4[12],ymm5[13,14,15]
+; AVX512-NEXT: vmovdqa64 %ymm4, %ymm30
+; AVX512-NEXT: vpshufhw {{.*#+}} ymm4 = ymm11[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
; AVX512-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,2,2,2,6,6,6,6]
; AVX512-NEXT: vpshufd {{.*#+}} ymm5 = ymm2[3,3,3,3,7,7,7,7]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0,1,2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7,8,9,10],ymm4[11],ymm5[12,13],ymm4[14],ymm5[15]
-; AVX512-NEXT: vmovdqa64 %ymm3, %ymm31
+; AVX512-NEXT: vpblendw {{.*#+}} ymm4 = ymm5[0,1,2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7,8,9,10],ymm4[11],ymm5[12,13],ymm4[14],ymm5[15]
+; AVX512-NEXT: vmovdqa64 %ymm4, %ymm31
; AVX512-NEXT: vprold $16, %ymm13, %ymm4
; AVX512-NEXT: vpshufd {{.*#+}} ymm5 = ymm14[1,2,2,3,5,6,6,7]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7,8,9],ymm4[10],ymm5[11,12],ymm4[13],ymm5[14,15]
-; AVX512-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vpblendw {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7,8,9],ymm4[10],ymm5[11,12],ymm4[13],ymm5[14,15]
+; AVX512-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-NEXT: vpshufhw {{.*#+}} ymm4 = ymm13[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
; AVX512-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,2,2,3,6,6,6,7]
; AVX512-NEXT: vpshufd {{.*#+}} ymm5 = ymm14[3,3,3,3,7,7,7,7]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7,8,9],ymm5[10],ymm4[11,12],ymm5[13],ymm4[14,15]
-; AVX512-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vmovdqa64 %ymm27, %ymm3
-; AVX512-NEXT: vpshufhw {{.*#+}} ymm4 = ymm3[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7,8,9],ymm5[10],ymm4[11,12],ymm5[13],ymm4[14,15]
+; AVX512-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vmovdqa %ymm9, %ymm8
+; AVX512-NEXT: vpshufhw {{.*#+}} ymm4 = ymm9[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
; AVX512-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,2,2,2,6,6,6,6]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm5 = ymm8[3,3,3,3,7,7,7,7]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm5 = ymm21[3,3,3,3,7,7,7,7]
; AVX512-NEXT: vpblendw {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6,7,8],ymm4[9],ymm5[10,11],ymm4[12],ymm5[13,14,15]
-; AVX512-NEXT: vmovdqu %ymm4, (%rsp) # 32-byte Spill
+; AVX512-NEXT: vmovdqa64 %ymm4, %ymm25
+; AVX512-NEXT: vmovdqa64 %ymm17, %ymm9
; AVX512-NEXT: vpshufhw {{.*#+}} ymm4 = ymm9[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
; AVX512-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,2,2,2,6,6,6,6]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm5 = ymm11[3,3,3,3,7,7,7,7]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm5 = ymm26[3,3,3,3,7,7,7,7]
; AVX512-NEXT: vpblendw {{.*#+}} ymm4 = ymm5[0,1,2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7,8,9,10],ymm4[11],ymm5[12,13],ymm4[14],ymm5[15]
-; AVX512-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vmovdqa64 %ymm16, %ymm4
-; AVX512-NEXT: vpshufhw {{.*#+}} ymm4 = ymm4[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX512-NEXT: vmovdqa64 %ymm4, %ymm18
+; AVX512-NEXT: vmovdqa64 %ymm19, %ymm10
+; AVX512-NEXT: vpshufhw {{.*#+}} ymm4 = ymm10[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
; AVX512-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,2,2,3,6,6,6,7]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm5 = ymm17[3,3,3,3,7,7,7,7]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm5 = ymm20[3,3,3,3,7,7,7,7]
; AVX512-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7,8,9],ymm5[10],ymm4[11,12],ymm5[13],ymm4[14,15]
-; AVX512-NEXT: vmovdqa64 %ymm4, %ymm19
+; AVX512-NEXT: vmovdqa64 %ymm4, %ymm17
; AVX512-NEXT: vmovdqa 32(%rdi), %xmm4
; AVX512-NEXT: vmovdqa 32(%rsi), %xmm5
; AVX512-NEXT: vprold $16, %xmm5, %xmm6
@@ -6005,12 +6008,11 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-NEXT: vpshufd {{.*#+}} xmm7 = xmm5[1,1,2,2]
; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm7[0],xmm1[1],xmm7[2,3],xmm1[4],xmm7[5,6],xmm1[7]
; AVX512-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; AVX512-NEXT: vmovdqa64 %xmm1, %xmm18
+; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
; AVX512-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
; AVX512-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm2[1,1,1,1,5,5,5,5]
-; AVX512-NEXT: vpshuflw {{.*#+}} ymm2 = ymm10[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
+; AVX512-NEXT: vpshuflw {{.*#+}} ymm2 = ymm11[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,0,2,1,4,4,6,5]
; AVX512-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7,8,9],ymm1[10],ymm2[11,12],ymm1[13],ymm2[14,15]
; AVX512-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -6024,489 +6026,492 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,0,0,0,4,4,4,4]
; AVX512-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5],ymm1[6],ymm2[7,8,9,10],ymm1[11],ymm2[12,13],ymm1[14],ymm2[15]
; AVX512-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm1 = [0,5,0,0,0,6,0,0,6,0,0,0,7,0,0,7]
-; AVX512-NEXT: vmovdqa 32(%rax), %ymm2
-; AVX512-NEXT: vpermd %zmm2, %zmm1, %zmm27
-; AVX512-NEXT: vmovdqa64 %ymm21, %ymm1
-; AVX512-NEXT: vpshufb %ymm1, %ymm2, %ymm1
-; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,1,1,3,4,5,5,7]
-; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
-; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm20
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512-NEXT: vpshufb %xmm4, %xmm0, %xmm5
-; AVX512-NEXT: vpshufb %xmm4, %xmm6, %xmm14
-; AVX512-NEXT: vmovdqa (%rdi), %xmm1
-; AVX512-NEXT: vmovdqa (%rsi), %xmm2
-; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; AVX512-NEXT: vpshufb %xmm4, %xmm0, %xmm0
-; AVX512-NEXT: vmovdqa64 %ymm0, %ymm21
-; AVX512-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; AVX512-NEXT: vprold $16, %xmm2, %xmm2
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3,4],xmm2[5],xmm1[6,7]
-; AVX512-NEXT: vmovdqa64 %ymm1, %ymm24
-; AVX512-NEXT: vpshuflw {{.*#+}} ymm1 = ymm3[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,0,0,0,4,4,4,4]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm8[0,1,1,3,4,5,5,7]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7,8,9],ymm1[10],ymm2[11,12],ymm1[13],ymm2[14,15]
-; AVX512-NEXT: vmovdqa64 %ymm1, %ymm23
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm8[2,2,2,2,6,6,6,6]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
-; AVX512-NEXT: vmovdqa64 %ymm1, %ymm22
-; AVX512-NEXT: vpshuflw {{.*#+}} ymm1 = ymm9[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,0,2,1,4,4,6,5]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm11[1,1,1,1,5,5,5,5]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm13 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm11[2,2,2,2,6,6,6,6]
-; AVX512-NEXT: vpshufhw {{.*#+}} ymm2 = ymm9[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[2,2,2,2,6,6,6,6]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm11 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6,7,8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13,14,15]
-; AVX512-NEXT: vmovdqa64 %ymm16, %ymm4
-; AVX512-NEXT: vpshuflw {{.*#+}} ymm1 = ymm4[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,0,0,0,4,4,4,4]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm17[0,0,2,1,4,4,6,5]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm8 = ymm1[0,1,2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7,8,9,10],ymm2[11],ymm1[12,13],ymm2[14],ymm1[15]
-; AVX512-NEXT: vprold $16, %ymm16, %ymm1
-; AVX512-NEXT: vpshufd {{.*#+}} ymm10 = ymm17[1,2,2,3,5,6,6,7]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm7 = ymm10[0,1],ymm1[2],ymm10[3,4],ymm1[5],ymm10[6,7,8,9],ymm1[10],ymm10[11,12],ymm1[13],ymm10[14,15]
-; AVX512-NEXT: vpshuflw $248, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
-; AVX512-NEXT: # xmm10 = mem[0,2,3,3,4,5,6,7]
+; AVX512-NEXT: vmovdqa 32(%rax), %ymm1
+; AVX512-NEXT: vpshufb %ymm3, %ymm1, %ymm2
+; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,1,1,3,4,5,5,7]
+; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm19
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
+; AVX512-NEXT: vpshufb %xmm3, %xmm0, %xmm1
+; AVX512-NEXT: vpshufb %xmm3, %xmm6, %xmm5
+; AVX512-NEXT: vmovdqa (%rdi), %xmm2
+; AVX512-NEXT: vmovdqa (%rsi), %xmm4
+; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; AVX512-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa64 %ymm0, %ymm23
+; AVX512-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
+; AVX512-NEXT: vprold $16, %xmm4, %xmm4
+; AVX512-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
+; AVX512-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2],xmm2[3,4],xmm4[5],xmm2[6,7]
+; AVX512-NEXT: vmovdqa64 %ymm2, %ymm22
+; AVX512-NEXT: vpshuflw {{.*#+}} ymm2 = ymm8[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,0,0,0,4,4,4,4]
+; AVX512-NEXT: vmovdqa64 %ymm21, %ymm6
+; AVX512-NEXT: vpshufd {{.*#+}} ymm4 = ymm21[0,1,1,3,4,5,5,7]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm2 = ymm4[0,1],ymm2[2],ymm4[3,4],ymm2[5],ymm4[6,7,8,9],ymm2[10],ymm4[11,12],ymm2[13],ymm4[14,15]
+; AVX512-NEXT: vmovdqa64 %ymm2, %ymm21
+; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm4 = ymm6[2,2,2,2,6,6,6,6]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7,8,9],ymm4[10],ymm2[11,12],ymm4[13],ymm2[14,15]
+; AVX512-NEXT: vmovdqa64 %ymm2, %ymm16
+; AVX512-NEXT: vpshuflw {{.*#+}} ymm2 = ymm9[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,0,2,1,4,4,6,5]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm4 = ymm26[1,1,1,1,5,5,5,5]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm13 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7,8,9],ymm4[10],ymm2[11,12],ymm4[13],ymm2[14,15]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm26[2,2,2,2,6,6,6,6]
+; AVX512-NEXT: vpshufhw {{.*#+}} ymm4 = ymm9[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,2,2,2,6,6,6,6]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm12 = ymm2[0],ymm4[1],ymm2[2,3],ymm4[4],ymm2[5,6,7,8],ymm4[9],ymm2[10,11],ymm4[12],ymm2[13,14,15]
+; AVX512-NEXT: vmovdqa %ymm10, %ymm14
+; AVX512-NEXT: vpshuflw {{.*#+}} ymm2 = ymm10[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,0,0,0,4,4,4,4]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm10 = ymm20[0,0,2,1,4,4,6,5]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm11 = ymm2[0,1,2],ymm10[3],ymm2[4,5],ymm10[6],ymm2[7,8,9,10],ymm10[11],ymm2[12,13],ymm10[14],ymm2[15]
+; AVX512-NEXT: vmovdqa64 %xmm27, %xmm2
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm10 = xmm2[0,2,3,3,4,5,6,7]
; AVX512-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,2,1]
; AVX512-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
; AVX512-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,4]
-; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm0[0,0,1,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm5[0,0,1,1]
+; AVX512-NEXT: vpermq {{.*#+}} ymm27 = ymm0[0,0,1,3]
+; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm1[0,0,1,1]
; AVX512-NEXT: vpermq {{.*#+}} ymm28 = ymm28[2,2,2,3]
; AVX512-NEXT: vpermq {{.*#+}} ymm29 = ymm29[0,2,2,3]
; AVX512-NEXT: vpermq {{.*#+}} ymm30 = ymm30[0,2,2,3]
; AVX512-NEXT: vpermq {{.*#+}} ymm31 = ymm31[2,1,3,3]
-; AVX512-NEXT: vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm6 = mem[2,1,3,2]
-; AVX512-NEXT: vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm5 = mem[2,2,2,3]
-; AVX512-NEXT: vpermq $232, (%rsp), %ymm1 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm1 = mem[0,2,2,3]
-; AVX512-NEXT: vmovdqa64 %xmm18, %xmm0
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm12 = xmm0[0,1,3,2,4,5,6,7]
-; AVX512-NEXT: vpshufd {{.*#+}} xmm12 = xmm12[0,0,1,1]
-; AVX512-NEXT: vpermq $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm0 = mem[2,1,3,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm9 = ymm14[0,0,1,1]
-; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm19[2,2,2,3]
-; AVX512-NEXT: vpshuflw $180, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
-; AVX512-NEXT: # xmm15 = mem[0,1,3,2,4,5,6,7]
-; AVX512-NEXT: vpshufd {{.*#+}} xmm15 = xmm15[0,1,1,3]
-; AVX512-NEXT: vinserti64x4 $1, %ymm29, %zmm28, %zmm28
-; AVX512-NEXT: vinserti64x4 $1, %ymm31, %zmm30, %zmm29
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm30 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm29 = zmm29 ^ (zmm30 & (zmm29 ^ zmm28))
-; AVX512-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,1,1,3]
-; AVX512-NEXT: vinserti64x4 $1, %ymm12, %zmm1, %zmm1
-; AVX512-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm9
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm9 = zmm1 ^ (zmm30 & (zmm9 ^ zmm1))
-; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm0 # 32-byte Folded Reload
-; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm1 # 32-byte Folded Reload
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm1 = zmm0 ^ (zmm2 & (zmm1 ^ zmm0))
-; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm0 # 32-byte Folded Reload
+; AVX512-NEXT: vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm8 = mem[2,1,3,2]
+; AVX512-NEXT: vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm6 = mem[2,2,2,3]
+; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm25[0,2,2,3]
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm9 = xmm7[0,1,3,2,4,5,6,7]
+; AVX512-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[0,0,1,1]
+; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm18[2,1,3,3]
+; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm5[0,0,1,1]
+; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm17[2,2,2,3]
+; AVX512-NEXT: vpshuflw $180, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm5 = mem[0,1,3,2,4,5,6,7]
+; AVX512-NEXT: vpshufd {{.*#+}} xmm4 = xmm5[0,1,1,3]
+; AVX512-NEXT: vprold $16, %ymm14, %ymm5
+; AVX512-NEXT: vpshufd {{.*#+}} ymm15 = ymm20[1,2,2,3,5,6,6,7]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm5 = ymm15[0,1],ymm5[2],ymm15[3,4],ymm5[5],ymm15[6,7,8,9],ymm5[10],ymm15[11,12],ymm5[13],ymm15[14,15]
+; AVX512-NEXT: vinserti64x4 $1, %ymm29, %zmm28, %zmm15
+; AVX512-NEXT: vinserti64x4 $1, %ymm31, %zmm30, %zmm28
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm29 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm28 = zmm28 ^ (zmm29 & (zmm28 ^ zmm15))
+; AVX512-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,1,1,3]
+; AVX512-NEXT: vinserti64x4 $1, %ymm9, %zmm2, %zmm2
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm0 = zmm2 ^ (zmm29 & (zmm0 ^ zmm2))
+; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm1 # 32-byte Folded Reload
+; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm2 # 32-byte Folded Reload
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm9 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm2 = zmm1 ^ (zmm9 & (zmm2 ^ zmm1))
+; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm1 # 32-byte Folded Reload
; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm0 = (zmm0 & mem) | zmm3
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm1))
-; AVX512-NEXT: vpshufd $254, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm1 = mem[2,3,3,3,6,7,7,7]
-; AVX512-NEXT: vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm3 = mem[0,0,2,1]
-; AVX512-NEXT: vpshuflw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
-; AVX512-NEXT: # xmm10 = mem[2,1,2,3,4,5,6,7]
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,5,5,4]
-; AVX512-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,1,3]
-; AVX512-NEXT: vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm12 = mem[0,0,1,1]
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm1 = (zmm1 & mem) | zmm3
+; AVX512-NEXT: vpshufd $254, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm3 = mem[2,3,3,3,6,7,7,7]
+; AVX512-NEXT: vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm10 = mem[0,0,2,1]
+; AVX512-NEXT: vpshuflw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm15 = mem[2,1,2,3,4,5,6,7]
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,4,5,5,4]
+; AVX512-NEXT: vpermq {{.*#+}} ymm15 = ymm15[0,0,1,3]
+; AVX512-NEXT: vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm17 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm17 = mem[0,0,1,1]
; AVX512-NEXT: vpshuflw $248, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
; AVX512-NEXT: # xmm14 = mem[0,2,3,3,4,5,6,7]
; AVX512-NEXT: vpermq {{.*#+}} ymm14 = ymm14[0,0,2,1]
-; AVX512-NEXT: vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm17 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm17 = mem[2,2,2,3]
-; AVX512-NEXT: vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm18 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm18 = mem[2,1,3,2]
-; AVX512-NEXT: vpermq $250, {{[-0-9]+}}(%r{{[sb]}}p), %ymm19 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm19 = mem[2,2,3,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm21 = ymm21[0,0,1,1]
-; AVX512-NEXT: vpermq {{.*#+}} ymm28 = ymm24[0,0,2,1]
-; AVX512-NEXT: vpermq {{.*#+}} ymm30 = ymm23[2,1,3,2]
-; AVX512-NEXT: vpermq {{.*#+}} ymm31 = ymm22[2,2,2,3]
+; AVX512-NEXT: vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm18 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm18 = mem[2,2,2,3]
+; AVX512-NEXT: vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm20 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm20 = mem[2,1,3,2]
+; AVX512-NEXT: vpermq $250, {{[-0-9]+}}(%r{{[sb]}}p), %ymm26 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm26 = mem[2,2,3,3]
+; AVX512-NEXT: vpermq {{.*#+}} ymm27 = ymm23[0,0,1,1]
+; AVX512-NEXT: vpermq {{.*#+}} ymm29 = ymm22[0,0,2,1]
+; AVX512-NEXT: vpermq {{.*#+}} ymm30 = ymm21[2,1,3,2]
+; AVX512-NEXT: vpermq {{.*#+}} ymm31 = ymm16[2,2,2,3]
; AVX512-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,2,2,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,2,2,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,1,3,2]
-; AVX512-NEXT: vinserti64x4 $1, %ymm5, %zmm6, %zmm5
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 ^ (mem & (zmm5 ^ zmm29))
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm27 = zmm27 ^ (mem & (zmm27 ^ zmm5))
-; AVX512-NEXT: vpermq {{.*#+}} ymm5 = ymm15[0,0,0,1]
-; AVX512-NEXT: vinserti64x4 $1, %ymm5, %zmm4, %zmm4
-; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,3,2]
-; AVX512-NEXT: vpbroadcastd 32(%rax), %ymm5
-; AVX512-NEXT: vinserti64x4 $1, %ymm5, %zmm1, %zmm1
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 ^ (mem & (zmm1 ^ zmm4))
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 ^ (mem & (zmm1 ^ zmm9))
-; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm3, %zmm3
-; AVX512-NEXT: vinserti64x4 $1, %ymm14, %zmm12, %zmm4
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm4 = zmm3 ^ (zmm2 & (zmm4 ^ zmm3))
-; AVX512-NEXT: vpbroadcastd 36(%rax), %ymm2
-; AVX512-NEXT: vpbroadcastd 40(%rax), %ymm3
-; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (mem & (zmm2 ^ zmm3))
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (mem & (zmm2 ^ zmm4))
-; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512-NEXT: vinserti64x4 $1, %ymm17, %zmm3, %zmm3
+; AVX512-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,2,2,3]
+; AVX512-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3]
+; AVX512-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,1,3,2]
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 ^ (mem & (zmm1 ^ zmm2))
+; AVX512-NEXT: vinserti64x4 $1, %ymm6, %zmm8, %zmm2
+; AVX512-NEXT: vmovdqa64 (%rax), %zmm6
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (mem & (zmm2 ^ zmm28))
+; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm8 = [0,13,0,0,0,14,0,0,14,0,0,0,15,0,0,15]
+; AVX512-NEXT: vpermd %zmm6, %zmm8, %zmm8
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm8 = zmm8 ^ (mem & (zmm8 ^ zmm2))
+; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm4[0,0,0,1]
+; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm7, %zmm2
+; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,1,3,2]
+; AVX512-NEXT: vpbroadcastd 32(%rax), %ymm4
+; AVX512-NEXT: vinserti64x4 $1, %ymm4, %zmm3, %zmm3
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm3 = zmm3 ^ (mem & (zmm3 ^ zmm2))
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm3 = zmm3 ^ (mem & (zmm3 ^ zmm0))
+; AVX512-NEXT: vinserti64x4 $1, %ymm15, %zmm10, %zmm0
+; AVX512-NEXT: vinserti64x4 $1, %ymm14, %zmm17, %zmm2
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm2 = zmm0 ^ (zmm9 & (zmm2 ^ zmm0))
+; AVX512-NEXT: vpbroadcastd 36(%rax), %ymm0
+; AVX512-NEXT: vpbroadcastd 40(%rax), %ymm4
+; AVX512-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm0
; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512-NEXT: vinserti64x4 $1, %ymm18, %zmm4, %zmm4
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 ^ (mem & (zmm4 ^ zmm3))
-; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm19[0,1,2,3]
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm20 = zmm20 | (zmm3 & mem)
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm20 = zmm20 ^ (mem & (zmm20 ^ zmm4))
-; AVX512-NEXT: vinserti64x4 $1, %ymm28, %zmm21, %zmm3
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm3 = zmm26 ^ (mem & (zmm3 ^ zmm26))
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm4))
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm2))
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512-NEXT: vinserti64x4 $1, %ymm18, %zmm2, %zmm2
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512-NEXT: vinserti64x4 $1, %ymm20, %zmm4, %zmm4
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 ^ (mem & (zmm4 ^ zmm2))
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm26[0,1,2,3]
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm19 = zmm19 | (zmm2 & mem)
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm19 = zmm19 ^ (mem & (zmm19 ^ zmm4))
+; AVX512-NEXT: vinserti64x4 $1, %ymm29, %zmm27, %zmm2
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm2 = zmm4 ^ (mem & (zmm2 ^ zmm4))
; AVX512-NEXT: vpbroadcastd (%rax), %ymm4
-; AVX512-NEXT: vpbroadcastd 4(%rax), %ymm5
-; AVX512-NEXT: vinserti64x4 $1, %ymm5, %zmm4, %zmm4
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 ^ (mem & (zmm4 ^ zmm25))
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 ^ (mem & (zmm4 ^ zmm3))
-; AVX512-NEXT: vinserti64x4 $1, %ymm31, %zmm30, %zmm3
-; AVX512-NEXT: vinserti64x4 $1, %ymm11, %zmm13, %zmm5
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 ^ (mem & (zmm5 ^ zmm3))
-; AVX512-NEXT: vinserti64x4 $1, %ymm7, %zmm8, %zmm3
-; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm6 = [0,0,4,0,0,0,5,0,0,5,0,0,0,6,0,0]
-; AVX512-NEXT: vpermd (%rax), %zmm6, %zmm6
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm6 = zmm6 ^ (mem & (zmm6 ^ zmm3))
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm6 = zmm6 ^ (mem & (zmm6 ^ zmm5))
+; AVX512-NEXT: vpbroadcastd 4(%rax), %ymm7
+; AVX512-NEXT: vinserti64x4 $1, %ymm7, %zmm4, %zmm4
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 ^ (mem & (zmm4 ^ zmm24))
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 ^ (mem & (zmm4 ^ zmm2))
+; AVX512-NEXT: vinserti64x4 $1, %ymm31, %zmm30, %zmm2
+; AVX512-NEXT: vinserti64x4 $1, %ymm12, %zmm13, %zmm7
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm7 = zmm7 ^ (mem & (zmm7 ^ zmm2))
+; AVX512-NEXT: vinserti64x4 $1, %ymm5, %zmm11, %zmm2
+; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm5 = [0,0,4,0,0,0,5,0,0,5,0,0,0,6,0,0]
+; AVX512-NEXT: vpermd %zmm6, %zmm5, %zmm5
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 ^ (mem & (zmm5 ^ zmm2))
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 ^ (mem & (zmm5 ^ zmm7))
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512-NEXT: vmovdqa64 %zmm6, 128(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm5, 128(%rax)
; AVX512-NEXT: vmovdqa64 %zmm4, (%rax)
-; AVX512-NEXT: vmovdqa64 %zmm20, 320(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm2, 256(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm1, 192(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm0, 64(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm27, 384(%rax)
-; AVX512-NEXT: addq $680, %rsp # imm = 0x2A8
+; AVX512-NEXT: vmovdqa64 %zmm19, 320(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm0, 256(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm3, 192(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm1, 64(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm8, 384(%rax)
+; AVX512-NEXT: addq $664, %rsp # imm = 0x298
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
; AVX512-FCP-LABEL: store_i16_stride7_vf32:
; AVX512-FCP: # %bb.0:
-; AVX512-FCP-NEXT: subq $264, %rsp # imm = 0x108
-; AVX512-FCP-NEXT: vmovdqa (%rcx), %ymm12
+; AVX512-FCP-NEXT: subq $296, %rsp # imm = 0x128
+; AVX512-FCP-NEXT: vmovdqa (%rcx), %ymm9
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128]
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm12, %ymm2
-; AVX512-FCP-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm9, %ymm2
+; AVX512-FCP-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-FCP-NEXT: vmovdqa (%rdx), %ymm4
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm4, %ymm3
-; AVX512-FCP-NEXT: vmovdqa64 %ymm4, %ymm16
-; AVX512-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vmovdqa64 %ymm4, %ymm17
; AVX512-FCP-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX512-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vmovdqa (%rsi), %ymm11
-; AVX512-FCP-NEXT: vmovdqa 32(%rsi), %ymm10
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm3 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm11, %ymm2
-; AVX512-FCP-NEXT: vmovdqu %ymm11, (%rsp) # 32-byte Spill
-; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm15
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19]
-; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm15, %ymm5
-; AVX512-FCP-NEXT: vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpor %ymm2, %ymm5, %ymm2
-; AVX512-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vmovdqa (%r9), %ymm5
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
-; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm5, %ymm8
-; AVX512-FCP-NEXT: vmovdqa (%r8), %ymm2
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u]
-; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm2, %ymm9
-; AVX512-FCP-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX512-FCP-NEXT: vmovdqa (%rsi), %ymm8
+; AVX512-FCP-NEXT: vmovdqa 32(%rsi), %ymm13
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm8, %ymm3
; AVX512-FCP-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm10, %ymm3
-; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm9
-; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm9, %ymm4
-; AVX512-FCP-NEXT: vpor %ymm3, %ymm4, %ymm3
-; AVX512-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa 32(%rcx), %ymm13
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm13, %ymm0
+; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm10
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19]
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm10, %ymm5
+; AVX512-FCP-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vpor %ymm3, %ymm5, %ymm3
+; AVX512-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vmovdqa (%r9), %ymm15
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
+; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm15, %ymm3
+; AVX512-FCP-NEXT: vmovdqa (%r8), %ymm11
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u]
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm11, %ymm7
+; AVX512-FCP-NEXT: vpor %ymm3, %ymm7, %ymm3
+; AVX512-FCP-NEXT: vmovdqu %ymm3, (%rsp) # 32-byte Spill
+; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm13, %ymm2
+; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm12
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm12, %ymm3
+; AVX512-FCP-NEXT: vpor %ymm2, %ymm3, %ymm2
+; AVX512-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vmovdqa 32(%rcx), %ymm7
+; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm7, %ymm0
; AVX512-FCP-NEXT: vmovdqa 32(%rdx), %ymm14
; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm14, %ymm1
; AVX512-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT: vmovdqa 32(%r9), %ymm1
-; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm1, %ymm0
-; AVX512-FCP-NEXT: vmovdqa64 %ymm1, %ymm19
-; AVX512-FCP-NEXT: vmovdqa 32(%r8), %ymm3
-; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm3, %ymm1
-; AVX512-FCP-NEXT: vmovdqa64 %ymm3, %ymm25
-; AVX512-FCP-NEXT: vporq %ymm0, %ymm1, %ymm20
+; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm1, %ymm0
+; AVX512-FCP-NEXT: vmovdqa64 %ymm1, %ymm20
+; AVX512-FCP-NEXT: vmovdqa 32(%r8), %ymm2
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm2, %ymm1
+; AVX512-FCP-NEXT: vmovdqa64 %ymm2, %ymm16
+; AVX512-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [u,u,u,u,u,u,u,u,u,u,u,u,14,15,12,13,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29]
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm10, %ymm1
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm9[3,3,3,3,7,7,7,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3],ymm3[4,5],ymm1[6],ymm3[7,8,9,10],ymm1[11],ymm3[12,13],ymm1[14],ymm3[15]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm10[10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u]
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm4 = ymm9[2,2,2,2,6,6,6,6]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6,7,8],ymm3[9],ymm4[10,11],ymm3[12],ymm4[13,14,15]
+; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm13, %ymm1
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm2 = ymm12[3,3,3,3,7,7,7,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5],ymm1[6],ymm2[7,8,9,10],ymm1[11],ymm2[12,13],ymm1[14],ymm2[15]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm13[10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u]
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm12[2,2,2,2,6,6,6,6]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6,7,8],ymm2[9],ymm3[10,11],ymm2[12],ymm3[13,14,15]
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm24 = [0,2,2,3,10,0,11,0]
-; AVX512-FCP-NEXT: vpermi2q %zmm1, %zmm3, %zmm24
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm12[14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u]
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm16[3,3,3,3,7,7,7,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6,7,8],ymm1[9],ymm3[10,11],ymm1[12],ymm3[13,14,15]
-; AVX512-FCP-NEXT: vmovdqa 32(%rcx), %xmm4
-; AVX512-FCP-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-FCP-NEXT: vmovdqa 32(%rdx), %xmm3
+; AVX512-FCP-NEXT: vpermi2q %zmm1, %zmm2, %zmm24
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm9[14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u]
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm2 = ymm17[3,3,3,3,7,7,7,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6,7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13,14,15]
+; AVX512-FCP-NEXT: vmovdqa 32(%rcx), %xmm3
; AVX512-FCP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,2,3,0,1,2,3,6,7,4,5,6,7,4,5]
-; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm3, %xmm3
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm23 = [0,2,2,3,8,0,9,0]
-; AVX512-FCP-NEXT: vpermi2q %zmm3, %zmm1, %zmm23
-; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm11, %ymm0
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm1 = ymm15[3,3,3,3,7,7,7,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8,9,10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
-; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %xmm1
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512-FCP-NEXT: vmovdqa 32(%rsi), %xmm8
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3]
-; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm3, %xmm3
+; AVX512-FCP-NEXT: vmovdqa 32(%rdx), %xmm2
+; AVX512-FCP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3,0,1,2,3,6,7,4,5,6,7,4,5]
+; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX512-FCP-NEXT: vmovdqa64 %xmm3, %xmm27
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm25 = [0,2,2,3,8,0,9,0]
+; AVX512-FCP-NEXT: vpermi2q %zmm2, %zmm1, %zmm25
+; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm8, %ymm0
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm1 = ymm10[3,3,3,3,7,7,7,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8,9,10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
+; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %xmm4
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
+; AVX512-FCP-NEXT: vmovdqa 32(%rsi), %xmm0
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm2, %xmm2
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm21 = [2,1,3,3,8,8,9,9]
-; AVX512-FCP-NEXT: vpermi2q %zmm3, %zmm0, %zmm21
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm2[3,3,3,3,7,7,7,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm11 = ymm0[0,1],ymm3[2],ymm0[3,4],ymm3[5],ymm0[6,7,8,9],ymm3[10],ymm0[11,12],ymm3[13],ymm0[14,15]
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm22 = [2,2,2,3,0,8,8,9]
+; AVX512-FCP-NEXT: vpermi2q %zmm2, %zmm1, %zmm21
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm2 = ymm11[3,3,3,3,7,7,7,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm8 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm23 = [2,2,2,3,0,8,8,9]
; AVX512-FCP-NEXT: vmovdqa 32(%r9), %xmm3
-; AVX512-FCP-NEXT: vmovdqa 32(%r8), %xmm7
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,2,3,6,7,4,5,6,7,4,5,12,13,14,15]
-; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm0, %xmm15
-; AVX512-FCP-NEXT: vpermi2q %zmm15, %zmm11, %zmm22
-; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm11 = xmm8[4],xmm1[4],xmm8[5],xmm1[5],xmm8[6],xmm1[6],xmm8[7],xmm1[7]
-; AVX512-FCP-NEXT: vprold $16, %xmm8, %xmm8
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm8[2],xmm1[3,4],xmm8[5],xmm1[6,7]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [4,5,2,3,4,5,6,7,8,9,10,11,10,11,8,9]
-; AVX512-FCP-NEXT: vpshufb %xmm15, %xmm11, %xmm8
-; AVX512-FCP-NEXT: vmovdqa %xmm15, %xmm11
+; AVX512-FCP-NEXT: vmovdqa 32(%r8), %xmm2
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,2,3,6,7,4,5,6,7,4,5,12,13,14,15]
+; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm1, %xmm5
+; AVX512-FCP-NEXT: vpermi2q %zmm5, %zmm8, %zmm23
+; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; AVX512-FCP-NEXT: vprold $16, %xmm0, %xmm0
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[1,1,2,3]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm4[0,1],xmm0[2],xmm4[3,4],xmm0[5],xmm4[6,7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [4,5,2,3,4,5,6,7,8,9,10,11,10,11,8,9]
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm5, %xmm4
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm26 = [0,0,0,1,8,8,9,0]
-; AVX512-FCP-NEXT: vpermi2q %zmm8, %zmm1, %zmm26
-; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm7[4],xmm3[4],xmm7[5],xmm3[5],xmm7[6],xmm3[6],xmm7[7],xmm3[7]
-; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm1, %xmm1
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
-; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm0, %xmm0
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm28 = [0,1,1,0,8,8,9,9]
+; AVX512-FCP-NEXT: vpermi2q %zmm4, %zmm0, %zmm26
+; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
+; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm1, %xmm1
+; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm0, %xmm0
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm28 = [8,9,9,0,0,0,1,1]
; AVX512-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm28
; AVX512-FCP-NEXT: vmovdqa (%rcx), %xmm1
; AVX512-FCP-NEXT: vmovdqa (%rdx), %xmm0
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm1[u,u,4,5,u,u,u,u,6,7,u,u,u,u,8,9]
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm8 = xmm0[1,1,2,2]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm8[0],xmm7[1],xmm8[2,3],xmm7[4],xmm8[5,6],xmm7[7]
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm8, %xmm6
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm27 = [0,0,1,0,8,8,9,9]
-; AVX512-FCP-NEXT: vpermi2q %zmm7, %zmm6, %zmm27
-; AVX512-FCP-NEXT: vmovdqa (%r9), %xmm6
-; AVX512-FCP-NEXT: vmovdqa (%r8), %xmm15
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm15[0],xmm6[0],xmm15[1],xmm6[1],xmm15[2],xmm6[2],xmm15[3],xmm6[3]
-; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm7, %xmm3
-; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm7, %xmm4
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX512-FCP-NEXT: vmovdqa64 %xmm27, %xmm3
+; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm1[u,u,4,5,u,u,u,u,6,7,u,u,u,u,8,9]
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[1,1,2,2]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0],xmm3[1],xmm5[2,3],xmm3[4],xmm5[5,6],xmm3[7]
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm27 = [8,0,9,0,0,0,1,1]
+; AVX512-FCP-NEXT: vpermi2q %zmm2, %zmm3, %zmm27
+; AVX512-FCP-NEXT: vmovdqa (%r9), %xmm5
+; AVX512-FCP-NEXT: vmovdqa (%r8), %xmm9
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm9[0],xmm5[0],xmm9[1],xmm5[1],xmm9[2],xmm5[2],xmm9[3],xmm5[3]
+; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm3, %xmm4
+; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm3, %xmm3
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm29 = [0,0,0,1,8,9,9,0]
-; AVX512-FCP-NEXT: vpermi2q %zmm3, %zmm4, %zmm29
-; AVX512-FCP-NEXT: vprold $16, %ymm5, %ymm3
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm4 = ymm2[1,2,2,3,5,6,6,7]
+; AVX512-FCP-NEXT: vpermi2q %zmm4, %zmm3, %zmm29
+; AVX512-FCP-NEXT: vprold $16, %ymm15, %ymm3
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm4 = ymm11[1,2,2,3,5,6,6,7]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7,8,9],ymm3[10],ymm4[11,12],ymm3[13],ymm4[14,15]
-; AVX512-FCP-NEXT: vpbroadcastd {{.*#+}} ymm7 = [18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21]
-; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm5, %ymm5
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,0,2,1,4,4,6,5]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm5[0,1,2],ymm2[3],ymm5[4,5],ymm2[6],ymm5[7,8,9,10],ymm2[11],ymm5[12,13],ymm2[14],ymm5[15]
+; AVX512-FCP-NEXT: vpbroadcastd {{.*#+}} ymm6 = [18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21]
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm15, %ymm4
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm8 = ymm11[0,0,2,1,4,4,6,5]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0,1,2],ymm8[3],ymm4[4,5],ymm8[6],ymm4[7,8,9,10],ymm8[11],ymm4[12,13],ymm8[14],ymm4[15]
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm31 = [2,2,3,3,10,0,11,10]
-; AVX512-FCP-NEXT: vpermi2q %zmm3, %zmm2, %zmm31
+; AVX512-FCP-NEXT: vpermi2q %zmm3, %zmm4, %zmm31
; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX512-FCP-NEXT: vmovdqa64 %xmm0, %xmm18
-; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm5
+; AVX512-FCP-NEXT: vmovdqa64 %xmm0, %xmm22
+; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm4
; AVX512-FCP-NEXT: vmovdqa (%rsi), %xmm3
-; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
-; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm0, %xmm0
-; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm17
+; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm1, %xmm0
+; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm19
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512-FCP-NEXT: vpbroadcastd 8(%rax), %ymm0
-; AVX512-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512-FCP-NEXT: vmovdqa (%rax), %ymm8
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
-; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm8, %ymm1
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm30
-; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm15[4],xmm6[4],xmm15[5],xmm6[5],xmm15[6],xmm6[6],xmm15[7],xmm6[7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm1 = ymm9[1,1,1,1,5,5,5,5]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm16
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm13[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm1 = ymm14[0,1,1,3,4,5,5,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm11 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7,8,9],ymm0[10],ymm1[11,12],ymm0[13],ymm1[14,15]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm19, %ymm12
-; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm12, %ymm0
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm1 = ymm25[0,0,2,1,4,4,6,5]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7,8,9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
+; AVX512-FCP-NEXT: vpbroadcastd 8(%rax), %ymm1
+; AVX512-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX512-FCP-NEXT: vmovdqa (%rax), %ymm10
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
+; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm10, %ymm8
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm1, %zmm30
+; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm11 = xmm9[4],xmm5[4],xmm9[5],xmm5[5],xmm9[6],xmm5[6],xmm9[7],xmm5[7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm8 = ymm12[1,1,1,1,5,5,5,5]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm8[2],ymm1[3,4],ymm8[5],ymm1[6,7,8,9],ymm8[10],ymm1[11,12],ymm8[13],ymm1[14,15]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm18
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm7[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm9 = ymm14[0,1,1,3,4,5,5,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm8 = ymm9[0,1],ymm1[2],ymm9[3,4],ymm1[5],ymm9[6,7,8,9],ymm1[10],ymm9[11,12],ymm1[13],ymm9[14,15]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm20, %ymm13
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm13, %ymm1
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm6 = ymm16[0,0,2,1,4,4,6,5]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1,2],ymm6[3],ymm1[4,5],ymm6[6],ymm1[7,8,9,10],ymm6[11],ymm1[12,13],ymm6[14],ymm1[15]
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm9 = [0,1,0,3,10,10,11,11]
-; AVX512-FCP-NEXT: vpermi2q %zmm0, %zmm20, %zmm9
-; AVX512-FCP-NEXT: vmovdqa 32(%rax), %ymm2
-; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm0
-; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm19 = [0,5,4,0,0,6,5,0,0,5,4,0,0,6,5,0]
-; AVX512-FCP-NEXT: # zmm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512-FCP-NEXT: vpermd %ymm2, %ymm19, %ymm4
-; AVX512-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm4
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm13[14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u]
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm7 = ymm14[3,3,3,3,7,7,7,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm7[0],ymm0[1],ymm7[2,3],ymm0[4],ymm7[5,6,7,8],ymm0[9],ymm7[10,11],ymm0[12],ymm7[13,14,15]
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27,22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27]
-; AVX512-FCP-NEXT: # ymm10 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm13, %ymm0
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm15 = ymm14[2,2,2,2,6,6,6,6]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm15[2],ymm0[3,4],ymm15[5],ymm0[6,7,8,9],ymm15[10],ymm0[11,12],ymm15[13],ymm0[14,15]
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm20 = [2,2,2,3,8,10,10,11]
-; AVX512-FCP-NEXT: vpermt2q %zmm7, %zmm20, %zmm0
-; AVX512-FCP-NEXT: vmovdqa %ymm12, %ymm1
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm12 = ymm25[3,3,3,3,7,7,7,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm7[0,1],ymm12[2],ymm7[3,4],ymm12[5],ymm7[6,7,8,9],ymm12[10],ymm7[11,12],ymm12[13],ymm7[14,15]
-; AVX512-FCP-NEXT: vprold $16, %ymm1, %ymm12
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm15 = ymm25[1,2,2,3,5,6,6,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm15 = ymm15[0,1],ymm12[2],ymm15[3,4],ymm12[5],ymm15[6,7,8,9],ymm12[10],ymm15[11,12],ymm12[13],ymm15[14,15]
-; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm1[u,u,4,5,u,u,u,u,6,7,u,u,u,u,8,9]
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm9
+; AVX512-FCP-NEXT: vmovdqa 32(%rax), %ymm1
+; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm1, %ymm2
+; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm20 = [0,5,4,0,0,6,5,0,0,5,4,0,0,6,5,0]
+; AVX512-FCP-NEXT: # zmm20 = mem[0,1,2,3,0,1,2,3]
+; AVX512-FCP-NEXT: vpermd %ymm1, %ymm20, %ymm1
+; AVX512-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm7[14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u]
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm6 = ymm14[3,3,3,3,7,7,7,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm6[0],ymm2[1],ymm6[2,3],ymm2[4],ymm6[5,6,7,8],ymm2[9],ymm6[10,11],ymm2[12],ymm6[13,14,15]
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27,22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27]
+; AVX512-FCP-NEXT: # ymm6 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm7, %ymm0
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm12 = ymm14[2,2,2,2,6,6,6,6]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm12 = ymm0[0,1],ymm12[2],ymm0[3,4],ymm12[5],ymm0[6,7,8,9],ymm12[10],ymm0[11,12],ymm12[13],ymm0[14,15]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm7 = ymm16[3,3,3,3,7,7,7,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1],ymm7[2],ymm0[3,4],ymm7[5],ymm0[6,7,8,9],ymm7[10],ymm0[11,12],ymm7[13],ymm0[14,15]
+; AVX512-FCP-NEXT: vprold $16, %ymm13, %ymm7
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm14 = ymm16[1,2,2,3,5,6,6,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm14 = ymm14[0,1],ymm7[2],ymm14[3,4],ymm7[5],ymm14[6,7,8,9],ymm7[10],ymm14[11,12],ymm7[13],ymm14[14,15]
+; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[u,u,4,5,u,u,u,u,6,7,u,u,u,u,8,9]
; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm13[4],xmm1[4],xmm13[5],xmm1[5],xmm13[6],xmm1[6],xmm13[7],xmm1[7]
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm14 = xmm13[1,1,2,2]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm14[0],xmm12[1],xmm14[2,3],xmm12[4],xmm14[5,6],xmm12[7]
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,3,3,4,5,6,7]
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm14 = [0,0,1,1,8,8,10,9]
-; AVX512-FCP-NEXT: vpermt2q %zmm1, %zmm14, %zmm12
-; AVX512-FCP-NEXT: vprold $16, %xmm3, %xmm1
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm13 = xmm5[1,1,2,3]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm13[0,1],xmm1[2],xmm13[3,4],xmm1[5],xmm13[6,7]
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
+; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm13[4],xmm0[4],xmm13[5],xmm0[5],xmm13[6],xmm0[6],xmm13[7],xmm0[7]
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm15 = xmm13[1,1,2,2]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm15[0],xmm7[1],xmm15[2,3],xmm7[4],xmm15[5,6],xmm7[7]
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,3,3,4,5,6,7]
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm15 = [0,0,1,1,8,8,0,9]
+; AVX512-FCP-NEXT: vpermt2q %zmm0, %zmm15, %zmm7
+; AVX512-FCP-NEXT: vprold $16, %xmm3, %xmm0
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm13 = xmm4[1,1,2,3]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm13[0,1],xmm0[2],xmm13[3,4],xmm0[5],xmm13[6,7]
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm13 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512-FCP-NEXT: vpshufb %xmm13, %xmm6, %xmm5
-; AVX512-FCP-NEXT: vpshufb %xmm13, %xmm3, %xmm3
-; AVX512-FCP-NEXT: vpermt2q %zmm1, %zmm14, %zmm3
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm1 = [2,1,3,2,10,10,10,11]
-; AVX512-FCP-NEXT: vpermt2q %zmm7, %zmm1, %zmm15
+; AVX512-FCP-NEXT: vpshufb %xmm13, %xmm11, %xmm4
+; AVX512-FCP-NEXT: vpshufb %xmm13, %xmm3, %xmm11
+; AVX512-FCP-NEXT: vpermt2q %zmm0, %zmm15, %zmm11
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm0 = [2,1,3,2,10,10,10,11]
+; AVX512-FCP-NEXT: vpermt2q %zmm2, %zmm0, %zmm14
; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm13, %ymm6
-; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm7 = ymm10[2,2,2,2,6,6,6,6]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm6[0,1],ymm7[2],ymm6[3,4],ymm7[5],ymm6[6,7,8,9],ymm7[10],ymm6[11,12],ymm7[13],ymm6[14,15]
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} ymm7 = ymm13[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm10 = ymm10[0,1,1,3,4,5,5,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm10[0,1],ymm7[2],ymm10[3,4],ymm7[5],ymm10[6,7,8,9],ymm7[10],ymm10[11,12],ymm7[13],ymm10[14,15]
-; AVX512-FCP-NEXT: vpermt2q %zmm6, %zmm1, %zmm7
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm24 ^ (zmm1 & (zmm0 ^ zmm24))
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm15 = zmm15 ^ (mem & (zmm15 ^ zmm0))
-; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [6,5,0,0,7,6,0,7,6,5,0,0,7,6,0,7]
-; AVX512-FCP-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512-FCP-NEXT: vpermd %zmm2, %zmm0, %zmm2
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (mem & (zmm2 ^ zmm15))
-; AVX512-FCP-NEXT: vpermd %ymm8, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm6 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm12 = zmm26 ^ (zmm6 & (zmm12 ^ zmm26))
-; AVX512-FCP-NEXT: vpbroadcastd 36(%rax), %ymm8
-; AVX512-FCP-NEXT: vpbroadcastd 40(%rax), %ymm10
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm8, %zmm8
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm8 = zmm8 ^ (mem & (zmm8 ^ zmm28))
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm8 = zmm8 ^ (mem & (zmm8 ^ zmm12))
-; AVX512-FCP-NEXT: vmovdqa64 %xmm18, %xmm10
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm10 = xmm10[0,2,3,3,4,5,6,7]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,2,1]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm17[0,0,1,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,0,1,1]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm16[2,2,2,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,1,3,2]
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm27 = zmm27 ^ (mem & (zmm27 ^ zmm3))
-; AVX512-FCP-NEXT: vmovdqu (%rsp), %ymm15 # 32-byte Reload
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm15[10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u]
-; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm16 # 32-byte Reload
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm14 = ymm16[2,2,2,2,6,6,6,6]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm14[0],ymm3[1],ymm14[2,3],ymm3[4],ymm14[5,6,7,8],ymm3[9],ymm14[10,11],ymm3[12],ymm14[13,14,15]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm14 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm15 = ymm16[1,1,1,1,5,5,5,5]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm14 = ymm14[0,1],ymm15[2],ymm14[3,4],ymm15[5],ymm14[6,7,8,9],ymm15[10],ymm14[11,12],ymm15[13],ymm14[14,15]
-; AVX512-FCP-NEXT: vpermt2q %zmm3, %zmm20, %zmm14
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm14 = zmm14 ^ (mem & (zmm14 ^ zmm7))
-; AVX512-FCP-NEXT: vpermd (%rax), %zmm19, %zmm3
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm3 = zmm3 ^ (mem & (zmm3 ^ zmm31))
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm3 = zmm3 ^ (mem & (zmm3 ^ zmm14))
-; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm7 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm10 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm10 = zmm7 ^ (zmm6 & (zmm10 ^ zmm7))
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm21 = zmm23 ^ (zmm1 & (zmm21 ^ zmm23))
-; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm1 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm1 = (zmm1 & mem) | zmm30
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 ^ (mem & (zmm1 ^ zmm10))
-; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm5, %zmm5
-; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm6, %zmm6
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm6 ^ (mem & (zmm6 ^ zmm5))
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 | (zmm9 & mem)
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 ^ (mem & (zmm4 ^ zmm6))
-; AVX512-FCP-NEXT: vpbroadcastd 32(%rax), %ymm5
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm0, %zmm0
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm22))
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm21))
-; AVX512-FCP-NEXT: vpbroadcastd (%rax), %ymm5
-; AVX512-FCP-NEXT: vpbroadcastd 4(%rax), %ymm6
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm5, %zmm5
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 ^ (mem & (zmm5 ^ zmm29))
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 ^ (mem & (zmm5 ^ zmm27))
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm13, %ymm2
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm17[2,2,2,2,6,6,6,6]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7,8,9],ymm3[10],ymm2[11,12],ymm3[13],ymm2[14,15]
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm13[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm6 = ymm17[0,1,1,3,4,5,5,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm6[0,1],ymm3[2],ymm6[3,4],ymm3[5],ymm6[6,7,8,9],ymm3[10],ymm6[11,12],ymm3[13],ymm6[14,15]
+; AVX512-FCP-NEXT: vpermt2q %zmm2, %zmm0, %zmm3
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm16 = [2,2,2,3,8,10,10,11]
+; AVX512-FCP-NEXT: vpermt2q %zmm5, %zmm16, %zmm12
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm12 = zmm24 ^ (zmm17 & (zmm12 ^ zmm24))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm14 = zmm14 ^ (mem & (zmm14 ^ zmm12))
+; AVX512-FCP-NEXT: vmovdqa64 (%rax), %zmm5
+; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm6 = [14,13,8,8,15,14,8,15,14,13,8,8,15,14,8,15]
+; AVX512-FCP-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512-FCP-NEXT: vpermd %zmm5, %zmm6, %zmm6
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm6 ^ (mem & (zmm6 ^ zmm14))
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm12 = [6,0,0,0,7,0,0,7]
+; AVX512-FCP-NEXT: vpermd %ymm10, %ymm12, %ymm10
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm12 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm7 = zmm26 ^ (zmm12 & (zmm7 ^ zmm26))
+; AVX512-FCP-NEXT: vpbroadcastd 36(%rax), %ymm13
+; AVX512-FCP-NEXT: vpbroadcastd 40(%rax), %ymm14
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm14, %zmm13, %zmm13
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm13 = zmm13 ^ (mem & (zmm13 ^ zmm28))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm13 = zmm13 ^ (mem & (zmm13 ^ zmm7))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm27 = zmm27 ^ (mem & (zmm27 ^ zmm11))
+; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm0[10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u]
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm24 # 32-byte Reload
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm11 = ymm24[2,2,2,2,6,6,6,6]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm11[0],ymm7[1],ymm11[2,3],ymm7[4],ymm11[5,6,7,8],ymm7[9],ymm11[10,11],ymm7[12],ymm11[13,14,15]
+; AVX512-FCP-NEXT: vmovdqa64 %xmm22, %xmm2
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm11 = xmm2[0,2,3,3,4,5,6,7]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,0,2,1]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm19[0,0,1,3]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm18[2,2,2,3]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,1,3,2]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm2 = ymm24[1,1,1,1,5,5,5,5]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7,8,9],ymm2[10],ymm0[11,12],ymm2[13],ymm0[14,15]
+; AVX512-FCP-NEXT: vpermt2q %zmm7, %zmm16, %zmm0
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm3))
+; AVX512-FCP-NEXT: vpermd %zmm5, %zmm20, %zmm2
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (mem & (zmm2 ^ zmm31))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (mem & (zmm2 ^ zmm0))
+; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm0 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm14, %zmm3 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm3 = zmm0 ^ (zmm12 & (zmm3 ^ zmm0))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm21 = zmm25 ^ (zmm17 & (zmm21 ^ zmm25))
+; AVX512-FCP-NEXT: vinserti64x4 $1, (%rsp), %zmm4, %zmm0 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = (zmm0 & mem) | zmm30
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm3))
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm15, %zmm3, %zmm3
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm4, %zmm4
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 ^ (mem & (zmm4 ^ zmm3))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 | (zmm9 & mem)
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 ^ (mem & (zmm1 ^ zmm4))
+; AVX512-FCP-NEXT: vpbroadcastd 32(%rax), %ymm3
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm10, %zmm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm3 = zmm3 ^ (mem & (zmm3 ^ zmm23))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm3 = zmm3 ^ (mem & (zmm3 ^ zmm21))
+; AVX512-FCP-NEXT: vpbroadcastd (%rax), %ymm4
+; AVX512-FCP-NEXT: vpbroadcastd 4(%rax), %ymm5
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm4, %zmm4
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 ^ (mem & (zmm4 ^ zmm29))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 ^ (mem & (zmm4 ^ zmm27))
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512-FCP-NEXT: vmovdqa64 %zmm3, 128(%rax)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm5, (%rax)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm4, 320(%rax)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm8, 256(%rax)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm0, 192(%rax)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm2, 384(%rax)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm1, 64(%rax)
-; AVX512-FCP-NEXT: addq $264, %rsp # imm = 0x108
+; AVX512-FCP-NEXT: vmovdqa64 %zmm2, 128(%rax)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm4, (%rax)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm1, 320(%rax)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm13, 256(%rax)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm3, 192(%rax)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm6, 384(%rax)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm0, 64(%rax)
+; AVX512-FCP-NEXT: addq $296, %rsp # imm = 0x128
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
; AVX512DQ-LABEL: store_i16_stride7_vf32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: subq $680, %rsp # imm = 0x2A8
-; AVX512DQ-NEXT: vmovdqa (%rcx), %ymm1
+; AVX512DQ-NEXT: subq $664, %rsp # imm = 0x298
+; AVX512DQ-NEXT: vmovdqa (%rcx), %ymm9
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm13 = [128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128]
-; AVX512DQ-NEXT: vpshufb %ymm13, %ymm1, %ymm0
-; AVX512DQ-NEXT: vmovdqa64 %ymm1, %ymm27
-; AVX512DQ-NEXT: vmovdqa (%rdx), %ymm8
+; AVX512DQ-NEXT: vpshufb %ymm13, %ymm9, %ymm0
+; AVX512DQ-NEXT: vmovdqa (%rdx), %ymm2
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm14 = [u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u]
-; AVX512DQ-NEXT: vpshufb %ymm14, %ymm8, %ymm1
+; AVX512DQ-NEXT: vpshufb %ymm14, %ymm2, %ymm1
+; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm21
; AVX512DQ-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vmovdqa (%rsi), %ymm9
+; AVX512DQ-NEXT: vmovdqa (%rsi), %ymm11
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512DQ-NEXT: vpshufb %ymm12, %ymm9, %ymm0
-; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm11
+; AVX512DQ-NEXT: vpshufb %ymm12, %ymm11, %ymm0
+; AVX512DQ-NEXT: vmovdqa64 %ymm11, %ymm17
+; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm2
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm15 = [12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19]
-; AVX512DQ-NEXT: vpshufb %ymm15, %ymm11, %ymm1
+; AVX512DQ-NEXT: vpshufb %ymm15, %ymm2, %ymm1
+; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm26
; AVX512DQ-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-NEXT: vmovdqa (%r9), %ymm1
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
; AVX512DQ-NEXT: vpshufb %ymm0, %ymm1, %ymm2
-; AVX512DQ-NEXT: vmovdqa64 %ymm1, %ymm16
+; AVX512DQ-NEXT: vmovdqa64 %ymm1, %ymm19
; AVX512DQ-NEXT: vmovdqa (%r8), %ymm4
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm1 = [u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u]
; AVX512DQ-NEXT: vpshufb %ymm1, %ymm4, %ymm3
-; AVX512DQ-NEXT: vmovdqa64 %ymm4, %ymm17
+; AVX512DQ-NEXT: vmovdqa64 %ymm4, %ymm20
; AVX512DQ-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX512DQ-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-NEXT: vmovdqa 32(%r9), %xmm2
@@ -6520,8 +6525,8 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512DQ-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512DQ-NEXT: vmovdqa 32(%rsi), %ymm10
-; AVX512DQ-NEXT: vpshufb %ymm12, %ymm10, %ymm3
+; AVX512DQ-NEXT: vmovdqa 32(%rsi), %ymm11
+; AVX512DQ-NEXT: vpshufb %ymm12, %ymm11, %ymm3
; AVX512DQ-NEXT: vpshufb %ymm15, %ymm2, %ymm4
; AVX512DQ-NEXT: vpor %ymm3, %ymm4, %ymm3
; AVX512DQ-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -6545,73 +6550,74 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0],xmm4[1],xmm5[2,3],xmm4[4],xmm5[5,6],xmm4[7]
; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm5 = xmm5[0,1,3,2,4,5,6,7]
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm26 = [0,16,0,0,17,17,0,0,0,0,0,1,2,0,0,3]
-; AVX512DQ-NEXT: vpermi2d %zmm5, %zmm4, %zmm26
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm6 = [0,16,0,0,17,17,0,0,0,0,0,1,2,0,0,3]
+; AVX512DQ-NEXT: vpermi2d %zmm5, %zmm4, %zmm6
+; AVX512DQ-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT: vmovdqa (%r9), %xmm4
; AVX512DQ-NEXT: vmovdqa (%r8), %xmm5
; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm7 = xmm6[0,1,2,3,4,5,7,6]
; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm6 = xmm6[0,1,3,2,4,5,6,7]
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm25 = [0,0,0,1,0,1,1,0,0,18,19,0,19,19,0,0]
-; AVX512DQ-NEXT: vpermi2d %zmm7, %zmm6, %zmm25
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm24 = [0,0,0,1,0,1,1,0,0,18,19,0,19,19,0,0]
+; AVX512DQ-NEXT: vpermi2d %zmm7, %zmm6, %zmm24
; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; AVX512DQ-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-NEXT: vmovdqa64 %xmm0, %xmm27
; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-NEXT: vpbroadcastd 8(%rax), %ymm0
; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512DQ-NEXT: vmovdqa (%rax), %ymm3
-; AVX512DQ-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm7 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
-; AVX512DQ-NEXT: vpshufb %ymm7, %ymm3, %ymm6
-; AVX512DQ-NEXT: vmovdqa64 %ymm7, %ymm21
+; AVX512DQ-NEXT: vmovdqa (%rax), %ymm6
+; AVX512DQ-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm6, %ymm6
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm6, %zmm0, %zmm0
; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm4 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27]
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm5 = ymm15[2,2,2,2,6,6,6,6]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7,8,9],ymm5[10],ymm4[11,12],ymm5[13],ymm4[14,15]
-; AVX512DQ-NEXT: vmovdqa64 %ymm3, %ymm28
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7,8,9],ymm5[10],ymm4[11,12],ymm5[13],ymm4[14,15]
+; AVX512DQ-NEXT: vmovdqa64 %ymm4, %ymm28
; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm4 = ymm12[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,2,2,2,6,6,6,6]
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm5 = ymm15[3,3,3,3,7,7,7,7]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6,7,8],ymm4[9],ymm5[10,11],ymm4[12],ymm5[13,14,15]
-; AVX512DQ-NEXT: vmovdqa64 %ymm3, %ymm29
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm4 = ymm10[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm8 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6,7,8],ymm4[9],ymm5[10,11],ymm4[12],ymm5[13,14,15]
+; AVX512DQ-NEXT: vmovdqa64 %ymm8, %ymm29
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm4 = ymm11[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,2,2,2,6,6,6,6]
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm5 = ymm2[2,2,2,2,6,6,6,6]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6,7,8],ymm4[9],ymm5[10,11],ymm4[12],ymm5[13,14,15]
-; AVX512DQ-NEXT: vmovdqa64 %ymm3, %ymm30
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm4 = ymm10[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6,7,8],ymm4[9],ymm5[10,11],ymm4[12],ymm5[13,14,15]
+; AVX512DQ-NEXT: vmovdqa64 %ymm4, %ymm30
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm4 = ymm11[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,2,2,2,6,6,6,6]
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm5 = ymm2[3,3,3,3,7,7,7,7]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0,1,2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7,8,9,10],ymm4[11],ymm5[12,13],ymm4[14],ymm5[15]
-; AVX512DQ-NEXT: vmovdqa64 %ymm3, %ymm31
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm4 = ymm5[0,1,2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7,8,9,10],ymm4[11],ymm5[12,13],ymm4[14],ymm5[15]
+; AVX512DQ-NEXT: vmovdqa64 %ymm4, %ymm31
; AVX512DQ-NEXT: vprold $16, %ymm13, %ymm4
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm5 = ymm14[1,2,2,3,5,6,6,7]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7,8,9],ymm4[10],ymm5[11,12],ymm4[13],ymm5[14,15]
-; AVX512DQ-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7,8,9],ymm4[10],ymm5[11,12],ymm4[13],ymm5[14,15]
+; AVX512DQ-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm4 = ymm13[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,2,2,3,6,6,6,7]
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm5 = ymm14[3,3,3,3,7,7,7,7]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7,8,9],ymm5[10],ymm4[11,12],ymm5[13],ymm4[14,15]
-; AVX512DQ-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vmovdqa64 %ymm27, %ymm3
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm4 = ymm3[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7,8,9],ymm5[10],ymm4[11,12],ymm5[13],ymm4[14,15]
+; AVX512DQ-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-NEXT: vmovdqa %ymm9, %ymm8
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm4 = ymm9[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,2,2,2,6,6,6,6]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm5 = ymm8[3,3,3,3,7,7,7,7]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm5 = ymm21[3,3,3,3,7,7,7,7]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6,7,8],ymm4[9],ymm5[10,11],ymm4[12],ymm5[13,14,15]
-; AVX512DQ-NEXT: vmovdqu %ymm4, (%rsp) # 32-byte Spill
+; AVX512DQ-NEXT: vmovdqa64 %ymm4, %ymm25
+; AVX512DQ-NEXT: vmovdqa64 %ymm17, %ymm9
; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm4 = ymm9[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,2,2,2,6,6,6,6]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm5 = ymm11[3,3,3,3,7,7,7,7]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm5 = ymm26[3,3,3,3,7,7,7,7]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm4 = ymm5[0,1,2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7,8,9,10],ymm4[11],ymm5[12,13],ymm4[14],ymm5[15]
-; AVX512DQ-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vmovdqa64 %ymm16, %ymm4
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm4 = ymm4[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX512DQ-NEXT: vmovdqa64 %ymm4, %ymm18
+; AVX512DQ-NEXT: vmovdqa64 %ymm19, %ymm10
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm4 = ymm10[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,2,2,3,6,6,6,7]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm5 = ymm17[3,3,3,3,7,7,7,7]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm5 = ymm20[3,3,3,3,7,7,7,7]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7,8,9],ymm5[10],ymm4[11,12],ymm5[13],ymm4[14,15]
-; AVX512DQ-NEXT: vmovdqa64 %ymm4, %ymm19
+; AVX512DQ-NEXT: vmovdqa64 %ymm4, %ymm17
; AVX512DQ-NEXT: vmovdqa 32(%rdi), %xmm4
; AVX512DQ-NEXT: vmovdqa 32(%rsi), %xmm5
; AVX512DQ-NEXT: vprold $16, %xmm5, %xmm6
@@ -6627,12 +6633,11 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm7 = xmm5[1,1,2,2]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = xmm7[0],xmm1[1],xmm7[2,3],xmm1[4],xmm7[5,6],xmm1[7]
; AVX512DQ-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; AVX512DQ-NEXT: vmovdqa64 %xmm1, %xmm18
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
; AVX512DQ-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm2[1,1,1,1,5,5,5,5]
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm2 = ymm10[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm2 = ymm11[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,0,2,1,4,4,6,5]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7,8,9],ymm1[10],ymm2[11,12],ymm1[13],ymm2[14,15]
; AVX512DQ-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -6646,458 +6651,459 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,0,0,0,4,4,4,4]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5],ymm1[6],ymm2[7,8,9,10],ymm1[11],ymm2[12,13],ymm1[14],ymm2[15]
; AVX512DQ-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm1 = [0,5,0,0,0,6,0,0,6,0,0,0,7,0,0,7]
-; AVX512DQ-NEXT: vmovdqa 32(%rax), %ymm2
-; AVX512DQ-NEXT: vpermd %zmm2, %zmm1, %zmm27
-; AVX512DQ-NEXT: vmovdqa64 %ymm21, %ymm1
-; AVX512DQ-NEXT: vpshufb %ymm1, %ymm2, %ymm1
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,1,1,3,4,5,5,7]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
-; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm20
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512DQ-NEXT: vpshufb %xmm4, %xmm0, %xmm5
-; AVX512DQ-NEXT: vpshufb %xmm4, %xmm6, %xmm14
-; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm1
-; AVX512DQ-NEXT: vmovdqa (%rsi), %xmm2
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; AVX512DQ-NEXT: vpshufb %xmm4, %xmm0, %xmm0
-; AVX512DQ-NEXT: vmovdqa64 %ymm0, %ymm21
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; AVX512DQ-NEXT: vprold $16, %xmm2, %xmm2
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3,4],xmm2[5],xmm1[6,7]
-; AVX512DQ-NEXT: vmovdqa64 %ymm1, %ymm24
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm1 = ymm3[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,0,0,0,4,4,4,4]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = ymm8[0,1,1,3,4,5,5,7]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7,8,9],ymm1[10],ymm2[11,12],ymm1[13],ymm2[14,15]
-; AVX512DQ-NEXT: vmovdqa64 %ymm1, %ymm23
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = ymm8[2,2,2,2,6,6,6,6]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
-; AVX512DQ-NEXT: vmovdqa64 %ymm1, %ymm22
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm1 = ymm9[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,0,2,1,4,4,6,5]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = ymm11[1,1,1,1,5,5,5,5]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm13 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm11[2,2,2,2,6,6,6,6]
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm2 = ymm9[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[2,2,2,2,6,6,6,6]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm11 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6,7,8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13,14,15]
-; AVX512DQ-NEXT: vmovdqa64 %ymm16, %ymm4
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm1 = ymm4[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,0,0,0,4,4,4,4]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = ymm17[0,0,2,1,4,4,6,5]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm8 = ymm1[0,1,2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7,8,9,10],ymm2[11],ymm1[12,13],ymm2[14],ymm1[15]
-; AVX512DQ-NEXT: vprold $16, %ymm16, %ymm1
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm10 = ymm17[1,2,2,3,5,6,6,7]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm7 = ymm10[0,1],ymm1[2],ymm10[3,4],ymm1[5],ymm10[6,7,8,9],ymm1[10],ymm10[11,12],ymm1[13],ymm10[14,15]
-; AVX512DQ-NEXT: vpshuflw $248, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
-; AVX512DQ-NEXT: # xmm10 = mem[0,2,3,3,4,5,6,7]
+; AVX512DQ-NEXT: vmovdqa 32(%rax), %ymm1
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,1,1,3,4,5,5,7]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm19
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
+; AVX512DQ-NEXT: vpshufb %xmm3, %xmm0, %xmm1
+; AVX512DQ-NEXT: vpshufb %xmm3, %xmm6, %xmm5
+; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm2
+; AVX512DQ-NEXT: vmovdqa (%rsi), %xmm4
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; AVX512DQ-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX512DQ-NEXT: vmovdqa64 %ymm0, %ymm23
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
+; AVX512DQ-NEXT: vprold $16, %xmm4, %xmm4
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2],xmm2[3,4],xmm4[5],xmm2[6,7]
+; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm22
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm2 = ymm8[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,0,0,0,4,4,4,4]
+; AVX512DQ-NEXT: vmovdqa64 %ymm21, %ymm6
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm4 = ymm21[0,1,1,3,4,5,5,7]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm2 = ymm4[0,1],ymm2[2],ymm4[3,4],ymm2[5],ymm4[6,7,8,9],ymm2[10],ymm4[11,12],ymm2[13],ymm4[14,15]
+; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm21
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm4 = ymm6[2,2,2,2,6,6,6,6]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7,8,9],ymm4[10],ymm2[11,12],ymm4[13],ymm2[14,15]
+; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm16
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm2 = ymm9[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,0,2,1,4,4,6,5]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm4 = ymm26[1,1,1,1,5,5,5,5]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm13 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7,8,9],ymm4[10],ymm2[11,12],ymm4[13],ymm2[14,15]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = ymm26[2,2,2,2,6,6,6,6]
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm4 = ymm9[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,2,2,2,6,6,6,6]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm12 = ymm2[0],ymm4[1],ymm2[2,3],ymm4[4],ymm2[5,6,7,8],ymm4[9],ymm2[10,11],ymm4[12],ymm2[13,14,15]
+; AVX512DQ-NEXT: vmovdqa %ymm10, %ymm14
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm2 = ymm10[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,0,0,0,4,4,4,4]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm10 = ymm20[0,0,2,1,4,4,6,5]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm11 = ymm2[0,1,2],ymm10[3],ymm2[4,5],ymm10[6],ymm2[7,8,9,10],ymm10[11],ymm2[12,13],ymm10[14],ymm2[15]
+; AVX512DQ-NEXT: vmovdqa64 %xmm27, %xmm2
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm10 = xmm2[0,2,3,3,4,5,6,7]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,2,1]
; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,4]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm0[0,0,1,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm5[0,0,1,1]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm27 = ymm0[0,0,1,3]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm1[0,0,1,1]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm28 = ymm28[2,2,2,3]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm29 = ymm29[0,2,2,3]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm30 = ymm30[0,2,2,3]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm31 = ymm31[2,1,3,3]
-; AVX512DQ-NEXT: vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm6 = mem[2,1,3,2]
-; AVX512DQ-NEXT: vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm5 = mem[2,2,2,3]
-; AVX512DQ-NEXT: vpermq $232, (%rsp), %ymm1 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm1 = mem[0,2,2,3]
-; AVX512DQ-NEXT: vmovdqa64 %xmm18, %xmm0
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm12 = xmm0[0,1,3,2,4,5,6,7]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm12 = xmm12[0,0,1,1]
-; AVX512DQ-NEXT: vpermq $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm0 = mem[2,1,3,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm9 = ymm14[0,0,1,1]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm19[2,2,2,3]
-; AVX512DQ-NEXT: vpshuflw $180, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
-; AVX512DQ-NEXT: # xmm15 = mem[0,1,3,2,4,5,6,7]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm15 = xmm15[0,1,1,3]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm29, %zmm28, %zmm28
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm31, %zmm30, %zmm29
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm30 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm29 = zmm29 ^ (zmm30 & (zmm29 ^ zmm28))
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,1,1,3]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm12, %zmm1, %zmm1
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm9
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm9 = zmm1 ^ (zmm30 & (zmm9 ^ zmm1))
-; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm0 # 32-byte Folded Reload
-; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm1 # 32-byte Folded Reload
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm1 = zmm0 ^ (zmm2 & (zmm1 ^ zmm0))
-; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm0 # 32-byte Folded Reload
+; AVX512DQ-NEXT: vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
+; AVX512DQ-NEXT: # ymm8 = mem[2,1,3,2]
+; AVX512DQ-NEXT: vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
+; AVX512DQ-NEXT: # ymm6 = mem[2,2,2,3]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm25[0,2,2,3]
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm9 = xmm7[0,1,3,2,4,5,6,7]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[0,0,1,1]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm18[2,1,3,3]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm5[0,0,1,1]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm17[2,2,2,3]
+; AVX512DQ-NEXT: vpshuflw $180, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
+; AVX512DQ-NEXT: # xmm5 = mem[0,1,3,2,4,5,6,7]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm4 = xmm5[0,1,1,3]
+; AVX512DQ-NEXT: vprold $16, %ymm14, %ymm5
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm15 = ymm20[1,2,2,3,5,6,6,7]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm5 = ymm15[0,1],ymm5[2],ymm15[3,4],ymm5[5],ymm15[6,7,8,9],ymm5[10],ymm15[11,12],ymm5[13],ymm15[14,15]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm29, %zmm28, %zmm15
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm31, %zmm30, %zmm28
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm29 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm28 = zmm28 ^ (zmm29 & (zmm28 ^ zmm15))
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,1,1,3]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm9, %zmm2, %zmm2
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm0 = zmm2 ^ (zmm29 & (zmm0 ^ zmm2))
+; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm1 # 32-byte Folded Reload
+; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm2 # 32-byte Folded Reload
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm9 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm2 = zmm1 ^ (zmm9 & (zmm2 ^ zmm1))
+; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm1 # 32-byte Folded Reload
; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm0 = (zmm0 & mem) | zmm3
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm1))
-; AVX512DQ-NEXT: vpshufd $254, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm1 = mem[2,3,3,3,6,7,7,7]
-; AVX512DQ-NEXT: vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm3 = mem[0,0,2,1]
-; AVX512DQ-NEXT: vpshuflw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
-; AVX512DQ-NEXT: # xmm10 = mem[2,1,2,3,4,5,6,7]
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,5,5,4]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,1,3]
-; AVX512DQ-NEXT: vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm12 = mem[0,0,1,1]
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm1 = (zmm1 & mem) | zmm3
+; AVX512DQ-NEXT: vpshufd $254, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
+; AVX512DQ-NEXT: # ymm3 = mem[2,3,3,3,6,7,7,7]
+; AVX512DQ-NEXT: vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload
+; AVX512DQ-NEXT: # ymm10 = mem[0,0,2,1]
+; AVX512DQ-NEXT: vpshuflw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
+; AVX512DQ-NEXT: # xmm15 = mem[2,1,2,3,4,5,6,7]
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,4,5,5,4]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm15 = ymm15[0,0,1,3]
+; AVX512DQ-NEXT: vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm17 # 32-byte Folded Reload
+; AVX512DQ-NEXT: # ymm17 = mem[0,0,1,1]
; AVX512DQ-NEXT: vpshuflw $248, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
; AVX512DQ-NEXT: # xmm14 = mem[0,2,3,3,4,5,6,7]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm14 = ymm14[0,0,2,1]
-; AVX512DQ-NEXT: vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm17 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm17 = mem[2,2,2,3]
-; AVX512DQ-NEXT: vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm18 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm18 = mem[2,1,3,2]
-; AVX512DQ-NEXT: vpermq $250, {{[-0-9]+}}(%r{{[sb]}}p), %ymm19 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm19 = mem[2,2,3,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm21 = ymm21[0,0,1,1]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm28 = ymm24[0,0,2,1]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm30 = ymm23[2,1,3,2]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm31 = ymm22[2,2,2,3]
+; AVX512DQ-NEXT: vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm18 # 32-byte Folded Reload
+; AVX512DQ-NEXT: # ymm18 = mem[2,2,2,3]
+; AVX512DQ-NEXT: vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm20 # 32-byte Folded Reload
+; AVX512DQ-NEXT: # ymm20 = mem[2,1,3,2]
+; AVX512DQ-NEXT: vpermq $250, {{[-0-9]+}}(%r{{[sb]}}p), %ymm26 # 32-byte Folded Reload
+; AVX512DQ-NEXT: # ymm26 = mem[2,2,3,3]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm27 = ymm23[0,0,1,1]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm29 = ymm22[0,0,2,1]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm30 = ymm21[2,1,3,2]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm31 = ymm16[2,2,2,3]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,2,2,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,2,2,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,1,3,2]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm5, %zmm6, %zmm5
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 ^ (mem & (zmm5 ^ zmm29))
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm27 = zmm27 ^ (mem & (zmm27 ^ zmm5))
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm5 = ymm15[0,0,0,1]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm5, %zmm4, %zmm4
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,3,2]
-; AVX512DQ-NEXT: vpbroadcastd 32(%rax), %ymm5
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm5, %zmm1, %zmm1
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 ^ (mem & (zmm1 ^ zmm4))
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 ^ (mem & (zmm1 ^ zmm9))
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm10, %zmm3, %zmm3
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm14, %zmm12, %zmm4
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm4 = zmm3 ^ (zmm2 & (zmm4 ^ zmm3))
-; AVX512DQ-NEXT: vpbroadcastd 36(%rax), %ymm2
-; AVX512DQ-NEXT: vpbroadcastd 40(%rax), %ymm3
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (mem & (zmm2 ^ zmm3))
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (mem & (zmm2 ^ zmm4))
-; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm17, %zmm3, %zmm3
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,2,2,3]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,1,3,2]
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 ^ (mem & (zmm1 ^ zmm2))
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm6, %zmm8, %zmm2
+; AVX512DQ-NEXT: vmovdqa64 (%rax), %zmm6
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (mem & (zmm2 ^ zmm28))
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm8 = [0,13,0,0,0,14,0,0,14,0,0,0,15,0,0,15]
+; AVX512DQ-NEXT: vpermd %zmm6, %zmm8, %zmm8
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm8 = zmm8 ^ (mem & (zmm8 ^ zmm2))
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm4[0,0,0,1]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm7, %zmm2
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,1,3,2]
+; AVX512DQ-NEXT: vpbroadcastd 32(%rax), %ymm4
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm4, %zmm3, %zmm3
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm3 = zmm3 ^ (mem & (zmm3 ^ zmm2))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm3 = zmm3 ^ (mem & (zmm3 ^ zmm0))
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm15, %zmm10, %zmm0
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm14, %zmm17, %zmm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm2 = zmm0 ^ (zmm9 & (zmm2 ^ zmm0))
+; AVX512DQ-NEXT: vpbroadcastd 36(%rax), %ymm0
+; AVX512DQ-NEXT: vpbroadcastd 40(%rax), %ymm4
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm0
; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm18, %zmm4, %zmm4
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 ^ (mem & (zmm4 ^ zmm3))
-; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm19[0,1,2,3]
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm20 = zmm20 | (zmm3 & mem)
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm20 = zmm20 ^ (mem & (zmm20 ^ zmm4))
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm28, %zmm21, %zmm3
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm3 = zmm26 ^ (mem & (zmm3 ^ zmm26))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm4))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm2))
+; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm18, %zmm2, %zmm2
+; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm20, %zmm4, %zmm4
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 ^ (mem & (zmm4 ^ zmm2))
+; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm26[0,1,2,3]
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm19 = zmm19 | (zmm2 & mem)
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm19 = zmm19 ^ (mem & (zmm19 ^ zmm4))
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm29, %zmm27, %zmm2
+; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm2 = zmm4 ^ (mem & (zmm2 ^ zmm4))
; AVX512DQ-NEXT: vpbroadcastd (%rax), %ymm4
-; AVX512DQ-NEXT: vpbroadcastd 4(%rax), %ymm5
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm5, %zmm4, %zmm4
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 ^ (mem & (zmm4 ^ zmm25))
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 ^ (mem & (zmm4 ^ zmm3))
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm31, %zmm30, %zmm3
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm11, %zmm13, %zmm5
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 ^ (mem & (zmm5 ^ zmm3))
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm7, %zmm8, %zmm3
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm6 = [0,0,4,0,0,0,5,0,0,5,0,0,0,6,0,0]
-; AVX512DQ-NEXT: vpermd (%rax), %zmm6, %zmm6
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm6 = zmm6 ^ (mem & (zmm6 ^ zmm3))
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm6 = zmm6 ^ (mem & (zmm6 ^ zmm5))
+; AVX512DQ-NEXT: vpbroadcastd 4(%rax), %ymm7
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm7, %zmm4, %zmm4
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 ^ (mem & (zmm4 ^ zmm24))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 ^ (mem & (zmm4 ^ zmm2))
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm31, %zmm30, %zmm2
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm12, %zmm13, %zmm7
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm7 = zmm7 ^ (mem & (zmm7 ^ zmm2))
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm5, %zmm11, %zmm2
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm5 = [0,0,4,0,0,0,5,0,0,5,0,0,0,6,0,0]
+; AVX512DQ-NEXT: vpermd %zmm6, %zmm5, %zmm5
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 ^ (mem & (zmm5 ^ zmm2))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 ^ (mem & (zmm5 ^ zmm7))
; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-NEXT: vmovdqa64 %zmm6, 128(%rax)
+; AVX512DQ-NEXT: vmovdqa64 %zmm5, 128(%rax)
; AVX512DQ-NEXT: vmovdqa64 %zmm4, (%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm20, 320(%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm2, 256(%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm1, 192(%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm0, 64(%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm27, 384(%rax)
-; AVX512DQ-NEXT: addq $680, %rsp # imm = 0x2A8
+; AVX512DQ-NEXT: vmovdqa64 %zmm19, 320(%rax)
+; AVX512DQ-NEXT: vmovdqa64 %zmm0, 256(%rax)
+; AVX512DQ-NEXT: vmovdqa64 %zmm3, 192(%rax)
+; AVX512DQ-NEXT: vmovdqa64 %zmm1, 64(%rax)
+; AVX512DQ-NEXT: vmovdqa64 %zmm8, 384(%rax)
+; AVX512DQ-NEXT: addq $664, %rsp # imm = 0x298
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQ-FCP-LABEL: store_i16_stride7_vf32:
; AVX512DQ-FCP: # %bb.0:
-; AVX512DQ-FCP-NEXT: subq $264, %rsp # imm = 0x108
-; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %ymm12
+; AVX512DQ-FCP-NEXT: subq $296, %rsp # imm = 0x128
+; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %ymm9
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm12, %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm9, %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %ymm4
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm4, %ymm3
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm4, %ymm16
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm4, %ymm17
; AVX512DQ-FCP-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX512DQ-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %ymm11
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rsi), %ymm10
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm3 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm11, %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm11, (%rsp) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm15
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm15, %ymm5
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpor %ymm2, %ymm5, %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa (%r9), %ymm5
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm5, %ymm8
-; AVX512DQ-FCP-NEXT: vmovdqa (%r8), %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm2, %ymm9
-; AVX512DQ-FCP-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %ymm8
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rsi), %ymm13
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm8, %ymm3
; AVX512DQ-FCP-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm10, %ymm3
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm9
-; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm9, %ymm4
-; AVX512DQ-FCP-NEXT: vpor %ymm3, %ymm4, %ymm3
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rcx), %ymm13
-; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm13, %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm10
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm10, %ymm5
+; AVX512DQ-FCP-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vpor %ymm3, %ymm5, %ymm3
+; AVX512DQ-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa (%r9), %ymm15
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm15, %ymm3
+; AVX512DQ-FCP-NEXT: vmovdqa (%r8), %ymm11
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm11, %ymm7
+; AVX512DQ-FCP-NEXT: vpor %ymm3, %ymm7, %ymm3
+; AVX512DQ-FCP-NEXT: vmovdqu %ymm3, (%rsp) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm13, %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm12
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm12, %ymm3
+; AVX512DQ-FCP-NEXT: vpor %ymm2, %ymm3, %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rcx), %ymm7
+; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm7, %ymm0
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdx), %ymm14
; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm14, %ymm1
; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT: vmovdqa 32(%r9), %ymm1
-; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm1, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm1, %ymm19
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%r8), %ymm3
-; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm3, %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm3, %ymm25
-; AVX512DQ-FCP-NEXT: vporq %ymm0, %ymm1, %ymm20
+; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm1, %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm1, %ymm20
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%r8), %ymm2
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm2, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm2, %ymm16
+; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [u,u,u,u,u,u,u,u,u,u,u,u,14,15,12,13,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm10, %ymm1
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm9[3,3,3,3,7,7,7,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3],ymm3[4,5],ymm1[6],ymm3[7,8,9,10],ymm1[11],ymm3[12,13],ymm1[14],ymm3[15]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm10[10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm4 = ymm9[2,2,2,2,6,6,6,6]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6,7,8],ymm3[9],ymm4[10,11],ymm3[12],ymm4[13,14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm13, %ymm1
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm2 = ymm12[3,3,3,3,7,7,7,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5],ymm1[6],ymm2[7,8,9,10],ymm1[11],ymm2[12,13],ymm1[14],ymm2[15]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm13[10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm12[2,2,2,2,6,6,6,6]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6,7,8],ymm2[9],ymm3[10,11],ymm2[12],ymm3[13,14,15]
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm24 = [0,2,2,3,10,0,11,0]
-; AVX512DQ-FCP-NEXT: vpermi2q %zmm1, %zmm3, %zmm24
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm12[14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm16[3,3,3,3,7,7,7,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6,7,8],ymm1[9],ymm3[10,11],ymm1[12],ymm3[13,14,15]
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rcx), %xmm4
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdx), %xmm3
+; AVX512DQ-FCP-NEXT: vpermi2q %zmm1, %zmm2, %zmm24
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm9[14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm2 = ymm17[3,3,3,3,7,7,7,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6,7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13,14,15]
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rcx), %xmm3
; AVX512DQ-FCP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,2,3,0,1,2,3,6,7,4,5,6,7,4,5]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm3, %xmm3
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm23 = [0,2,2,3,8,0,9,0]
-; AVX512DQ-FCP-NEXT: vpermi2q %zmm3, %zmm1, %zmm23
-; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm11, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm1 = ymm15[3,3,3,3,7,7,7,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8,9,10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %xmm1
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rsi), %xmm8
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm3, %xmm3
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdx), %xmm2
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3,0,1,2,3,6,7,4,5,6,7,4,5]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm3, %xmm27
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm25 = [0,2,2,3,8,0,9,0]
+; AVX512DQ-FCP-NEXT: vpermi2q %zmm2, %zmm1, %zmm25
+; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm8, %ymm0
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm1 = ymm10[3,3,3,3,7,7,7,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8,9,10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %xmm4
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rsi), %xmm0
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm2, %xmm2
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm21 = [2,1,3,3,8,8,9,9]
-; AVX512DQ-FCP-NEXT: vpermi2q %zmm3, %zmm0, %zmm21
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm2[3,3,3,3,7,7,7,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm11 = ymm0[0,1],ymm3[2],ymm0[3,4],ymm3[5],ymm0[6,7,8,9],ymm3[10],ymm0[11,12],ymm3[13],ymm0[14,15]
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm22 = [2,2,2,3,0,8,8,9]
+; AVX512DQ-FCP-NEXT: vpermi2q %zmm2, %zmm1, %zmm21
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm2 = ymm11[3,3,3,3,7,7,7,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm8 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm23 = [2,2,2,3,0,8,8,9]
; AVX512DQ-FCP-NEXT: vmovdqa 32(%r9), %xmm3
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%r8), %xmm7
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,2,3,6,7,4,5,6,7,4,5,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm0, %xmm15
-; AVX512DQ-FCP-NEXT: vpermi2q %zmm15, %zmm11, %zmm22
-; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm11 = xmm8[4],xmm1[4],xmm8[5],xmm1[5],xmm8[6],xmm1[6],xmm8[7],xmm1[7]
-; AVX512DQ-FCP-NEXT: vprold $16, %xmm8, %xmm8
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm8[2],xmm1[3,4],xmm8[5],xmm1[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [4,5,2,3,4,5,6,7,8,9,10,11,10,11,8,9]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm15, %xmm11, %xmm8
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm15, %xmm11
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%r8), %xmm2
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,2,3,6,7,4,5,6,7,4,5,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm1, %xmm5
+; AVX512DQ-FCP-NEXT: vpermi2q %zmm5, %zmm8, %zmm23
+; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; AVX512DQ-FCP-NEXT: vprold $16, %xmm0, %xmm0
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[1,1,2,3]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm4[0,1],xmm0[2],xmm4[3,4],xmm0[5],xmm4[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [4,5,2,3,4,5,6,7,8,9,10,11,10,11,8,9]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm5, %xmm4
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm26 = [0,0,0,1,8,8,9,0]
-; AVX512DQ-FCP-NEXT: vpermi2q %zmm8, %zmm1, %zmm26
-; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm7[4],xmm3[4],xmm7[5],xmm3[5],xmm7[6],xmm3[6],xmm7[7],xmm3[7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm1, %xmm1
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm0, %xmm0
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm28 = [0,1,1,0,8,8,9,9]
+; AVX512DQ-FCP-NEXT: vpermi2q %zmm4, %zmm0, %zmm26
+; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm1, %xmm1
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm0, %xmm0
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm28 = [8,9,9,0,0,0,1,1]
; AVX512DQ-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm28
; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %xmm1
; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %xmm0
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm1[u,u,4,5,u,u,u,u,6,7,u,u,u,u,8,9]
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm8 = xmm0[1,1,2,2]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm8[0],xmm7[1],xmm8[2,3],xmm7[4],xmm8[5,6],xmm7[7]
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm8, %xmm6
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm27 = [0,0,1,0,8,8,9,9]
-; AVX512DQ-FCP-NEXT: vpermi2q %zmm7, %zmm6, %zmm27
-; AVX512DQ-FCP-NEXT: vmovdqa (%r9), %xmm6
-; AVX512DQ-FCP-NEXT: vmovdqa (%r8), %xmm15
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm15[0],xmm6[0],xmm15[1],xmm6[1],xmm15[2],xmm6[2],xmm15[3],xmm6[3]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm7, %xmm3
-; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm7, %xmm4
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm27, %xmm3
+; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm1[u,u,4,5,u,u,u,u,6,7,u,u,u,u,8,9]
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[1,1,2,2]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0],xmm3[1],xmm5[2,3],xmm3[4],xmm5[5,6],xmm3[7]
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm27 = [8,0,9,0,0,0,1,1]
+; AVX512DQ-FCP-NEXT: vpermi2q %zmm2, %zmm3, %zmm27
+; AVX512DQ-FCP-NEXT: vmovdqa (%r9), %xmm5
+; AVX512DQ-FCP-NEXT: vmovdqa (%r8), %xmm9
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm9[0],xmm5[0],xmm9[1],xmm5[1],xmm9[2],xmm5[2],xmm9[3],xmm5[3]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm3, %xmm4
+; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm3, %xmm3
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm29 = [0,0,0,1,8,9,9,0]
-; AVX512DQ-FCP-NEXT: vpermi2q %zmm3, %zmm4, %zmm29
-; AVX512DQ-FCP-NEXT: vprold $16, %ymm5, %ymm3
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm4 = ymm2[1,2,2,3,5,6,6,7]
+; AVX512DQ-FCP-NEXT: vpermi2q %zmm4, %zmm3, %zmm29
+; AVX512DQ-FCP-NEXT: vprold $16, %ymm15, %ymm3
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm4 = ymm11[1,2,2,3,5,6,6,7]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7,8,9],ymm3[10],ymm4[11,12],ymm3[13],ymm4[14,15]
-; AVX512DQ-FCP-NEXT: vpbroadcastd {{.*#+}} ymm7 = [18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm5, %ymm5
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,0,2,1,4,4,6,5]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm5[0,1,2],ymm2[3],ymm5[4,5],ymm2[6],ymm5[7,8,9,10],ymm2[11],ymm5[12,13],ymm2[14],ymm5[15]
+; AVX512DQ-FCP-NEXT: vpbroadcastd {{.*#+}} ymm6 = [18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm15, %ymm4
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm8 = ymm11[0,0,2,1,4,4,6,5]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0,1,2],ymm8[3],ymm4[4,5],ymm8[6],ymm4[7,8,9,10],ymm8[11],ymm4[12,13],ymm8[14],ymm4[15]
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm31 = [2,2,3,3,10,0,11,10]
-; AVX512DQ-FCP-NEXT: vpermi2q %zmm3, %zmm2, %zmm31
+; AVX512DQ-FCP-NEXT: vpermi2q %zmm3, %zmm4, %zmm31
; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm0, %xmm18
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm5
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm0, %xmm22
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm4
; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %xmm3
-; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm11, %xmm0, %xmm0
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm17
+; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm1, %xmm0
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm19
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FCP-NEXT: vpbroadcastd 8(%rax), %ymm0
-; AVX512DQ-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqa (%rax), %ymm8
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm8, %ymm1
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm30
-; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm15[4],xmm6[4],xmm15[5],xmm6[5],xmm15[6],xmm6[6],xmm15[7],xmm6[7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm1 = ymm9[1,1,1,1,5,5,5,5]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm16
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm13[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm1 = ymm14[0,1,1,3,4,5,5,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm11 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7,8,9],ymm0[10],ymm1[11,12],ymm0[13],ymm1[14,15]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm19, %ymm12
-; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm12, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm1 = ymm25[0,0,2,1,4,4,6,5]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7,8,9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
+; AVX512DQ-FCP-NEXT: vpbroadcastd 8(%rax), %ymm1
+; AVX512DQ-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa (%rax), %ymm10
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm10, %ymm8
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm1, %zmm30
+; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm11 = xmm9[4],xmm5[4],xmm9[5],xmm5[5],xmm9[6],xmm5[6],xmm9[7],xmm5[7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm8 = ymm12[1,1,1,1,5,5,5,5]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm8[2],ymm1[3,4],ymm8[5],ymm1[6,7,8,9],ymm8[10],ymm1[11,12],ymm8[13],ymm1[14,15]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm18
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm7[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm9 = ymm14[0,1,1,3,4,5,5,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm8 = ymm9[0,1],ymm1[2],ymm9[3,4],ymm1[5],ymm9[6,7,8,9],ymm1[10],ymm9[11,12],ymm1[13],ymm9[14,15]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm20, %ymm13
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm13, %ymm1
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm6 = ymm16[0,0,2,1,4,4,6,5]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1,2],ymm6[3],ymm1[4,5],ymm6[6],ymm1[7,8,9,10],ymm6[11],ymm1[12,13],ymm6[14],ymm1[15]
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm9 = [0,1,0,3,10,10,11,11]
-; AVX512DQ-FCP-NEXT: vpermi2q %zmm0, %zmm20, %zmm9
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rax), %ymm2
-; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm0
-; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm19 = [0,5,4,0,0,6,5,0,0,5,4,0,0,6,5,0]
-; AVX512DQ-FCP-NEXT: # zmm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FCP-NEXT: vpermd %ymm2, %ymm19, %ymm4
-; AVX512DQ-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm4
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm13[14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm7 = ymm14[3,3,3,3,7,7,7,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm7[0],ymm0[1],ymm7[2,3],ymm0[4],ymm7[5,6,7,8],ymm0[9],ymm7[10,11],ymm0[12],ymm7[13,14,15]
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27,22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27]
-; AVX512DQ-FCP-NEXT: # ymm10 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm13, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm15 = ymm14[2,2,2,2,6,6,6,6]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm15[2],ymm0[3,4],ymm15[5],ymm0[6,7,8,9],ymm15[10],ymm0[11,12],ymm15[13],ymm0[14,15]
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm20 = [2,2,2,3,8,10,10,11]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm7, %zmm20, %zmm0
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm12, %ymm1
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm12 = ymm25[3,3,3,3,7,7,7,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm7[0,1],ymm12[2],ymm7[3,4],ymm12[5],ymm7[6,7,8,9],ymm12[10],ymm7[11,12],ymm12[13],ymm7[14,15]
-; AVX512DQ-FCP-NEXT: vprold $16, %ymm1, %ymm12
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm15 = ymm25[1,2,2,3,5,6,6,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm15 = ymm15[0,1],ymm12[2],ymm15[3,4],ymm12[5],ymm15[6,7,8,9],ymm12[10],ymm15[11,12],ymm12[13],ymm15[14,15]
-; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm1[u,u,4,5,u,u,u,u,6,7,u,u,u,u,8,9]
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm9
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rax), %ymm1
+; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm1, %ymm2
+; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm20 = [0,5,4,0,0,6,5,0,0,5,4,0,0,6,5,0]
+; AVX512DQ-FCP-NEXT: # zmm20 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FCP-NEXT: vpermd %ymm1, %ymm20, %ymm1
+; AVX512DQ-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm7[14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm6 = ymm14[3,3,3,3,7,7,7,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm6[0],ymm2[1],ymm6[2,3],ymm2[4],ymm6[5,6,7,8],ymm2[9],ymm6[10,11],ymm2[12],ymm6[13,14,15]
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27,22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27]
+; AVX512DQ-FCP-NEXT: # ymm6 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm7, %ymm0
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm12 = ymm14[2,2,2,2,6,6,6,6]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm12 = ymm0[0,1],ymm12[2],ymm0[3,4],ymm12[5],ymm0[6,7,8,9],ymm12[10],ymm0[11,12],ymm12[13],ymm0[14,15]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm7 = ymm16[3,3,3,3,7,7,7,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1],ymm7[2],ymm0[3,4],ymm7[5],ymm0[6,7,8,9],ymm7[10],ymm0[11,12],ymm7[13],ymm0[14,15]
+; AVX512DQ-FCP-NEXT: vprold $16, %ymm13, %ymm7
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm14 = ymm16[1,2,2,3,5,6,6,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm14 = ymm14[0,1],ymm7[2],ymm14[3,4],ymm7[5],ymm14[6,7,8,9],ymm7[10],ymm14[11,12],ymm7[13],ymm14[14,15]
+; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[u,u,4,5,u,u,u,u,6,7,u,u,u,u,8,9]
; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm13[4],xmm1[4],xmm13[5],xmm1[5],xmm13[6],xmm1[6],xmm13[7],xmm1[7]
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm14 = xmm13[1,1,2,2]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm14[0],xmm12[1],xmm14[2,3],xmm12[4],xmm14[5,6],xmm12[7]
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,3,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm14 = [0,0,1,1,8,8,10,9]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm1, %zmm14, %zmm12
-; AVX512DQ-FCP-NEXT: vprold $16, %xmm3, %xmm1
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm13 = xmm5[1,1,2,3]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm13[0,1],xmm1[2],xmm13[3,4],xmm1[5],xmm13[6,7]
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
+; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm13[4],xmm0[4],xmm13[5],xmm0[5],xmm13[6],xmm0[6],xmm13[7],xmm0[7]
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm15 = xmm13[1,1,2,2]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm15[0],xmm7[1],xmm15[2,3],xmm7[4],xmm15[5,6],xmm7[7]
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,3,3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm15 = [0,0,1,1,8,8,0,9]
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm0, %zmm15, %zmm7
+; AVX512DQ-FCP-NEXT: vprold $16, %xmm3, %xmm0
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm13 = xmm4[1,1,2,3]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm13[0,1],xmm0[2],xmm13[3,4],xmm0[5],xmm13[6,7]
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm13 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm6, %xmm5
-; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm3, %xmm3
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm1, %zmm14, %zmm3
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm1 = [2,1,3,2,10,10,10,11]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm7, %zmm1, %zmm15
+; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm11, %xmm4
+; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm3, %xmm11
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm0, %zmm15, %zmm11
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm0 = [2,1,3,2,10,10,10,11]
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm2, %zmm0, %zmm14
; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm13, %ymm6
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm7 = ymm10[2,2,2,2,6,6,6,6]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm6[0,1],ymm7[2],ymm6[3,4],ymm7[5],ymm6[6,7,8,9],ymm7[10],ymm6[11,12],ymm7[13],ymm6[14,15]
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} ymm7 = ymm13[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm10 = ymm10[0,1,1,3,4,5,5,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm10[0,1],ymm7[2],ymm10[3,4],ymm7[5],ymm10[6,7,8,9],ymm7[10],ymm10[11,12],ymm7[13],ymm10[14,15]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm6, %zmm1, %zmm7
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm24 ^ (zmm1 & (zmm0 ^ zmm24))
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm15 = zmm15 ^ (mem & (zmm15 ^ zmm0))
-; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [6,5,0,0,7,6,0,7,6,5,0,0,7,6,0,7]
-; AVX512DQ-FCP-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FCP-NEXT: vpermd %zmm2, %zmm0, %zmm2
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (mem & (zmm2 ^ zmm15))
-; AVX512DQ-FCP-NEXT: vpermd %ymm8, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm6 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm12 = zmm26 ^ (zmm6 & (zmm12 ^ zmm26))
-; AVX512DQ-FCP-NEXT: vpbroadcastd 36(%rax), %ymm8
-; AVX512DQ-FCP-NEXT: vpbroadcastd 40(%rax), %ymm10
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm8, %zmm8
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm8 = zmm8 ^ (mem & (zmm8 ^ zmm28))
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm8 = zmm8 ^ (mem & (zmm8 ^ zmm12))
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm18, %xmm10
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm10 = xmm10[0,2,3,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,2,1]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm17[0,0,1,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,0,1,1]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm16[2,2,2,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,1,3,2]
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm27 = zmm27 ^ (mem & (zmm27 ^ zmm3))
-; AVX512DQ-FCP-NEXT: vmovdqu (%rsp), %ymm15 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm15[10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm16 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm14 = ymm16[2,2,2,2,6,6,6,6]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm14[0],ymm3[1],ymm14[2,3],ymm3[4],ymm14[5,6,7,8],ymm3[9],ymm14[10,11],ymm3[12],ymm14[13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm14 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm15 = ymm16[1,1,1,1,5,5,5,5]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm14 = ymm14[0,1],ymm15[2],ymm14[3,4],ymm15[5],ymm14[6,7,8,9],ymm15[10],ymm14[11,12],ymm15[13],ymm14[14,15]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm3, %zmm20, %zmm14
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm14 = zmm14 ^ (mem & (zmm14 ^ zmm7))
-; AVX512DQ-FCP-NEXT: vpermd (%rax), %zmm19, %zmm3
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm3 = zmm3 ^ (mem & (zmm3 ^ zmm31))
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm3 = zmm3 ^ (mem & (zmm3 ^ zmm14))
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm7 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm10 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm10 = zmm7 ^ (zmm6 & (zmm10 ^ zmm7))
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm21 = zmm23 ^ (zmm1 & (zmm21 ^ zmm23))
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm1 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm1 = (zmm1 & mem) | zmm30
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 ^ (mem & (zmm1 ^ zmm10))
-; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm5, %zmm5
-; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm6, %zmm6
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm6 ^ (mem & (zmm6 ^ zmm5))
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 | (zmm9 & mem)
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 ^ (mem & (zmm4 ^ zmm6))
-; AVX512DQ-FCP-NEXT: vpbroadcastd 32(%rax), %ymm5
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm0, %zmm0
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm22))
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm21))
-; AVX512DQ-FCP-NEXT: vpbroadcastd (%rax), %ymm5
-; AVX512DQ-FCP-NEXT: vpbroadcastd 4(%rax), %ymm6
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm5, %zmm5
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 ^ (mem & (zmm5 ^ zmm29))
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 ^ (mem & (zmm5 ^ zmm27))
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm13, %ymm2
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm17[2,2,2,2,6,6,6,6]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7,8,9],ymm3[10],ymm2[11,12],ymm3[13],ymm2[14,15]
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm13[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm6 = ymm17[0,1,1,3,4,5,5,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm6[0,1],ymm3[2],ymm6[3,4],ymm3[5],ymm6[6,7,8,9],ymm3[10],ymm6[11,12],ymm3[13],ymm6[14,15]
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm2, %zmm0, %zmm3
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm16 = [2,2,2,3,8,10,10,11]
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm5, %zmm16, %zmm12
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm12 = zmm24 ^ (zmm17 & (zmm12 ^ zmm24))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm14 = zmm14 ^ (mem & (zmm14 ^ zmm12))
+; AVX512DQ-FCP-NEXT: vmovdqa64 (%rax), %zmm5
+; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm6 = [14,13,8,8,15,14,8,15,14,13,8,8,15,14,8,15]
+; AVX512DQ-FCP-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FCP-NEXT: vpermd %zmm5, %zmm6, %zmm6
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm6 ^ (mem & (zmm6 ^ zmm14))
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm12 = [6,0,0,0,7,0,0,7]
+; AVX512DQ-FCP-NEXT: vpermd %ymm10, %ymm12, %ymm10
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm12 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm7 = zmm26 ^ (zmm12 & (zmm7 ^ zmm26))
+; AVX512DQ-FCP-NEXT: vpbroadcastd 36(%rax), %ymm13
+; AVX512DQ-FCP-NEXT: vpbroadcastd 40(%rax), %ymm14
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm14, %zmm13, %zmm13
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm13 = zmm13 ^ (mem & (zmm13 ^ zmm28))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm13 = zmm13 ^ (mem & (zmm13 ^ zmm7))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm27 = zmm27 ^ (mem & (zmm27 ^ zmm11))
+; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm0[10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm24 # 32-byte Reload
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm11 = ymm24[2,2,2,2,6,6,6,6]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm11[0],ymm7[1],ymm11[2,3],ymm7[4],ymm11[5,6,7,8],ymm7[9],ymm11[10,11],ymm7[12],ymm11[13,14,15]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm22, %xmm2
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm11 = xmm2[0,2,3,3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,0,2,1]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm19[0,0,1,3]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm18[2,2,2,3]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,1,3,2]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm2 = ymm24[1,1,1,1,5,5,5,5]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7,8,9],ymm2[10],ymm0[11,12],ymm2[13],ymm0[14,15]
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm7, %zmm16, %zmm0
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm3))
+; AVX512DQ-FCP-NEXT: vpermd %zmm5, %zmm20, %zmm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (mem & (zmm2 ^ zmm31))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (mem & (zmm2 ^ zmm0))
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm0 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm14, %zmm3 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm3 = zmm0 ^ (zmm12 & (zmm3 ^ zmm0))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm21 = zmm25 ^ (zmm17 & (zmm21 ^ zmm25))
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, (%rsp), %zmm4, %zmm0 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = (zmm0 & mem) | zmm30
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm3))
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm15, %zmm3, %zmm3
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm4, %zmm4
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 ^ (mem & (zmm4 ^ zmm3))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 | (zmm9 & mem)
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 ^ (mem & (zmm1 ^ zmm4))
+; AVX512DQ-FCP-NEXT: vpbroadcastd 32(%rax), %ymm3
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm10, %zmm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm3 = zmm3 ^ (mem & (zmm3 ^ zmm23))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm3 = zmm3 ^ (mem & (zmm3 ^ zmm21))
+; AVX512DQ-FCP-NEXT: vpbroadcastd (%rax), %ymm4
+; AVX512DQ-FCP-NEXT: vpbroadcastd 4(%rax), %ymm5
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm4, %zmm4
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 ^ (mem & (zmm4 ^ zmm29))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 ^ (mem & (zmm4 ^ zmm27))
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm3, 128(%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm5, (%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, 320(%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm8, 256(%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, 192(%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm2, 384(%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm1, 64(%rax)
-; AVX512DQ-FCP-NEXT: addq $264, %rsp # imm = 0x108
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm2, 128(%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, (%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm1, 320(%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm13, 256(%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm3, 192(%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm6, 384(%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, 64(%rax)
+; AVX512DQ-FCP-NEXT: addq $296, %rsp # imm = 0x128
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
@@ -12522,70 +12528,70 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
;
; AVX512-LABEL: store_i16_stride7_vf64:
; AVX512: # %bb.0:
-; AVX512-NEXT: subq $2840, %rsp # imm = 0xB18
+; AVX512-NEXT: subq $2648, %rsp # imm = 0xA58
; AVX512-NEXT: vmovdqa 96(%rcx), %ymm6
-; AVX512-NEXT: vmovdqa 96(%rdx), %ymm13
+; AVX512-NEXT: vmovdqa 96(%rdx), %ymm15
; AVX512-NEXT: vmovdqa 96(%rdi), %ymm7
; AVX512-NEXT: vmovdqa 96(%rsi), %ymm8
; AVX512-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128]
; AVX512-NEXT: vpshufb %ymm0, %ymm6, %ymm2
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u]
-; AVX512-NEXT: vpshufb %ymm1, %ymm13, %ymm3
-; AVX512-NEXT: vporq %ymm2, %ymm3, %ymm17
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm10 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512-NEXT: vpshufb %ymm10, %ymm8, %ymm2
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm11 = [12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19]
-; AVX512-NEXT: vpshufb %ymm11, %ymm7, %ymm3
+; AVX512-NEXT: vpshufb %ymm1, %ymm15, %ymm3
; AVX512-NEXT: vporq %ymm2, %ymm3, %ymm18
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512-NEXT: vpshufb %ymm3, %ymm8, %ymm2
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm12 = [12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19]
+; AVX512-NEXT: vpshufb %ymm12, %ymm7, %ymm4
+; AVX512-NEXT: vporq %ymm2, %ymm4, %ymm19
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
; AVX512-NEXT: vmovdqa 64(%r9), %ymm2
; AVX512-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpshufb %ymm12, %ymm2, %ymm2
-; AVX512-NEXT: vmovdqa 64(%r8), %ymm3
-; AVX512-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vpshufb %ymm11, %ymm2, %ymm2
+; AVX512-NEXT: vmovdqa 64(%r8), %ymm4
+; AVX512-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-NEXT: vmovdqa {{.*#+}} ymm9 = [u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u]
-; AVX512-NEXT: vpshufb %ymm9, %ymm3, %ymm3
-; AVX512-NEXT: vpor %ymm2, %ymm3, %ymm2
+; AVX512-NEXT: vpshufb %ymm9, %ymm4, %ymm4
+; AVX512-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vmovdqa 64(%rcx), %ymm3
-; AVX512-NEXT: vpshufb %ymm0, %ymm3, %ymm2
-; AVX512-NEXT: vmovdqa64 %ymm3, %ymm26
-; AVX512-NEXT: vmovdqa 64(%rdx), %ymm4
-; AVX512-NEXT: vpshufb %ymm1, %ymm4, %ymm3
-; AVX512-NEXT: vmovdqa64 %ymm4, %ymm27
-; AVX512-NEXT: vpor %ymm2, %ymm3, %ymm2
+; AVX512-NEXT: vmovdqa 64(%rcx), %ymm4
+; AVX512-NEXT: vpshufb %ymm0, %ymm4, %ymm2
+; AVX512-NEXT: vmovdqa64 %ymm4, %ymm29
+; AVX512-NEXT: vmovdqa 64(%rdx), %ymm5
+; AVX512-NEXT: vpshufb %ymm1, %ymm5, %ymm4
+; AVX512-NEXT: vmovdqa64 %ymm5, %ymm28
+; AVX512-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vmovdqa 64(%rsi), %ymm3
-; AVX512-NEXT: vpshufb %ymm10, %ymm3, %ymm2
-; AVX512-NEXT: vmovdqa64 %ymm3, %ymm23
-; AVX512-NEXT: vmovdqa 64(%rdi), %ymm4
-; AVX512-NEXT: vpshufb %ymm11, %ymm4, %ymm3
-; AVX512-NEXT: vmovdqa64 %ymm4, %ymm22
-; AVX512-NEXT: vpor %ymm2, %ymm3, %ymm2
+; AVX512-NEXT: vmovdqa 64(%rsi), %ymm4
+; AVX512-NEXT: vpshufb %ymm3, %ymm4, %ymm2
+; AVX512-NEXT: vmovdqa64 %ymm4, %ymm24
+; AVX512-NEXT: vmovdqa 64(%rdi), %ymm5
+; AVX512-NEXT: vpshufb %ymm12, %ymm5, %ymm4
+; AVX512-NEXT: vmovdqa64 %ymm5, %ymm23
+; AVX512-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-NEXT: vmovdqa (%r9), %ymm2
; AVX512-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpshufb %ymm12, %ymm2, %ymm2
-; AVX512-NEXT: vmovdqa (%r8), %ymm3
-; AVX512-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpshufb %ymm9, %ymm3, %ymm3
-; AVX512-NEXT: vpor %ymm2, %ymm3, %ymm2
+; AVX512-NEXT: vpshufb %ymm11, %ymm2, %ymm2
+; AVX512-NEXT: vmovdqa (%r8), %ymm4
+; AVX512-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vpshufb %ymm9, %ymm4, %ymm4
+; AVX512-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-NEXT: vmovdqa (%rcx), %ymm2
; AVX512-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-NEXT: vpshufb %ymm0, %ymm2, %ymm2
-; AVX512-NEXT: vmovdqa (%rdx), %ymm3
-; AVX512-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpshufb %ymm1, %ymm3, %ymm3
-; AVX512-NEXT: vpor %ymm2, %ymm3, %ymm2
+; AVX512-NEXT: vmovdqa (%rdx), %ymm4
+; AVX512-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vpshufb %ymm1, %ymm4, %ymm4
+; AVX512-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-NEXT: vmovdqa (%rsi), %ymm2
; AVX512-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpshufb %ymm10, %ymm2, %ymm2
-; AVX512-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpshufb %ymm11, %ymm3, %ymm3
-; AVX512-NEXT: vpor %ymm2, %ymm3, %ymm2
+; AVX512-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX512-NEXT: vmovdqa (%rdi), %ymm4
+; AVX512-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vpshufb %ymm12, %ymm4, %ymm4
+; AVX512-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-NEXT: vmovdqa 32(%rcx), %ymm4
; AVX512-NEXT: vpshufb %ymm0, %ymm4, %ymm0
@@ -12594,28 +12600,28 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: vmovdqa 32(%rsi), %ymm5
-; AVX512-NEXT: vpshufb %ymm10, %ymm5, %ymm0
+; AVX512-NEXT: vpshufb %ymm3, %ymm5, %ymm0
; AVX512-NEXT: vmovdqa 32(%rdi), %ymm3
-; AVX512-NEXT: vpshufb %ymm11, %ymm3, %ymm1
+; AVX512-NEXT: vpshufb %ymm12, %ymm3, %ymm1
; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: vmovdqa 32(%r8), %ymm1
; AVX512-NEXT: vpshufb %ymm9, %ymm1, %ymm9
; AVX512-NEXT: vmovdqa 32(%r9), %ymm0
-; AVX512-NEXT: vpshufb %ymm12, %ymm0, %ymm10
+; AVX512-NEXT: vpshufb %ymm11, %ymm0, %ymm10
; AVX512-NEXT: vpor %ymm10, %ymm9, %ymm9
; AVX512-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27,22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27]
; AVX512-NEXT: # ymm10 = mem[0,1,0,1]
; AVX512-NEXT: vpshufb %ymm10, %ymm6, %ymm9
-; AVX512-NEXT: vmovdqa64 %ymm10, %ymm31
-; AVX512-NEXT: vpshufd {{.*#+}} ymm10 = ymm13[2,2,2,2,6,6,6,6]
+; AVX512-NEXT: vmovdqa64 %ymm10, %ymm27
+; AVX512-NEXT: vpshufd {{.*#+}} ymm10 = ymm15[2,2,2,2,6,6,6,6]
; AVX512-NEXT: vpblendw {{.*#+}} ymm9 = ymm9[0,1],ymm10[2],ymm9[3,4],ymm10[5],ymm9[6,7,8,9],ymm10[10],ymm9[11,12],ymm10[13],ymm9[14,15]
; AVX512-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,2,3]
; AVX512-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: vpshufhw {{.*#+}} ymm9 = ymm6[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
; AVX512-NEXT: vpshufd {{.*#+}} ymm9 = ymm9[2,2,2,2,6,6,6,6]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm10 = ymm13[3,3,3,3,7,7,7,7]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm10 = ymm15[3,3,3,3,7,7,7,7]
; AVX512-NEXT: vpblendw {{.*#+}} ymm9 = ymm10[0],ymm9[1],ymm10[2,3],ymm9[4],ymm10[5,6,7,8],ymm9[9],ymm10[10,11],ymm9[12],ymm10[13,14,15]
; AVX512-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,2,2,3]
; AVX512-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -12633,12 +12639,11 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-NEXT: vmovdqa 96(%r9), %ymm9
; AVX512-NEXT: vpshufhw {{.*#+}} ymm10 = ymm9[0,1,2,3,5,5,7,6,8,9,10,11,13,13,15,14]
-; AVX512-NEXT: vpermq {{.*#+}} ymm10 = ymm10[3,3,3,3]
-; AVX512-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vpermq {{.*#+}} ymm21 = ymm10[3,3,3,3]
; AVX512-NEXT: vpshufd {{.*#+}} ymm10 = ymm9[2,1,2,3,6,5,6,7]
; AVX512-NEXT: vpshuflw {{.*#+}} ymm10 = ymm10[0,0,3,3,4,5,6,7,8,8,11,11,12,13,14,15]
-; AVX512-NEXT: vpermq {{.*#+}} ymm30 = ymm10[2,2,2,2]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm10 = ymm13[0,1,1,3,4,5,5,7]
+; AVX512-NEXT: vpermq {{.*#+}} ymm20 = ymm10[2,2,2,2]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm10 = ymm15[0,1,1,3,4,5,5,7]
; AVX512-NEXT: vpshuflw {{.*#+}} ymm6 = ymm6[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
; AVX512-NEXT: vpshufd {{.*#+}} ymm6 = ymm6[0,0,0,0,4,4,4,4]
; AVX512-NEXT: vpblendw {{.*#+}} ymm6 = ymm10[0,1],ymm6[2],ymm10[3,4],ymm6[5],ymm10[6,7,8,9],ymm6[10],ymm10[11,12],ymm6[13],ymm10[14,15]
@@ -12649,18 +12654,17 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-NEXT: vpblendw {{.*#+}} ymm7 = ymm8[0,1],ymm7[2],ymm8[3,4],ymm7[5],ymm8[6,7,8,9],ymm7[10],ymm8[11,12],ymm7[13],ymm8[14,15]
; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,2,3]
; AVX512-NEXT: vprold $16, %ymm9, %ymm8
-; AVX512-NEXT: vpshufb %ymm12, %ymm9, %ymm9
-; AVX512-NEXT: vinserti64x4 $1, %ymm6, %zmm17, %zmm6
-; AVX512-NEXT: vinserti64x4 $1, %ymm7, %zmm18, %zmm7
+; AVX512-NEXT: vpshufb %ymm11, %ymm9, %ymm9
+; AVX512-NEXT: vinserti64x4 $1, %ymm6, %zmm18, %zmm6
+; AVX512-NEXT: vinserti64x4 $1, %ymm7, %zmm19, %zmm7
; AVX512-NEXT: vpternlogq {{.*#+}} zmm7 = zmm6 ^ (mem & (zmm7 ^ zmm6))
; AVX512-NEXT: vmovdqa 96(%r8), %ymm6
; AVX512-NEXT: vpshufd {{.*#+}} ymm10 = ymm6[1,2,2,3,5,6,6,7]
; AVX512-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-NEXT: vpshufd {{.*#+}} ymm10 = ymm6[0,0,2,1,4,4,6,5]
; AVX512-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm6[14,15,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm6[16,17,u,u,u,u],zero,zero
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm11 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535]
-; AVX512-NEXT: vpternlogq {{.*#+}} ymm6 = ymm6 | (ymm7 & ymm11)
-; AVX512-NEXT: vmovdqa64 %zmm11, %zmm12
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm12 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535]
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm6 = ymm6 | (ymm7 & ymm12)
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm11 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
; AVX512-NEXT: vpternlogq {{.*#+}} ymm9 = ymm9 | (ymm6 & ymm11)
; AVX512-NEXT: vextracti64x4 $1, %zmm7, %ymm6
@@ -12673,33 +12677,30 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm6 = zmm9[0,1,2,3],zmm6[0,1,2,3]
; AVX512-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm18 = [0,5,0,0,0,6,0,0,6,0,0,0,7,0,0,7]
; AVX512-NEXT: vmovdqa 96(%rax), %ymm6
-; AVX512-NEXT: vpermd %zmm6, %zmm18, %zmm7
-; AVX512-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: vpshufd {{.*#+}} ymm7 = ymm6[0,1,1,3,4,5,5,7]
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm11 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
-; AVX512-NEXT: vpshufb %ymm11, %ymm6, %ymm6
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm10 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
+; AVX512-NEXT: vpshufb %ymm10, %ymm6, %ymm6
; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,2,2,3]
; AVX512-NEXT: vpandn %ymm7, %ymm12, %ymm7
-; AVX512-NEXT: vmovdqa64 %zmm12, %zmm19
+; AVX512-NEXT: vmovdqa64 %zmm12, %zmm17
; AVX512-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm6
; AVX512-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm28 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535]
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm30 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535]
; AVX512-NEXT: vpbroadcastd 72(%rax), %ymm6
-; AVX512-NEXT: vpandnq %ymm6, %ymm28, %ymm6
+; AVX512-NEXT: vpandnq %ymm6, %ymm30, %ymm6
; AVX512-NEXT: vmovdqa 64(%rax), %ymm7
; AVX512-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpshufb %ymm11, %ymm7, %ymm7
+; AVX512-NEXT: vpshufb %ymm10, %ymm7, %ymm7
; AVX512-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm6
; AVX512-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: vmovdqa 64(%r9), %xmm7
; AVX512-NEXT: vmovdqa 64(%r8), %xmm8
; AVX512-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
-; AVX512-NEXT: vmovdqa64 %xmm8, %xmm17
-; AVX512-NEXT: vmovdqa64 %xmm7, %xmm20
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm15 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512-NEXT: vpshufb %xmm15, %xmm6, %xmm6
+; AVX512-NEXT: vmovdqa64 %xmm8, %xmm18
+; AVX512-NEXT: vmovdqa64 %xmm7, %xmm22
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm14 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
+; AVX512-NEXT: vpshufb %xmm14, %xmm6, %xmm6
; AVX512-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,1,1]
; AVX512-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: vmovdqa 64(%rcx), %xmm9
@@ -12708,75 +12709,75 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-NEXT: vpshuflw {{.*#+}} xmm8 = xmm8[0,2,3,3,4,5,6,7]
; AVX512-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,2,1]
; AVX512-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vmovdqa 64(%rdi), %xmm14
-; AVX512-NEXT: vmovdqa 64(%rsi), %xmm10
-; AVX512-NEXT: vpunpckhwd {{.*#+}} xmm8 = xmm10[4],xmm14[4],xmm10[5],xmm14[5],xmm10[6],xmm14[6],xmm10[7],xmm14[7]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm8 = xmm8[2,1,2,3,4,5,6,7]
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,5,5,4]
-; AVX512-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,1,3]
-; AVX512-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vpbroadcastd 8(%rax), %ymm8
-; AVX512-NEXT: vpandnq %ymm8, %ymm28, %ymm8
-; AVX512-NEXT: vmovdqa (%rax), %ymm12
-; AVX512-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpshufb %ymm11, %ymm12, %ymm13
-; AVX512-NEXT: vinserti64x4 $1, %ymm13, %zmm8, %zmm8
-; AVX512-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vmovdqa 64(%rdi), %xmm11
+; AVX512-NEXT: vmovdqa 64(%rsi), %xmm8
+; AVX512-NEXT: vpunpckhwd {{.*#+}} xmm12 = xmm8[4],xmm11[4],xmm8[5],xmm11[5],xmm8[6],xmm11[6],xmm8[7],xmm11[7]
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm12 = xmm12[2,1,2,3,4,5,6,7]
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,5,5,4]
+; AVX512-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,0,1,3]
+; AVX512-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vpbroadcastd 8(%rax), %ymm12
+; AVX512-NEXT: vpandnq %ymm12, %ymm30, %ymm12
+; AVX512-NEXT: vmovdqa (%rax), %ymm13
+; AVX512-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vpshufb %ymm10, %ymm13, %ymm13
+; AVX512-NEXT: vinserti64x4 $1, %ymm13, %zmm12, %zmm12
+; AVX512-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: vmovdqa (%r9), %xmm6
-; AVX512-NEXT: vmovdqa (%r8), %xmm12
-; AVX512-NEXT: vpunpckhwd {{.*#+}} xmm8 = xmm12[4],xmm6[4],xmm12[5],xmm6[5],xmm12[6],xmm6[6],xmm12[7],xmm6[7]
-; AVX512-NEXT: vmovdqa64 %xmm12, %xmm29
-; AVX512-NEXT: vmovdqa64 %xmm6, %xmm24
-; AVX512-NEXT: vpshufb %xmm15, %xmm8, %xmm8
-; AVX512-NEXT: vmovdqa64 %xmm15, %xmm25
-; AVX512-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,1,1]
-; AVX512-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vmovdqa (%rcx), %xmm8
+; AVX512-NEXT: vmovdqa (%r8), %xmm13
+; AVX512-NEXT: vpunpckhwd {{.*#+}} xmm12 = xmm13[4],xmm6[4],xmm13[5],xmm6[5],xmm13[6],xmm6[6],xmm13[7],xmm6[7]
+; AVX512-NEXT: vmovdqa64 %xmm13, %xmm25
+; AVX512-NEXT: vmovdqa64 %xmm6, %xmm26
+; AVX512-NEXT: vpshufb %xmm14, %xmm12, %xmm12
+; AVX512-NEXT: vmovdqa64 %xmm14, %xmm31
+; AVX512-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,0,1,1]
+; AVX512-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vmovdqa (%rcx), %xmm15
; AVX512-NEXT: vmovdqa (%rdx), %xmm13
-; AVX512-NEXT: vpunpckhwd {{.*#+}} xmm15 = xmm13[4],xmm8[4],xmm13[5],xmm8[5],xmm13[6],xmm8[6],xmm13[7],xmm8[7]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm15 = xmm15[0,2,3,3,4,5,6,7]
-; AVX512-NEXT: vpermq {{.*#+}} ymm12 = ymm15[0,0,2,1]
+; AVX512-NEXT: vpunpckhwd {{.*#+}} xmm12 = xmm13[4],xmm15[4],xmm13[5],xmm15[5],xmm13[6],xmm15[6],xmm13[7],xmm15[7]
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm12 = xmm12[0,2,3,3,4,5,6,7]
+; AVX512-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,0,2,1]
; AVX512-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: vmovdqa (%rdi), %xmm6
-; AVX512-NEXT: vmovdqa (%rsi), %xmm12
-; AVX512-NEXT: vpunpckhwd {{.*#+}} xmm15 = xmm12[4],xmm6[4],xmm12[5],xmm6[5],xmm12[6],xmm6[6],xmm12[7],xmm6[7]
-; AVX512-NEXT: vmovdqa64 %xmm12, %xmm21
+; AVX512-NEXT: vmovdqa (%rsi), %xmm14
+; AVX512-NEXT: vpunpckhwd {{.*#+}} xmm12 = xmm14[4],xmm6[4],xmm14[5],xmm6[5],xmm14[6],xmm6[6],xmm14[7],xmm6[7]
+; AVX512-NEXT: vmovdqa64 %xmm14, %xmm19
; AVX512-NEXT: vmovdqa64 %xmm6, %xmm16
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm15 = xmm15[2,1,2,3,4,5,6,7]
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,4,5,5,4]
-; AVX512-NEXT: vpermq {{.*#+}} ymm12 = ymm15[0,0,1,3]
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm12 = xmm12[2,1,2,3,4,5,6,7]
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,5,5,4]
+; AVX512-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,0,1,3]
; AVX512-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vpshuflw {{.*#+}} ymm15 = ymm4[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm15 = ymm15[0,0,0,0,4,4,4,4]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm12 = ymm2[0,1,1,3,4,5,5,7]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm12 = ymm12[0,1],ymm15[2],ymm12[3,4],ymm15[5],ymm12[6,7,8,9],ymm15[10],ymm12[11,12],ymm15[13],ymm12[14,15]
+; AVX512-NEXT: vpshuflw {{.*#+}} ymm12 = ymm4[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm12 = ymm12[0,0,0,0,4,4,4,4]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm14 = ymm2[0,1,1,3,4,5,5,7]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm12 = ymm14[0,1],ymm12[2],ymm14[3,4],ymm12[5],ymm14[6,7,8,9],ymm12[10],ymm14[11,12],ymm12[13],ymm14[14,15]
; AVX512-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,1,3,2]
; AVX512-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-NEXT: vpshuflw {{.*#+}} ymm12 = ymm5[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
; AVX512-NEXT: vpshufd {{.*#+}} ymm12 = ymm12[0,0,2,1,4,4,6,5]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm15 = ymm3[1,1,1,1,5,5,5,5]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm12 = ymm12[0,1],ymm15[2],ymm12[3,4],ymm15[5],ymm12[6,7,8,9],ymm15[10],ymm12[11,12],ymm15[13],ymm12[14,15]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm14 = ymm3[1,1,1,1,5,5,5,5]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm12 = ymm12[0,1],ymm14[2],ymm12[3,4],ymm14[5],ymm12[6,7,8,9],ymm14[10],ymm12[11,12],ymm14[13],ymm12[14,15]
; AVX512-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,2,2,3]
; AVX512-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vmovdqa 32(%rax), %ymm15
-; AVX512-NEXT: vpshufb %ymm11, %ymm15, %ymm11
-; AVX512-NEXT: vpshufd {{.*#+}} ymm12 = ymm15[0,1,1,3,4,5,5,7]
+; AVX512-NEXT: vmovdqa 32(%rax), %ymm12
+; AVX512-NEXT: vpshufb %ymm10, %ymm12, %ymm10
+; AVX512-NEXT: vpshufd {{.*#+}} ymm12 = ymm12[0,1,1,3,4,5,5,7]
; AVX512-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,2,2,3]
-; AVX512-NEXT: vpandnq %ymm12, %ymm19, %ymm12
-; AVX512-NEXT: vinserti64x4 $1, %ymm12, %zmm11, %zmm11
-; AVX512-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vpshuflw {{.*#+}} ymm11 = ymm0[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm11 = ymm11[0,0,0,0,4,4,4,4]
+; AVX512-NEXT: vpandnq %ymm12, %ymm17, %ymm12
+; AVX512-NEXT: vinserti64x4 $1, %ymm12, %zmm10, %zmm10
+; AVX512-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vpshuflw {{.*#+}} ymm10 = ymm0[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm10 = ymm10[0,0,0,0,4,4,4,4]
; AVX512-NEXT: vpshufd {{.*#+}} ymm12 = ymm1[0,0,2,1,4,4,6,5]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm11 = ymm11[0,1,2],ymm12[3],ymm11[4,5],ymm12[6],ymm11[7,8,9,10],ymm12[11],ymm11[12,13],ymm12[14],ymm11[15]
-; AVX512-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3]
-; AVX512-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vmovdqa64 %ymm31, %ymm6
-; AVX512-NEXT: vpshufb %ymm6, %ymm4, %ymm11
+; AVX512-NEXT: vpblendw {{.*#+}} ymm10 = ymm10[0,1,2],ymm12[3],ymm10[4,5],ymm12[6],ymm10[7,8,9,10],ymm12[11],ymm10[12,13],ymm12[14],ymm10[15]
+; AVX512-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,2,3,3]
+; AVX512-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vmovdqa64 %ymm27, %ymm14
+; AVX512-NEXT: vpshufb %ymm14, %ymm4, %ymm10
; AVX512-NEXT: vpshufd {{.*#+}} ymm12 = ymm2[2,2,2,2,6,6,6,6]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm11 = ymm11[0,1],ymm12[2],ymm11[3,4],ymm12[5],ymm11[6,7,8,9],ymm12[10],ymm11[11,12],ymm12[13],ymm11[14,15]
-; AVX512-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,2,3]
-; AVX512-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vpblendw {{.*#+}} ymm10 = ymm10[0,1],ymm12[2],ymm10[3,4],ymm12[5],ymm10[6,7,8,9],ymm12[10],ymm10[11,12],ymm12[13],ymm10[14,15]
+; AVX512-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,2,2,3]
+; AVX512-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[3,3,3,3,7,7,7,7]
; AVX512-NEXT: vpshufhw {{.*#+}} ymm4 = ymm4[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
; AVX512-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,2,2,2,6,6,6,6]
@@ -12806,49 +12807,47 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpermd %zmm15, %zmm18, %zmm0
-; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vmovdqa64 %ymm23, %ymm12
-; AVX512-NEXT: vpshuflw {{.*#+}} ymm0 = ymm12[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
+; AVX512-NEXT: vmovdqa64 %ymm24, %ymm6
+; AVX512-NEXT: vpshuflw {{.*#+}} ymm0 = ymm6[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
; AVX512-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,2,1,4,4,6,5]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm22[1,1,1,1,5,5,5,5]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm23[1,1,1,1,5,5,5,5]
; AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vpshufhw {{.*#+}} ymm0 = ymm12[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
+; AVX512-NEXT: vpshufhw {{.*#+}} ymm0 = ymm6[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
; AVX512-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,2,2,2,6,6,6,6]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm22[2,2,2,2,6,6,6,6]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm23[2,2,2,2,6,6,6,6]
; AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6,7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13,14,15]
; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vmovdqa64 %ymm26, %ymm2
+; AVX512-NEXT: vmovdqa64 %ymm29, %ymm2
; AVX512-NEXT: vpshuflw {{.*#+}} ymm0 = ymm2[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
; AVX512-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm27[0,1,1,3,4,5,5,7]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm28[0,1,1,3,4,5,5,7]
; AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7,8,9],ymm0[10],ymm1[11,12],ymm0[13],ymm1[14,15]
; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,3,2]
; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vpshufb %ymm6, %ymm2, %ymm0
-; AVX512-NEXT: vmovdqa64 %ymm26, %ymm3
-; AVX512-NEXT: vmovdqa64 %ymm31, %ymm11
-; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm27[2,2,2,2,6,6,6,6]
+; AVX512-NEXT: vpshufb %ymm14, %ymm2, %ymm0
+; AVX512-NEXT: vmovdqa64 %ymm29, %ymm3
+; AVX512-NEXT: vmovdqa64 %ymm27, %ymm10
+; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm28[2,2,2,2,6,6,6,6]
; AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX512-NEXT: vpshuflw {{.*#+}} ymm0 = ymm5[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
; AVX512-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
-; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm6[0,0,2,1,4,4,6,5]
+; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm12[0,0,2,1,4,4,6,5]
; AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7,8,9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: vprold $16, %ymm5, %ymm0
-; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm6[1,2,2,3,5,6,6,7]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm12[1,2,2,3,5,6,6,7]
; AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7,8,9],ymm0[10],ymm1[11,12],ymm0[13],ymm1[14,15]
; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,3,2]
; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpshufd {{.*#+}} ymm0 = ymm27[3,3,3,3,7,7,7,7]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm0 = ymm28[3,3,3,3,7,7,7,7]
; AVX512-NEXT: vpshufhw {{.*#+}} ymm1 = ymm3[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
; AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6,7,8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13,14,15]
@@ -12860,8 +12859,8 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,3,2,4,5,6,7]
; AVX512-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
; AVX512-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm22[3,3,3,3,7,7,7,7]
-; AVX512-NEXT: vpshufhw {{.*#+}} ymm3 = ymm12[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm23[3,3,3,3,7,7,7,7]
+; AVX512-NEXT: vpshufhw {{.*#+}} ymm3 = ymm6[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[2,2,2,2,6,6,6,6]
; AVX512-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3],ymm2[4,5],ymm3[6],ymm2[7,8,9,10],ymm3[11],ymm2[12,13],ymm3[14],ymm2[15]
; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,1,3,3]
@@ -12869,11 +12868,11 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-NEXT: vmovdqa 96(%rsi), %xmm2
; AVX512-NEXT: vmovdqa 96(%rdi), %xmm3
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX512-NEXT: vmovdqa64 %xmm25, %xmm12
-; AVX512-NEXT: vpshufb %xmm12, %xmm4, %xmm4
+; AVX512-NEXT: vmovdqa64 %xmm31, %xmm6
+; AVX512-NEXT: vpshufb %xmm6, %xmm4, %xmm4
; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1]
; AVX512-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpshufd {{.*#+}} ymm4 = ymm6[3,3,3,3,7,7,7,7]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm4 = ymm12[3,3,3,3,7,7,7,7]
; AVX512-NEXT: vpshufhw {{.*#+}} ymm5 = ymm5[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
; AVX512-NEXT: vpshufd {{.*#+}} ymm5 = ymm5[2,2,2,3,6,6,6,7]
; AVX512-NEXT: vpblendw {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7,8,9],ymm4[10],ymm5[11,12],ymm4[13],ymm5[14,15]
@@ -12884,8 +12883,8 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
; AVX512-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-NEXT: vpbroadcastq {{.*#+}} xmm6 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
-; AVX512-NEXT: vpshufb %xmm6, %xmm0, %xmm2
+; AVX512-NEXT: vpbroadcastq {{.*#+}} xmm5 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
+; AVX512-NEXT: vpshufb %xmm5, %xmm0, %xmm2
; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,2,2]
; AVX512-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2,3],xmm2[4],xmm3[5,6],xmm2[7]
; AVX512-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -12896,379 +12895,384 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX512-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX512-NEXT: vpshufb %xmm12, %xmm0, %xmm0
-; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm18 = [16,18,19,19,19,19,0,0,0,1,0,1,2,3,2,3]
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm1 = xmm2[0,1,2,3,4,5,7,6]
-; AVX512-NEXT: vpermt2d %zmm1, %zmm18, %zmm0
-; AVX512-NEXT: vpbroadcastd 100(%rax), %ymm1
-; AVX512-NEXT: vpbroadcastd 104(%rax), %ymm2
-; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm31
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm31 = zmm31 ^ (mem & (zmm31 ^ zmm0))
-; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3]
-; AVX512-NEXT: vpshufb %xmm6, %xmm9, %xmm1
-; AVX512-NEXT: vpshufd {{.*#+}} xmm2 = xmm7[1,1,2,2]
-; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2,3],xmm1[4],xmm2[5,6],xmm1[7]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,1,3,2,4,5,6,7]
-; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm2 = [16,16,17,17,17,17,0,0,0,1,0,1,2,3,2,3]
-; AVX512-NEXT: vpermt2d %zmm0, %zmm2, %zmm1
-; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm14[0],xmm10[0],xmm14[1],xmm10[1],xmm14[2],xmm10[2],xmm14[3],xmm10[3]
-; AVX512-NEXT: vpshufb %xmm12, %xmm0, %xmm0
-; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,1]
-; AVX512-NEXT: vprold $16, %xmm10, %xmm3
-; AVX512-NEXT: vpshufd {{.*#+}} xmm4 = xmm14[1,1,2,3]
-; AVX512-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1],xmm3[2],xmm4[3,4],xmm3[5],xmm4[6,7]
-; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,2,1]
-; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm14
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm0 = [65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0]
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm14 = zmm1 ^ (zmm0 & (zmm14 ^ zmm1))
-; AVX512-NEXT: vmovdqa64 %xmm17, %xmm1
-; AVX512-NEXT: vmovdqa64 %xmm20, %xmm3
-; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,4,5,7,6]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,2,4,5,6,7]
-; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,1,0,1,0,1,1,3,16,18,19,19,19,19,0,0]
-; AVX512-NEXT: vpermt2d %zmm3, %zmm4, %zmm1
-; AVX512-NEXT: vpbroadcastd 64(%rax), %ymm3
-; AVX512-NEXT: vpbroadcastd 68(%rax), %ymm5
-; AVX512-NEXT: vinserti64x4 $1, %ymm5, %zmm3, %zmm25
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm19 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm25 = zmm25 ^ (zmm19 & (zmm25 ^ zmm1))
-; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm13[0],xmm8[0],xmm13[1],xmm8[1],xmm13[2],xmm8[2],xmm13[3],xmm8[3]
-; AVX512-NEXT: vpshufb %xmm6, %xmm8, %xmm3
-; AVX512-NEXT: vpshufd {{.*#+}} xmm5 = xmm13[1,1,2,2]
-; AVX512-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0],xmm3[1],xmm5[2,3],xmm3[4],xmm5[5,6],xmm3[7]
+; AVX512-NEXT: vmovdqa64 %xmm31, %xmm4
+; AVX512-NEXT: vpshufb %xmm4, %xmm0, %xmm1
+; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm17 = [16,18,19,19,19,19,0,0,0,1,0,1,2,3,2,3]
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,7,6]
+; AVX512-NEXT: vpermt2d %zmm2, %zmm17, %zmm1
+; AVX512-NEXT: vpbroadcastd 100(%rax), %ymm2
+; AVX512-NEXT: vpbroadcastd 104(%rax), %ymm3
+; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm0
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm1))
+; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3]
+; AVX512-NEXT: vmovdqa %xmm5, %xmm12
+; AVX512-NEXT: vpshufb %xmm5, %xmm9, %xmm2
+; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm7[1,1,2,2]
+; AVX512-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2,3],xmm2[4],xmm3[5,6],xmm2[7]
; AVX512-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,2,4,5,6,7]
-; AVX512-NEXT: vpermt2d %zmm1, %zmm2, %zmm3
-; AVX512-NEXT: vmovdqa64 %xmm16, %xmm5
-; AVX512-NEXT: vmovdqa64 %xmm21, %xmm2
-; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
-; AVX512-NEXT: vpshufb %xmm12, %xmm1, %xmm1
+; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm3 = [16,16,17,17,17,17,0,0,0,1,0,1,2,3,2,3]
+; AVX512-NEXT: vpermt2d %zmm1, %zmm3, %zmm2
+; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm11[0],xmm8[0],xmm11[1],xmm8[1],xmm11[2],xmm8[2],xmm11[3],xmm8[3]
+; AVX512-NEXT: vpshufb %xmm4, %xmm1, %xmm1
+; AVX512-NEXT: vmovdqa64 %xmm31, %xmm0
; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,1,1]
-; AVX512-NEXT: vprold $16, %xmm21, %xmm2
-; AVX512-NEXT: vpshufd {{.*#+}} xmm5 = xmm16[1,1,2,3]
-; AVX512-NEXT: vpblendw {{.*#+}} xmm2 = xmm5[0,1],xmm2[2],xmm5[3,4],xmm2[5],xmm5[6,7]
-; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,0,2,1]
-; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm5
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm5 = zmm3 ^ (zmm0 & (zmm5 ^ zmm3))
-; AVX512-NEXT: vmovdqa64 %xmm29, %xmm0
-; AVX512-NEXT: vmovdqa64 %xmm24, %xmm1
-; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,5,7,6]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,1,3,2,4,5,6,7]
-; AVX512-NEXT: vpermt2d %zmm1, %zmm4, %zmm0
-; AVX512-NEXT: vpbroadcastd (%rax), %ymm1
-; AVX512-NEXT: vpbroadcastd 4(%rax), %ymm2
-; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm20
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm20 = zmm20 ^ (zmm19 & (zmm20 ^ zmm0))
+; AVX512-NEXT: vprold $16, %xmm8, %xmm4
+; AVX512-NEXT: vpshufd {{.*#+}} xmm5 = xmm11[1,1,2,3]
+; AVX512-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1],xmm4[2],xmm5[3,4],xmm4[5],xmm5[6,7]
+; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,2,1]
+; AVX512-NEXT: vinserti64x4 $1, %ymm4, %zmm1, %zmm14
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm1 = [65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0]
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm14 = zmm2 ^ (zmm1 & (zmm14 ^ zmm2))
+; AVX512-NEXT: vmovdqa64 %xmm18, %xmm2
+; AVX512-NEXT: vmovdqa64 %xmm22, %xmm4
+; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm4 = xmm2[0,1,2,3,4,5,7,6]
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,3,2,4,5,6,7]
+; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm5 = [0,1,0,1,0,1,1,3,16,18,19,19,19,19,0,0]
+; AVX512-NEXT: vpermt2d %zmm4, %zmm5, %zmm2
+; AVX512-NEXT: vpbroadcastd 64(%rax), %ymm4
+; AVX512-NEXT: vpbroadcastd 68(%rax), %ymm6
+; AVX512-NEXT: vinserti64x4 $1, %ymm6, %zmm4, %zmm23
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm18 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm23 = zmm23 ^ (zmm18 & (zmm23 ^ zmm2))
+; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm13[0],xmm15[0],xmm13[1],xmm15[1],xmm13[2],xmm15[2],xmm13[3],xmm15[3]
+; AVX512-NEXT: vpshufb %xmm12, %xmm15, %xmm4
+; AVX512-NEXT: vpshufd {{.*#+}} xmm6 = xmm13[1,1,2,2]
+; AVX512-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0],xmm4[1],xmm6[2,3],xmm4[4],xmm6[5,6],xmm4[7]
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,3,2,4,5,6,7]
+; AVX512-NEXT: vpermt2d %zmm2, %zmm3, %zmm4
+; AVX512-NEXT: vmovdqa64 %xmm16, %xmm6
+; AVX512-NEXT: vmovdqa64 %xmm19, %xmm3
+; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
+; AVX512-NEXT: vpshufb %xmm0, %xmm2, %xmm2
+; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,0,1,1]
+; AVX512-NEXT: vprold $16, %xmm19, %xmm3
+; AVX512-NEXT: vpshufd {{.*#+}} xmm6 = xmm16[1,1,2,3]
+; AVX512-NEXT: vpblendw {{.*#+}} xmm3 = xmm6[0,1],xmm3[2],xmm6[3,4],xmm3[5],xmm6[6,7]
+; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,2,1]
+; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm7
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm7 = zmm4 ^ (zmm1 & (zmm7 ^ zmm4))
+; AVX512-NEXT: vmovdqa64 %xmm25, %xmm1
+; AVX512-NEXT: vmovdqa64 %xmm26, %xmm2
+; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm2 = xmm1[0,1,2,3,4,5,7,6]
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,2,4,5,6,7]
+; AVX512-NEXT: vpermt2d %zmm2, %zmm5, %zmm1
+; AVX512-NEXT: vpbroadcastd (%rax), %ymm2
+; AVX512-NEXT: vpbroadcastd 4(%rax), %ymm3
+; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm19
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm19 = zmm19 ^ (zmm18 & (zmm19 ^ zmm1))
; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX512-NEXT: vpshuflw {{.*#+}} ymm0 = ymm6[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,2,1,4,4,6,5]
-; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm10[1,1,1,1,5,5,5,5]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
-; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
-; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vpshufhw {{.*#+}} ymm0 = ymm6[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,2,2,2,6,6,6,6]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm10[2,2,2,2,6,6,6,6]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6,7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13,14,15]
-; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX512-NEXT: vpshuflw {{.*#+}} ymm0 = ymm9[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
+; AVX512-NEXT: vpshuflw {{.*#+}} ymm1 = ymm6[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,0,2,1,4,4,6,5]
+; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm11[1,1,1,1,5,5,5,5]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
+; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
+; AVX512-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vpshufhw {{.*#+}} ymm1 = ymm6[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm11[2,2,2,2,6,6,6,6]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6,7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13,14,15]
+; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX512-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm3[0,1,1,3,4,5,5,7]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7,8,9],ymm0[10],ymm1[11,12],ymm0[13],ymm1[14,15]
-; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,3,2]
-; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vpshufb %ymm11, %ymm9, %ymm0
-; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm3[2,2,2,2,6,6,6,6]
-; AVX512-NEXT: vmovdqa %ymm3, %ymm11
-; AVX512-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
-; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX512-NEXT: vpshuflw {{.*#+}} ymm0 = ymm7[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
+; AVX512-NEXT: vpshuflw {{.*#+}} ymm1 = ymm3[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,0,0,0,4,4,4,4]
+; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm12[0,1,1,3,4,5,5,7]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7,8,9],ymm1[10],ymm2[11,12],ymm1[13],ymm2[14,15]
+; AVX512-NEXT: vpermq {{.*#+}} ymm28 = ymm1[2,1,3,2]
+; AVX512-NEXT: vpshufb %ymm10, %ymm3, %ymm1
+; AVX512-NEXT: vmovdqa %ymm3, %ymm10
+; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm12[2,2,2,2,6,6,6,6]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm8[0,0,2,1,4,4,6,5]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm0[0,1,2],ymm2[3],ymm0[4,5],ymm2[6],ymm0[7,8,9,10],ymm2[11],ymm0[12,13],ymm2[14],ymm0[15]
-; AVX512-NEXT: vprold $16, %ymm7, %ymm0
-; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm8[1,2,2,3,5,6,6,7]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm4 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7,8,9],ymm0[10],ymm2[11,12],ymm0[13],ymm2[14,15]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm0 = ymm11[3,3,3,3,7,7,7,7]
-; AVX512-NEXT: vpshufhw {{.*#+}} ymm2 = ymm9[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[2,2,2,2,6,6,6,6]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4],ymm0[5,6,7,8],ymm2[9],ymm0[10,11],ymm2[12],ymm0[13,14,15]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm0 = ymm10[3,3,3,3,7,7,7,7]
+; AVX512-NEXT: vpshuflw {{.*#+}} ymm1 = ymm8[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,0,0,0,4,4,4,4]
+; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm9[0,0,2,1,4,4,6,5]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm4 = ymm1[0,1,2],ymm3[3],ymm1[4,5],ymm3[6],ymm1[7,8,9,10],ymm3[11],ymm1[12,13],ymm3[14],ymm1[15]
+; AVX512-NEXT: vprold $16, %ymm8, %ymm1
+; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm9[1,2,2,3,5,6,6,7]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm5 = ymm3[0,1],ymm1[2],ymm3[3,4],ymm1[5],ymm3[6,7,8,9],ymm1[10],ymm3[11,12],ymm1[13],ymm3[14,15]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm12[3,3,3,3,7,7,7,7]
+; AVX512-NEXT: vpshufhw {{.*#+}} ymm3 = ymm10[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[2,2,2,2,6,6,6,6]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm1[0],ymm3[1],ymm1[2,3],ymm3[4],ymm1[5,6,7,8],ymm3[9],ymm1[10,11],ymm3[12],ymm1[13,14,15]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm11[3,3,3,3,7,7,7,7]
; AVX512-NEXT: vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
; AVX512-NEXT: vpshufd {{.*#+}} ymm6 = ymm6[2,2,2,2,6,6,6,6]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1,2],ymm6[3],ymm0[4,5],ymm6[6],ymm0[7,8,9,10],ymm6[11],ymm0[12,13],ymm6[14],ymm0[15]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm6 = ymm8[3,3,3,3,7,7,7,7]
-; AVX512-NEXT: vpshufhw {{.*#+}} ymm7 = ymm7[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm7 = ymm7[2,2,2,3,6,6,6,7]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm6 = ymm7[0,1],ymm6[2],ymm7[3,4],ymm6[5],ymm7[6,7,8,9],ymm6[10],ymm7[11,12],ymm6[13],ymm7[14,15]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1,2],ymm6[3],ymm1[4,5],ymm6[6],ymm1[7,8,9,10],ymm6[11],ymm1[12,13],ymm6[14],ymm1[15]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm6 = ymm9[3,3,3,3,7,7,7,7]
+; AVX512-NEXT: vpshufhw {{.*#+}} ymm8 = ymm8[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm8 = ymm8[2,2,2,3,6,6,6,7]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm6 = ymm8[0,1],ymm6[2],ymm8[3,4],ymm6[5],ymm8[6,7,8,9],ymm6[10],ymm8[11,12],ymm6[13],ymm8[14,15]
; AVX512-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpermq {{.*#+}} ymm22 = ymm1[2,2,2,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm26 = ymm3[2,2,3,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm24 = ymm4[2,1,3,2]
-; AVX512-NEXT: vmovdqa 32(%rdi), %xmm9
-; AVX512-NEXT: vmovdqa 32(%rsi), %xmm10
-; AVX512-NEXT: vprold $16, %xmm10, %xmm1
-; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm9[1,1,2,3]
-; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1],xmm1[2],xmm3[3,4],xmm1[5],xmm3[6,7]
-; AVX512-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpermq {{.*#+}} ymm21 = ymm2[0,2,2,3]
-; AVX512-NEXT: vmovdqa 32(%r9), %xmm1
-; AVX512-NEXT: vmovdqa 32(%r8), %xmm2
-; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3]
-; AVX512-NEXT: vpshufb %xmm12, %xmm2, %xmm3
-; AVX512-NEXT: vpshufb %xmm12, %xmm1, %xmm1
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm2 = xmm6[0,1,2,3,4,5,7,6]
-; AVX512-NEXT: vmovdqa64 %xmm6, %xmm27
-; AVX512-NEXT: vpermt2d %zmm2, %zmm18, %zmm1
-; AVX512-NEXT: vpbroadcastd 36(%rax), %ymm2
-; AVX512-NEXT: vpbroadcastd 40(%rax), %ymm4
-; AVX512-NEXT: vinserti64x4 $1, %ymm4, %zmm2, %zmm13
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm13 = zmm13 ^ (mem & (zmm13 ^ zmm1))
-; AVX512-NEXT: vmovdqa 32(%rcx), %xmm7
+; AVX512-NEXT: vpermq {{.*#+}} ymm25 = ymm2[2,2,2,3]
+; AVX512-NEXT: vpermq {{.*#+}} ymm29 = ymm4[2,2,3,3]
+; AVX512-NEXT: vpermq {{.*#+}} ymm27 = ymm5[2,1,3,2]
+; AVX512-NEXT: vmovdqa 32(%rdi), %xmm10
+; AVX512-NEXT: vmovdqa 32(%rsi), %xmm11
+; AVX512-NEXT: vprold $16, %xmm11, %xmm2
+; AVX512-NEXT: vpshufd {{.*#+}} xmm4 = xmm10[1,1,2,3]
+; AVX512-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0,1],xmm2[2],xmm4[3,4],xmm2[5],xmm4[6,7]
+; AVX512-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vpermq {{.*#+}} ymm26 = ymm3[0,2,2,3]
+; AVX512-NEXT: vmovdqa 32(%r9), %xmm2
+; AVX512-NEXT: vmovdqa 32(%r8), %xmm3
+; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; AVX512-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3]
+; AVX512-NEXT: vpshufb %xmm0, %xmm3, %xmm4
+; AVX512-NEXT: vpshufb %xmm0, %xmm2, %xmm2
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm3 = xmm6[0,1,2,3,4,5,7,6]
+; AVX512-NEXT: vmovdqa64 %xmm6, %xmm24
+; AVX512-NEXT: vpermt2d %zmm3, %zmm17, %zmm2
+; AVX512-NEXT: vpbroadcastd 36(%rax), %ymm0
+; AVX512-NEXT: vpbroadcastd 40(%rax), %ymm3
+; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm13
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm13 = zmm13 ^ (mem & (zmm13 ^ zmm2))
+; AVX512-NEXT: vmovdqa 32(%rcx), %xmm8
; AVX512-NEXT: vmovdqa 32(%rdx), %xmm6
-; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm4 = xmm4[0,1,3,2,4,5,6,7]
-; AVX512-NEXT: vpshufd {{.*#+}} xmm12 = xmm4[0,0,1,1]
-; AVX512-NEXT: vpermq {{.*#+}} ymm15 = ymm0[2,1,3,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm8 = ymm3[0,0,1,1]
-; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 32-byte Folded Reload
+; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3]
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,1,3,2,4,5,6,7]
+; AVX512-NEXT: vpshufd {{.*#+}} xmm12 = xmm0[0,0,1,1]
+; AVX512-NEXT: vpermq {{.*#+}} ymm17 = ymm1[2,1,3,3]
+; AVX512-NEXT: vpermq {{.*#+}} ymm15 = ymm4[0,0,1,1]
; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm1 # 32-byte Folded Reload
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 ^ (zmm4 & (zmm1 ^ zmm3))
-; AVX512-NEXT: vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm3 = mem[2,1,3,2]
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm29 = [65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535]
-; AVX512-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 ^ (ymm29 & (ymm3 ^ ymm1))
-; AVX512-NEXT: vpternlogq {{.*#+}} ymm30 = ymm30 ^ (ymm28 & (ymm30 ^ ymm3))
-; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 32-byte Folded Reload
-; AVX512-NEXT: vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm28, %zmm3 # 64-byte Folded Reload
-; AVX512-NEXT: # zmm3 = (zmm3 & zmm28) | mem
; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
-; AVX512-NEXT: vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm28, %zmm0 # 64-byte Folded Reload
-; AVX512-NEXT: # zmm0 = (zmm0 & zmm28) | mem
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm9 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (zmm9 & (zmm0 ^ zmm1))
+; AVX512-NEXT: vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm1 = mem[2,1,3,2]
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm31 = [65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535]
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm1 = ymm1 ^ (ymm31 & (ymm1 ^ ymm0))
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm20 = ymm20 ^ (ymm30 & (ymm20 ^ ymm1))
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
+; AVX512-NEXT: vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm30, %zmm1 # 64-byte Folded Reload
+; AVX512-NEXT: # zmm1 = (zmm1 & zmm30) | mem
; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm23 # 32-byte Folded Reload
+; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm4 # 32-byte Folded Reload
+; AVX512-NEXT: vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm30, %zmm4 # 64-byte Folded Reload
+; AVX512-NEXT: # zmm4 = (zmm4 & zmm30) | mem
; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm18 # 32-byte Folded Reload
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm28 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm18 = zmm23 ^ (zmm28 & (zmm18 ^ zmm23))
+; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 32-byte Folded Reload
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm30 # 32-byte Folded Reload
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm22 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm30 = zmm2 ^ (zmm22 & (zmm30 ^ zmm2))
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535]
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm18 = zmm3 ^ (zmm2 & (zmm18 ^ zmm3))
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm30 = zmm1 ^ (zmm2 & (zmm30 ^ zmm1))
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 32-byte Folded Reload
-; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm23 # 32-byte Folded Reload
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm23 = zmm3 ^ (zmm28 & (zmm23 ^ zmm3))
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm23 = zmm0 ^ (zmm2 & (zmm23 ^ zmm0))
-; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
+; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm5 # 32-byte Folded Reload
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm5 = zmm1 ^ (zmm22 & (zmm5 ^ zmm1))
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm5 = zmm4 ^ (zmm2 & (zmm5 ^ zmm4))
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm17 # 32-byte Folded Reload
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm17 = zmm0 ^ (mem & (zmm17 ^ zmm0))
-; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm0
-; AVX512-NEXT: vpternlogd {{.*#+}} ymm0 = mem ^ (ymm19 & (ymm0 ^ mem))
+; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm16 # 32-byte Folded Reload
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm16 = zmm1 ^ (mem & (zmm16 ^ zmm1))
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512-NEXT: vpternlogd {{.*#+}} ymm0 = mem ^ (ymm18 & (ymm0 ^ mem))
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
-; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm2 ^ (ymm1 & (ymm2 ^ ymm0))
-; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512-NEXT: vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm19 # 64-byte Folded Reload
-; AVX512-NEXT: # zmm19 = zmm19 | (zmm1 & mem)
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm21 = ymm21 ^ (ymm1 & (ymm21 ^ ymm0))
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512-NEXT: vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm18 # 64-byte Folded Reload
+; AVX512-NEXT: # zmm18 = zmm18 | (zmm1 & mem)
; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512-NEXT: vshufi64x2 $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
-; AVX512-NEXT: # zmm0 = zmm0[0,1,2,3],mem[0,1,2,3]
-; AVX512-NEXT: vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm0 # 64-byte Folded Reload
-; AVX512-NEXT: # zmm0 = (zmm0 & zmm1) | mem
-; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm30[0,1,2,3],zmm2[0,1,2,3]
-; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0]
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm30 = zmm30 ^ (zmm2 & (zmm30 ^ zmm1))
-; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
-; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm11 = zmm11 ^ (zmm2 & (zmm11 ^ zmm1))
+; AVX512-NEXT: vshufi64x2 $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
+; AVX512-NEXT: # zmm3 = zmm0[0,1,2,3],mem[0,1,2,3]
+; AVX512-NEXT: vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm3 # 64-byte Folded Reload
+; AVX512-NEXT: # zmm3 = (zmm3 & zmm1) | mem
+; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm20[0,1,2,3],zmm21[0,1,2,3]
+; AVX512-NEXT: vmovdqa64 64(%rax), %zmm2
+; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm21 = [0,13,0,0,0,14,0,0,14,0,0,0,15,0,0,15]
+; AVX512-NEXT: vpermd %zmm2, %zmm21, %zmm20
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0]
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm20 = zmm20 ^ (zmm4 & (zmm20 ^ zmm1))
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm1 # 32-byte Folded Reload
+; AVX512-NEXT: vmovdqa64 (%rax), %zmm0
+; AVX512-NEXT: vpermd %zmm0, %zmm21, %zmm21
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm21 = zmm21 ^ (zmm4 & (zmm21 ^ zmm1))
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535]
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm25 = zmm25 ^ (zmm1 & (zmm25 ^ zmm14))
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm20 = zmm20 ^ (zmm1 & (zmm20 ^ zmm5))
-; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
-; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 32-byte Folded Reload
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535]
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm2 = zmm1 ^ (zmm5 & (zmm2 ^ zmm1))
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm23 = zmm23 ^ (zmm1 & (zmm23 ^ zmm14))
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm19 = zmm19 ^ (zmm1 & (zmm19 ^ zmm7))
; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
-; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512-NEXT: vinserti64x4 $1, %ymm22, %zmm3, %zmm22
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm22 = zmm1 ^ (zmm5 & (zmm22 ^ zmm1))
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm7 # 32-byte Folded Reload
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm14 = [0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535]
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm7 = zmm1 ^ (zmm14 & (zmm7 ^ zmm1))
; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
-; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm14 = [0,0,4,0,0,0,5,0,0,5,0,0,0,6,0,0]
-; AVX512-NEXT: vpermd 64(%rax), %zmm14, %zmm5
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535]
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 ^ (zmm3 & (zmm5 ^ zmm1))
-; AVX512-NEXT: vinserti64x4 $1, %ymm24, %zmm26, %zmm1
-; AVX512-NEXT: vpermd (%rax), %zmm14, %zmm14
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm14 = zmm14 ^ (zmm3 & (zmm14 ^ zmm1))
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0]
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 ^ (zmm1 & (zmm5 ^ zmm2))
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm14 = zmm14 ^ (zmm1 & (zmm14 ^ zmm22))
+; AVX512-NEXT: vinserti64x4 $1, %ymm25, %zmm28, %zmm25
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm25 = zmm1 ^ (zmm14 & (zmm25 ^ zmm1))
; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
+; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm14 # 32-byte Folded Reload
+; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm28 = [0,0,4,0,0,0,5,0,0,5,0,0,0,6,0,0]
+; AVX512-NEXT: vpermd %zmm2, %zmm28, %zmm1
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535]
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 ^ (zmm4 & (zmm1 ^ zmm14))
+; AVX512-NEXT: vinserti64x4 $1, %ymm27, %zmm29, %zmm2
+; AVX512-NEXT: vpermd %zmm0, %zmm28, %zmm14
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm14 = zmm14 ^ (zmm4 & (zmm14 ^ zmm2))
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0]
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 ^ (zmm0 & (zmm1 ^ zmm7))
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm14 = zmm14 ^ (zmm0 & (zmm14 ^ zmm25))
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 32-byte Folded Reload
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (zmm4 & (zmm2 ^ zmm1))
-; AVX512-NEXT: vpermq $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm1 = mem[0,1,1,3]
-; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm1
-; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm22 # 32-byte Folded Reload
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm22 = zmm1 ^ (zmm4 & (zmm22 ^ zmm1))
-; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm12[0,1,1,3]
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm21, %zmm1
-; AVX512-NEXT: vinserti64x4 $1, %ymm8, %zmm15, %zmm8
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm8 = zmm1 ^ (zmm4 & (zmm8 ^ zmm1))
-; AVX512-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm4 = xmm7[u,u,4,5,u,u,u,u,6,7,u,u,u,u,8,9]
-; AVX512-NEXT: vpshufd {{.*#+}} xmm9 = xmm6[1,1,2,2]
-; AVX512-NEXT: vpblendw {{.*#+}} xmm4 = xmm9[0],xmm4[1],xmm9[2,3],xmm4[4],xmm9[5,6],xmm4[7]
-; AVX512-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
-; AVX512-NEXT: vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm7 = mem[2,2,2,3]
-; AVX512-NEXT: vpshuflw $180, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
-; AVX512-NEXT: # xmm9 = mem[0,1,3,2,4,5,6,7]
-; AVX512-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[0,1,1,3]
-; AVX512-NEXT: vpshufd $254, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm10 = mem[2,3,3,3,6,7,7,7]
-; AVX512-NEXT: vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm16 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm16 = mem[0,0,2,1]
-; AVX512-NEXT: vpshuflw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
-; AVX512-NEXT: # xmm12 = mem[2,1,2,3,4,5,6,7]
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,5,5,4]
-; AVX512-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,0,1,3]
-; AVX512-NEXT: vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm21 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm21 = mem[0,0,1,1]
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (zmm9 & (zmm2 ^ zmm0))
+; AVX512-NEXT: vpermq $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm0 = mem[0,1,1,3]
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm4, %zmm0
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm7 # 32-byte Folded Reload
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm7 = zmm0 ^ (zmm9 & (zmm7 ^ zmm0))
+; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm12[0,1,1,3]
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm26, %zmm0
+; AVX512-NEXT: vinserti64x4 $1, %ymm15, %zmm17, %zmm12
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm12 = zmm0 ^ (zmm9 & (zmm12 ^ zmm0))
+; AVX512-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
+; AVX512-NEXT: vpshufb {{.*#+}} xmm9 = xmm8[u,u,4,5,u,u,u,u,6,7,u,u,u,u,8,9]
+; AVX512-NEXT: vpshufd {{.*#+}} xmm10 = xmm6[1,1,2,2]
+; AVX512-NEXT: vpblendw {{.*#+}} xmm9 = xmm10[0],xmm9[1],xmm10[2,3],xmm9[4],xmm10[5,6],xmm9[7]
+; AVX512-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7]
+; AVX512-NEXT: vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm8 = mem[2,2,2,3]
+; AVX512-NEXT: vpshuflw $180, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm10 = mem[0,1,3,2,4,5,6,7]
+; AVX512-NEXT: vpshufd {{.*#+}} xmm10 = xmm10[0,1,1,3]
+; AVX512-NEXT: vpshufd $254, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm11 = mem[2,3,3,3,6,7,7,7]
+; AVX512-NEXT: vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm25 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm25 = mem[0,0,2,1]
+; AVX512-NEXT: vpshuflw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm4 = mem[2,1,2,3,4,5,6,7]
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,5,4]
+; AVX512-NEXT: vpermq {{.*#+}} ymm27 = ymm4[0,0,1,3]
+; AVX512-NEXT: vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm17 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm17 = mem[0,0,1,1]
; AVX512-NEXT: vpshuflw $248, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
; AVX512-NEXT: # xmm15 = mem[0,2,3,3,4,5,6,7]
; AVX512-NEXT: vpermq {{.*#+}} ymm15 = ymm15[0,0,2,1]
-; AVX512-NEXT: vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm24 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm24 = mem[2,2,2,3]
-; AVX512-NEXT: vmovdqa64 %xmm27, %xmm3
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[0,1,3,2,4,5,6,7]
-; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,1,1,3]
-; AVX512-NEXT: vpshufd $254, {{[-0-9]+}}(%r{{[sb]}}p), %ymm26 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm26 = mem[2,3,3,3,6,7,7,7]
-; AVX512-NEXT: vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm27 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm27 = mem[0,0,2,1]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7]
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,4]
-; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,1,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1]
+; AVX512-NEXT: vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm26 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm26 = mem[2,2,2,3]
+; AVX512-NEXT: vmovdqa64 %xmm24, %xmm4
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm4 = xmm4[0,1,3,2,4,5,6,7]
+; AVX512-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[0,1,1,3]
+; AVX512-NEXT: vpshufd $254, {{[-0-9]+}}(%r{{[sb]}}p), %ymm28 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm28 = mem[2,3,3,3,6,7,7,7]
+; AVX512-NEXT: vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm29 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm29 = mem[0,0,2,1]
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,4]
+; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,3]
+; AVX512-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,0,1,1]
; AVX512-NEXT: vpshuflw {{.*#+}} xmm6 = xmm6[0,2,3,3,4,5,6,7]
; AVX512-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,2,1]
-; AVX512-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,0,0,1]
-; AVX512-NEXT: vinserti64x4 $1, %ymm9, %zmm7, %zmm7
-; AVX512-NEXT: vpermq {{.*#+}} ymm9 = ymm10[2,1,3,2]
-; AVX512-NEXT: vpbroadcastd 96(%rax), %ymm10
-; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm9 = zmm9 ^ (zmm29 & (zmm9 ^ zmm7))
-; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1]
-; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm24, %zmm3
-; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm26[2,1,3,2]
-; AVX512-NEXT: vpbroadcastd 32(%rax), %ymm10
-; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm7, %zmm7
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm7 = zmm7 ^ (zmm29 & (zmm7 ^ zmm3))
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535]
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm9 = zmm9 ^ (zmm3 & (zmm9 ^ zmm22))
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm7 = zmm7 ^ (zmm3 & (zmm7 ^ zmm8))
-; AVX512-NEXT: vinserti64x4 $1, %ymm12, %zmm16, %zmm3
-; AVX512-NEXT: vinserti64x4 $1, %ymm15, %zmm21, %zmm8
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm8 = zmm3 ^ (zmm28 & (zmm8 ^ zmm3))
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm27, %zmm1
-; AVX512-NEXT: vinserti64x4 $1, %ymm6, %zmm4, %zmm3
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm3 = zmm1 ^ (zmm28 & (zmm3 ^ zmm1))
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm1 = [65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0]
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm31 = zmm31 ^ (zmm1 & (zmm31 ^ zmm8))
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm13 = zmm13 ^ (zmm1 & (zmm13 ^ zmm3))
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm17))
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm11 = zmm11 ^ (mem & (zmm11 ^ zmm2))
+; AVX512-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,0,1]
+; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm8, %zmm8
+; AVX512-NEXT: vpermq {{.*#+}} ymm10 = ymm11[2,1,3,2]
+; AVX512-NEXT: vpbroadcastd 96(%rax), %ymm11
+; AVX512-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm10
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm10 = zmm10 ^ (zmm31 & (zmm10 ^ zmm8))
+; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
+; AVX512-NEXT: vinserti64x4 $1, %ymm4, %zmm26, %zmm4
+; AVX512-NEXT: vpermq {{.*#+}} ymm8 = ymm28[2,1,3,2]
+; AVX512-NEXT: vpbroadcastd 32(%rax), %ymm11
+; AVX512-NEXT: vinserti64x4 $1, %ymm11, %zmm8, %zmm8
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm8 = zmm8 ^ (zmm31 & (zmm8 ^ zmm4))
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535]
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm10 = zmm10 ^ (zmm4 & (zmm10 ^ zmm7))
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm8 = zmm8 ^ (zmm4 & (zmm8 ^ zmm12))
+; AVX512-NEXT: vinserti64x4 $1, %ymm27, %zmm25, %zmm4
+; AVX512-NEXT: vinserti64x4 $1, %ymm15, %zmm17, %zmm7
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm7 = zmm4 ^ (zmm22 & (zmm7 ^ zmm4))
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm29, %zmm0
+; AVX512-NEXT: vinserti64x4 $1, %ymm6, %zmm9, %zmm4
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm4 = zmm0 ^ (zmm22 & (zmm4 ^ zmm0))
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm0 = [65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0]
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm6 = zmm6 ^ (zmm0 & (zmm6 ^ zmm7))
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm13 = zmm13 ^ (zmm0 & (zmm13 ^ zmm4))
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm3 = zmm3 ^ (mem & (zmm3 ^ zmm16))
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm21 = zmm21 ^ (mem & (zmm21 ^ zmm2))
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512-NEXT: vmovdqa64 %zmm0, 320(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm3, 320(%rax)
; AVX512-NEXT: vmovdqa64 %zmm13, 256(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm7, 192(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm8, 192(%rax)
; AVX512-NEXT: vmovdqa64 %zmm14, 128(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm23, 64(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm20, (%rax)
-; AVX512-NEXT: vmovdqa64 %zmm25, 448(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm31, 704(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm9, 640(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm5, 576(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm18, 512(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm11, 384(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm19, 768(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm30, 832(%rax)
-; AVX512-NEXT: addq $2840, %rsp # imm = 0xB18
+; AVX512-NEXT: vmovdqa64 %zmm5, 64(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm19, (%rax)
+; AVX512-NEXT: vmovdqa64 %zmm23, 448(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm6, 704(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm10, 640(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm1, 576(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm30, 512(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm21, 384(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm18, 768(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm20, 832(%rax)
+; AVX512-NEXT: addq $2648, %rsp # imm = 0xA58
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
; AVX512-FCP-LABEL: store_i16_stride7_vf64:
; AVX512-FCP: # %bb.0:
-; AVX512-FCP-NEXT: subq $1576, %rsp # imm = 0x628
-; AVX512-FCP-NEXT: vmovdqa 96(%rcx), %ymm4
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128]
-; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm4, %ymm0
-; AVX512-FCP-NEXT: vmovdqa 96(%rdx), %ymm5
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm5, %ymm1
-; AVX512-FCP-NEXT: vporq %ymm0, %ymm1, %ymm16
+; AVX512-FCP-NEXT: subq $1544, %rsp # imm = 0x608
+; AVX512-FCP-NEXT: vmovdqa 96(%rcx), %ymm3
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128]
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm3, %ymm0
+; AVX512-FCP-NEXT: vmovdqa 96(%rdx), %ymm1
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u]
+; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm1, %ymm2
+; AVX512-FCP-NEXT: vporq %ymm0, %ymm2, %ymm16
; AVX512-FCP-NEXT: vmovdqa 96(%rsi), %ymm2
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm2, %ymm0
-; AVX512-FCP-NEXT: vmovdqa 96(%rdi), %ymm3
+; AVX512-FCP-NEXT: vmovdqa 96(%rdi), %ymm7
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19]
-; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm1
-; AVX512-FCP-NEXT: vpor %ymm0, %ymm1, %ymm7
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
-; AVX512-FCP-NEXT: vmovdqa 64(%r9), %ymm1
-; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm1, %ymm0
-; AVX512-FCP-NEXT: vmovdqa64 %ymm1, %ymm25
+; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm7, %ymm4
+; AVX512-FCP-NEXT: vpor %ymm0, %ymm4, %ymm5
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
+; AVX512-FCP-NEXT: vmovdqa 64(%r9), %ymm9
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm9, %ymm0
+; AVX512-FCP-NEXT: vmovdqa64 %ymm9, %ymm25
; AVX512-FCP-NEXT: vmovdqa 64(%r8), %ymm13
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u]
-; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm13, %ymm1
+; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm13, %ymm9
; AVX512-FCP-NEXT: vmovdqa64 %ymm13, %ymm26
-; AVX512-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512-FCP-NEXT: vpor %ymm0, %ymm9, %ymm0
; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-FCP-NEXT: vmovdqa 64(%rcx), %ymm0
-; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm0, %ymm14
-; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm28
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm14
+; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm29
; AVX512-FCP-NEXT: vmovdqa 64(%rdx), %ymm0
-; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm0, %ymm15
-; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm27
+; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm0, %ymm15
+; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm18
; AVX512-FCP-NEXT: vpor %ymm14, %ymm15, %ymm14
; AVX512-FCP-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-FCP-NEXT: vmovdqa 64(%rsi), %ymm0
; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm0, %ymm14
-; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm23
+; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm27
; AVX512-FCP-NEXT: vmovdqa 64(%rdi), %ymm0
; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm0, %ymm15
-; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm29
+; AVX512-FCP-NEXT: vmovdqa64 %ymm0, %ymm28
; AVX512-FCP-NEXT: vpor %ymm14, %ymm15, %ymm14
; AVX512-FCP-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-FCP-NEXT: vmovdqa (%r9), %ymm0
; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm14
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm0, %ymm14
; AVX512-FCP-NEXT: vmovdqa (%r8), %ymm0
; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm0, %ymm15
@@ -13276,10 +13280,10 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-FCP-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-FCP-NEXT: vmovdqa (%rcx), %ymm0
; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm0, %ymm14
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm14
; AVX512-FCP-NEXT: vmovdqa (%rdx), %ymm0
; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm0, %ymm15
+; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm0, %ymm15
; AVX512-FCP-NEXT: vpor %ymm14, %ymm15, %ymm14
; AVX512-FCP-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-FCP-NEXT: vmovdqa (%rsi), %ymm0
@@ -13291,631 +13295,637 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-FCP-NEXT: vpor %ymm14, %ymm15, %ymm14
; AVX512-FCP-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-FCP-NEXT: vmovdqa 32(%rcx), %ymm15
-; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm15, %ymm8
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm15, %ymm6
; AVX512-FCP-NEXT: vmovdqa 32(%rdx), %ymm13
-; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm13, %ymm9
-; AVX512-FCP-NEXT: vpor %ymm8, %ymm9, %ymm8
-; AVX512-FCP-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm13, %ymm8
+; AVX512-FCP-NEXT: vpor %ymm6, %ymm8, %ymm6
+; AVX512-FCP-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT: vmovdqa 32(%rsi), %ymm0
-; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm0, %ymm8
-; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm1, %ymm9
-; AVX512-FCP-NEXT: vpor %ymm8, %ymm9, %ymm8
-; AVX512-FCP-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm0, %ymm6
+; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm9
+; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm9, %ymm8
+; AVX512-FCP-NEXT: vpor %ymm6, %ymm8, %ymm6
+; AVX512-FCP-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT: vmovdqa 32(%r8), %ymm8
-; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm8, %ymm10
-; AVX512-FCP-NEXT: vmovdqa 32(%r9), %ymm9
-; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm9, %ymm11
-; AVX512-FCP-NEXT: vporq %ymm11, %ymm10, %ymm20
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} ymm10 = ymm4[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm12 = ymm5[0,1,1,3,4,5,5,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm10 = ymm12[0,1],ymm10[2],ymm12[3,4],ymm10[5],ymm12[6,7,8,9],ymm10[10],ymm12[11,12],ymm10[13],ymm12[14,15]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,1,3,2]
+; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm8, %ymm6
+; AVX512-FCP-NEXT: vmovdqa 32(%r9), %ymm10
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm10, %ymm11
+; AVX512-FCP-NEXT: vporq %ymm11, %ymm6, %ymm21
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} ymm11 = ymm3[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm12 = ymm1[0,1,1,3,4,5,5,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm11 = ymm12[0,1],ymm11[2],ymm12[3,4],ymm11[5],ymm12[6,7,8,9],ymm11[10],ymm12[11,12],ymm11[13],ymm12[14,15]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,1,3,2]
; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm14 = [18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
; AVX512-FCP-NEXT: # ymm14 = mem[0,1,0,1]
; AVX512-FCP-NEXT: vpshufb %ymm14, %ymm2, %ymm12
; AVX512-FCP-NEXT: vmovdqa64 %ymm14, %ymm17
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm14 = ymm3[1,1,1,1,5,5,5,5]
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm14 = ymm7[1,1,1,1,5,5,5,5]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm12 = ymm12[0,1],ymm14[2],ymm12[3,4],ymm14[5],ymm12[6,7,8,9],ymm14[10],ymm12[11,12],ymm14[13],ymm12[14,15]
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,2,2,3]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm16, %zmm10
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm7, %zmm12
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm12 = zmm10 ^ (mem & (zmm12 ^ zmm10))
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm16, %zmm11
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm5, %zmm12
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm12 = zmm11 ^ (mem & (zmm12 ^ zmm11))
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm14 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535]
-; AVX512-FCP-NEXT: vmovdqa 96(%r8), %ymm7
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm7[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm7[14,15,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm7[16,17,u,u,u,u],zero,zero
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm10 = ymm10 | (ymm12 & ymm14)
-; AVX512-FCP-NEXT: vmovdqa 96(%r9), %ymm11
-; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm11, %ymm6
-; AVX512-FCP-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vmovdqa 96(%r8), %ymm5
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm5[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm5[14,15,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm5[16,17,u,u,u,u],zero,zero
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm11 = ymm11 | (ymm12 & ymm14)
+; AVX512-FCP-NEXT: vmovdqa 96(%r9), %ymm6
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm6, %ymm4
+; AVX512-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm16 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm6 | (ymm10 & ymm16)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm16, %zmm21
-; AVX512-FCP-NEXT: vextracti64x4 $1, %zmm12, %ymm10
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm4 = ymm4 | (ymm11 & ymm16)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm16, %zmm30
+; AVX512-FCP-NEXT: vextracti64x4 $1, %zmm12, %ymm11
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm12 = [0,4,0,0,0,5,0,0]
-; AVX512-FCP-NEXT: vpermd %ymm7, %ymm12, %ymm12
+; AVX512-FCP-NEXT: vpermd %ymm5, %ymm12, %ymm12
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm16 = [65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0]
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm12 = ymm12 ^ (ymm16 & (ymm12 ^ ymm10))
-; AVX512-FCP-NEXT: vmovdqa64 %zmm16, %zmm18
-; AVX512-FCP-NEXT: vprold $16, %ymm11, %ymm10
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,2,2,2]
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm12 = ymm12 ^ (ymm16 & (ymm12 ^ ymm11))
+; AVX512-FCP-NEXT: vmovdqa64 %zmm16, %zmm20
+; AVX512-FCP-NEXT: vprold $16, %ymm6, %ymm11
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,2,2]
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm16 = [65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm10 = ymm10 ^ (ymm16 & (ymm10 ^ ymm12))
-; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm6 = zmm6[0,1,2,3],zmm10[0,1,2,3]
-; AVX512-FCP-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm11 = ymm11 ^ (ymm16 & (ymm11 ^ ymm12))
+; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm4 = zmm4[0,1,2,3],zmm11[0,1,2,3]
+; AVX512-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm10 = [0,1,4,5,4,5,5,7]
-; AVX512-FCP-NEXT: vmovdqa 96(%rax), %ymm6
-; AVX512-FCP-NEXT: vpermd %ymm6, %ymm10, %ymm10
-; AVX512-FCP-NEXT: vpandn %ymm10, %ymm14, %ymm10
-; AVX512-FCP-NEXT: vmovdqa64 %zmm14, %zmm16
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm14 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
-; AVX512-FCP-NEXT: vpshufb %ymm14, %ymm6, %ymm12
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm12, %zmm10
-; AVX512-FCP-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [u,u,u,u,u,u,u,u,u,u,u,u,14,15,12,13,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29]
-; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm2, %ymm10
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm12 = ymm3[3,3,3,3,7,7,7,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm10 = ymm12[0,1,2],ymm10[3],ymm12[4,5],ymm10[6],ymm12[7,8,9,10],ymm10[11],ymm12[12,13],ymm10[14],ymm12[15]
-; AVX512-FCP-NEXT: vpmovsxdq {{.*#+}} ymm12 = [151522058,0,421010202,421010202]
-; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm2, %ymm2
-; AVX512-FCP-NEXT: vmovdqa64 %ymm12, %ymm24
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[2,2,2,2,6,6,6,6]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6,7,8],ymm2[9],ymm3[10,11],ymm2[12],ymm3[13,14,15]
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm3 = [0,2,2,3,10,9,11,11]
-; AVX512-FCP-NEXT: vpermt2q %zmm10, %zmm3, %zmm2
-; AVX512-FCP-NEXT: vpmovsxdq {{.*#+}} ymm12 = [218894094,0,488382238,488382238]
-; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm4, %ymm10
-; AVX512-FCP-NEXT: vmovdqa64 %ymm12, %ymm22
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm12 = ymm5[3,3,3,3,7,7,7,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm10 = ymm12[0],ymm10[1],ymm12[2,3],ymm10[4],ymm12[5,6,7,8],ymm10[9],ymm12[10,11],ymm10[12],ymm12[13,14,15]
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm12 = [22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27,22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27]
-; AVX512-FCP-NEXT: # ymm12 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm11 = [0,1,4,5,4,5,5,7]
+; AVX512-FCP-NEXT: vmovdqa 96(%rax), %ymm4
+; AVX512-FCP-NEXT: vpermd %ymm4, %ymm11, %ymm11
+; AVX512-FCP-NEXT: vpandn %ymm11, %ymm14, %ymm11
+; AVX512-FCP-NEXT: vmovdqa64 %zmm14, %zmm22
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm4, %ymm4
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm5 = ymm5[2,2,2,2,6,6,6,6]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7,8,9],ymm5[10],ymm4[11,12],ymm5[13],ymm4[14,15]
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm30 = [2,2,2,3,8,10,10,11]
-; AVX512-FCP-NEXT: vpermt2q %zmm10, %zmm30, %zmm4
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm19 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm2 ^ (zmm19 & (zmm4 ^ zmm2))
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm4, %zmm4
; AVX512-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [5,0,0,0,6,0,0,6]
-; AVX512-FCP-NEXT: vpermd %ymm7, %ymm2, %ymm2
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm14 = [u,u,u,u,u,u,u,u,u,u,u,u,14,15,12,13,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29]
+; AVX512-FCP-NEXT: vpshufb %ymm14, %ymm2, %ymm4
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm11 = ymm7[3,3,3,3,7,7,7,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm11[0,1,2],ymm4[3],ymm11[4,5],ymm4[6],ymm11[7,8,9,10],ymm4[11],ymm11[12,13],ymm4[14],ymm11[15]
+; AVX512-FCP-NEXT: vpmovsxdq {{.*#+}} ymm6 = [151522058,0,421010202,421010202]
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm2, %ymm2
+; AVX512-FCP-NEXT: vmovdqa64 %ymm6, %ymm23
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm7 = ymm7[2,2,2,2,6,6,6,6]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm7[0],ymm2[1],ymm7[2,3],ymm2[4],ymm7[5,6,7,8],ymm2[9],ymm7[10,11],ymm2[12],ymm7[13,14,15]
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm7 = [0,2,2,3,10,9,11,11]
+; AVX512-FCP-NEXT: vpermt2q %zmm4, %zmm7, %zmm2
+; AVX512-FCP-NEXT: vpmovsxdq {{.*#+}} ymm11 = [218894094,0,488382238,488382238]
+; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm4
+; AVX512-FCP-NEXT: vmovdqa64 %ymm11, %ymm16
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm11 = ymm1[3,3,3,3,7,7,7,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm11[0],ymm4[1],ymm11[2,3],ymm4[4],ymm11[5,6,7,8],ymm4[9],ymm11[10,11],ymm4[12],ymm11[13,14,15]
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27,22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27]
+; AVX512-FCP-NEXT: # ymm6 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm3, %ymm3
+; AVX512-FCP-NEXT: vmovdqa64 %ymm6, %ymm24
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm3[0,1],ymm1[2],ymm3[3,4],ymm1[5],ymm3[6,7,8,9],ymm1[10],ymm3[11,12],ymm1[13],ymm3[14,15]
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm19 = [2,2,2,3,8,10,10,11]
+; AVX512-FCP-NEXT: vpermt2q %zmm4, %zmm19, %zmm1
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm11 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm1 = zmm2 ^ (zmm11 & (zmm1 ^ zmm2))
+; AVX512-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [5,0,0,0,6,0,0,6]
+; AVX512-FCP-NEXT: vpermd %ymm5, %ymm1, %ymm1
+; AVX512-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm3 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535]
+; AVX512-FCP-NEXT: vpbroadcastd 72(%rax), %ymm1
+; AVX512-FCP-NEXT: vpandn %ymm1, %ymm3, %ymm1
+; AVX512-FCP-NEXT: vmovdqa 64(%rax), %ymm2
; AVX512-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm2 = [6,5,0,0,7,6,0,7,6,5,0,0,7,6,0,7]
-; AVX512-FCP-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512-FCP-NEXT: vpermd %zmm6, %zmm2, %zmm4
-; AVX512-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm6 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535]
-; AVX512-FCP-NEXT: vpbroadcastd 72(%rax), %ymm4
-; AVX512-FCP-NEXT: vpandn %ymm4, %ymm6, %ymm4
-; AVX512-FCP-NEXT: vmovdqa 64(%rax), %ymm5
-; AVX512-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpshufb %ymm14, %ymm5, %ymm5
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm4, %zmm4
-; AVX512-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vpbroadcastd 8(%rax), %ymm4
-; AVX512-FCP-NEXT: vpandn %ymm4, %ymm6, %ymm4
-; AVX512-FCP-NEXT: vmovdqa (%rax), %ymm5
-; AVX512-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpshufb %ymm14, %ymm5, %ymm5
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm4, %zmm4
-; AVX512-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vpbroadcastd {{.*#+}} ymm10 = [18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21]
-; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm9, %ymm4
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm5 = ymm8[0,0,2,1,4,4,6,5]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0,1,2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7,8,9,10],ymm5[11],ymm4[12,13],ymm5[14],ymm4[15]
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm5 = [0,1,0,3,10,10,11,11]
-; AVX512-FCP-NEXT: vpermi2q %zmm4, %zmm20, %zmm5
-; AVX512-FCP-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm2, %ymm2
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vpbroadcastd 8(%rax), %ymm1
+; AVX512-FCP-NEXT: vpandn %ymm1, %ymm3, %ymm1
+; AVX512-FCP-NEXT: vmovdqa (%rax), %ymm2
+; AVX512-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm2, %ymm2
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vpbroadcastd {{.*#+}} ymm6 = [18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21]
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm10, %ymm1
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm2 = ymm8[0,0,2,1,4,4,6,5]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7,8,9,10],ymm2[11],ymm1[12,13],ymm2[14],ymm1[15]
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm2 = [0,1,0,3,10,10,11,11]
+; AVX512-FCP-NEXT: vpermi2q %zmm1, %zmm21, %zmm2
+; AVX512-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm0, %ymm4
-; AVX512-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm5 = ymm1[3,3,3,3,7,7,7,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm5[0,1,2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7,8,9,10],ymm4[11],ymm5[12,13],ymm4[14],ymm5[15]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm24, %ymm11
-; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm0, %ymm5
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm6 = ymm1[2,2,2,2,6,6,6,6]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2,3],ymm5[4],ymm6[5,6,7,8],ymm5[9],ymm6[10,11],ymm5[12],ymm6[13,14,15]
-; AVX512-FCP-NEXT: vpermt2q %zmm4, %zmm3, %zmm5
+; AVX512-FCP-NEXT: vpshufb %ymm14, %ymm0, %ymm1
+; AVX512-FCP-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm2 = ymm9[3,3,3,3,7,7,7,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5],ymm1[6],ymm2[7,8,9,10],ymm1[11],ymm2[12,13],ymm1[14],ymm2[15]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm23, %ymm5
+; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm0, %ymm2
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm9[2,2,2,2,6,6,6,6]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6,7,8],ymm2[9],ymm3[10,11],ymm2[12],ymm3[13,14,15]
+; AVX512-FCP-NEXT: vpermt2q %zmm1, %zmm7, %zmm2
; AVX512-FCP-NEXT: vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vmovdqa64 %ymm22, %ymm7
-; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm15, %ymm3
+; AVX512-FCP-NEXT: vmovdqa64 %ymm16, %ymm7
+; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm15, %ymm1
; AVX512-FCP-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm4 = ymm13[3,3,3,3,7,7,7,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6,7,8],ymm3[9],ymm4[10,11],ymm3[12],ymm4[13,14,15]
-; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm15, %ymm4
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm6 = ymm13[2,2,2,2,6,6,6,6]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm4[0,1],ymm6[2],ymm4[3,4],ymm6[5],ymm4[6,7,8,9],ymm6[10],ymm4[11,12],ymm6[13],ymm4[14,15]
-; AVX512-FCP-NEXT: vpermt2q %zmm3, %zmm30, %zmm0
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm5 ^ (zmm19 & (zmm0 ^ zmm5))
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm13[3,3,3,3,7,7,7,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6,7,8],ymm1[9],ymm3[10,11],ymm1[12],ymm3[13,14,15]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm24, %ymm12
+; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm15, %ymm3
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm4 = ymm13[2,2,2,2,6,6,6,6]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7,8,9],ymm4[10],ymm3[11,12],ymm4[13],ymm3[14,15]
+; AVX512-FCP-NEXT: vpermt2q %zmm1, %zmm19, %zmm0
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm2 ^ (zmm11 & (zmm0 ^ zmm2))
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm4 = ymm8[3,3,3,3,7,7,7,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7,8,9],ymm4[10],ymm3[11,12],ymm4[13],ymm3[14,15]
-; AVX512-FCP-NEXT: vprold $16, %ymm9, %ymm4
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm5 = ymm8[1,2,2,3,5,6,6,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7,8,9],ymm4[10],ymm5[11,12],ymm4[13],ymm5[14,15]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm2 = ymm8[3,3,3,3,7,7,7,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
+; AVX512-FCP-NEXT: vprold $16, %ymm10, %ymm2
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm8[1,2,2,3,5,6,6,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7,8,9],ymm2[10],ymm3[11,12],ymm2[13],ymm3[14,15]
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm13 = [2,1,3,2,10,10,10,11]
-; AVX512-FCP-NEXT: vpermt2q %zmm3, %zmm13, %zmm4
-; AVX512-FCP-NEXT: vmovdqa 32(%rax), %ymm0
+; AVX512-FCP-NEXT: vpermt2q %zmm1, %zmm13, %zmm2
+; AVX512-FCP-NEXT: vmovdqa64 (%rax), %zmm16
+; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [14,13,8,8,15,14,8,15,14,13,8,8,15,14,8,15]
+; AVX512-FCP-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vpermd %zmm0, %zmm2, %zmm0
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (zmm18 & (zmm0 ^ zmm4))
+; AVX512-FCP-NEXT: vpermd %zmm16, %zmm0, %zmm0
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (zmm20 & (zmm0 ^ zmm2))
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa64 %ymm23, %ymm0
-; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm0, %ymm2
-; AVX512-FCP-NEXT: vmovdqa64 %ymm29, %ymm19
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm29[2,2,2,2,6,6,6,6]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6,7,8],ymm2[9],ymm3[10,11],ymm2[12],ymm3[13,14,15]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm17, %ymm15
-; AVX512-FCP-NEXT: vpshufb %ymm15, %ymm0, %ymm3
-; AVX512-FCP-NEXT: vmovdqa64 %ymm23, %ymm18
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm4 = ymm29[1,1,1,1,5,5,5,5]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7,8,9],ymm4[10],ymm3[11,12],ymm4[13],ymm3[14,15]
-; AVX512-FCP-NEXT: vpermt2q %zmm2, %zmm30, %zmm3
-; AVX512-FCP-NEXT: vmovdqa64 %ymm28, %ymm0
-; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm0, %ymm2
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm4 = ymm27[2,2,2,2,6,6,6,6]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7,8,9],ymm4[10],ymm2[11,12],ymm4[13],ymm2[14,15]
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} ymm4 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm5 = ymm27[0,1,1,3,4,5,5,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7,8,9],ymm4[10],ymm5[11,12],ymm4[13],ymm5[14,15]
-; AVX512-FCP-NEXT: vpermt2q %zmm2, %zmm13, %zmm6
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm3 ^ (mem & (zmm6 ^ zmm3))
-; AVX512-FCP-NEXT: vmovdqa64 %ymm25, %ymm12
-; AVX512-FCP-NEXT: vprold $16, %ymm25, %ymm2
-; AVX512-FCP-NEXT: vmovdqa64 %ymm26, %ymm23
+; AVX512-FCP-NEXT: vmovdqa64 %ymm27, %ymm0
+; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm0, %ymm1
+; AVX512-FCP-NEXT: vmovdqa64 %ymm23, %ymm21
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm2 = ymm28[2,2,2,2,6,6,6,6]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6,7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13,14,15]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm17, %ymm9
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm0, %ymm2
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm28[1,1,1,1,5,5,5,5]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7,8,9],ymm3[10],ymm2[11,12],ymm3[13],ymm2[14,15]
+; AVX512-FCP-NEXT: vpermt2q %zmm1, %zmm19, %zmm3
+; AVX512-FCP-NEXT: vmovdqa64 %ymm29, %ymm8
+; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm8, %ymm1
+; AVX512-FCP-NEXT: vmovdqa64 %ymm24, %ymm10
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm2 = ymm18[2,2,2,2,6,6,6,6]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm8[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm4 = ymm18[0,1,1,3,4,5,5,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm4[0,1],ymm2[2],ymm4[3,4],ymm2[5],ymm4[6,7,8,9],ymm2[10],ymm4[11,12],ymm2[13],ymm4[14,15]
+; AVX512-FCP-NEXT: vpermt2q %zmm1, %zmm13, %zmm5
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm5 = zmm3 ^ (mem & (zmm5 ^ zmm3))
+; AVX512-FCP-NEXT: vmovdqa64 %ymm25, %ymm0
+; AVX512-FCP-NEXT: vmovdqu64 %ymm25, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-FCP-NEXT: vprold $16, %ymm25, %ymm1
+; AVX512-FCP-NEXT: vmovdqa64 %ymm26, %ymm25
; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm26[1,2,2,3,5,6,6,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7,8,9],ymm2[10],ymm3[11,12],ymm2[13],ymm3[14,15]
-; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm12, %ymm3
-; AVX512-FCP-NEXT: vmovdqa64 %ymm25, %ymm28
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm5 = ymm26[0,0,2,1,4,4,6,5]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0,1,2],ymm5[3],ymm3[4,5],ymm5[6],ymm3[7,8,9,10],ymm5[11],ymm3[12,13],ymm5[14],ymm3[15]
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm1 = [2,2,3,3,10,9,11,10]
-; AVX512-FCP-NEXT: vpermt2q %zmm2, %zmm1, %zmm3
-; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm20 = [0,5,4,0,0,6,5,0,0,5,4,0,0,6,5,0]
-; AVX512-FCP-NEXT: # zmm20 = mem[0,1,2,3,0,1,2,3]
-; AVX512-FCP-NEXT: vpermd 64(%rax), %zmm20, %zmm1
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 ^ (zmm16 & (zmm1 ^ zmm3))
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 ^ (mem & (zmm1 ^ zmm6))
-; AVX512-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm0, %ymm1
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm0 = ymm27[3,3,3,3,7,7,7,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6,7,8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13,14,15]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm3[0,1],ymm1[2],ymm3[3,4],ymm1[5],ymm3[6,7,8,9],ymm1[10],ymm3[11,12],ymm1[13],ymm3[14,15]
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm3
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm4 = ymm26[0,0,2,1,4,4,6,5]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7,8,9,10],ymm4[11],ymm3[12,13],ymm4[14],ymm3[15]
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm0 = [2,2,3,3,10,9,11,10]
+; AVX512-FCP-NEXT: vpermt2q %zmm1, %zmm0, %zmm3
+; AVX512-FCP-NEXT: vmovdqa64 64(%rax), %zmm0
; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT: vmovdqa 64(%rcx), %xmm0
-; AVX512-FCP-NEXT: vmovdqa 64(%rdx), %xmm3
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} xmm9 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
-; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm0, %xmm1
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm3[1,1,2,2]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm6[0],xmm1[1],xmm6[2,3],xmm1[4],xmm6[5,6],xmm1[7]
-; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm23 = [0,5,4,0,0,6,5,0,0,5,4,0,0,6,5,0]
+; AVX512-FCP-NEXT: # zmm23 = mem[0,1,2,3,0,1,2,3]
+; AVX512-FCP-NEXT: vpermd %zmm0, %zmm23, %zmm0
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (zmm22 & (zmm0 ^ zmm3))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm5))
+; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm8, %ymm1
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm0 = ymm18[3,3,3,3,7,7,7,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm12 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6,7,8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13,14,15]
+; AVX512-FCP-NEXT: vmovdqa 64(%rcx), %xmm5
+; AVX512-FCP-NEXT: vmovdqa 64(%rdx), %xmm6
+; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} xmm4 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
+; AVX512-FCP-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm5, %xmm1
; AVX512-FCP-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm3 = xmm6[1,1,2,2]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0],xmm1[1],xmm3[2,3],xmm1[4],xmm3[5,6],xmm1[7]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [0,1,2,3,0,1,2,3,6,7,4,5,6,7,4,5]
-; AVX512-FCP-NEXT: vpshufb %xmm14, %xmm0, %xmm0
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm16 = [0,1,1,3,8,8,9,9]
-; AVX512-FCP-NEXT: vpermt2q %zmm1, %zmm16, %zmm0
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; AVX512-FCP-NEXT: vpshufb %xmm14, %xmm3, %xmm3
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm22 = [8,9,9,0,0,0,1,1]
+; AVX512-FCP-NEXT: vpermt2q %zmm3, %zmm22, %zmm1
; AVX512-FCP-NEXT: vmovdqa 64(%rdi), %xmm6
-; AVX512-FCP-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-FCP-NEXT: vmovdqa %xmm6, (%rsp) # 16-byte Spill
; AVX512-FCP-NEXT: vmovdqa 64(%rsi), %xmm7
-; AVX512-FCP-NEXT: vmovdqa %xmm7, (%rsp) # 16-byte Spill
-; AVX512-FCP-NEXT: vprold $16, %xmm7, %xmm1
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm3 = xmm6[1,1,2,3]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1],xmm1[2],xmm3[3,4],xmm1[5],xmm3[6,7]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; AVX512-FCP-NEXT: vpshufb %xmm2, %xmm3, %xmm3
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm25 = [0,0,1,1,8,8,10,9]
-; AVX512-FCP-NEXT: vpermt2q %zmm1, %zmm25, %zmm3
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm1 = [65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0]
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm3 = zmm0 ^ (zmm1 & (zmm3 ^ zmm0))
-; AVX512-FCP-NEXT: vmovdqa 64(%r9), %xmm0
-; AVX512-FCP-NEXT: vmovdqa 64(%r8), %xmm6
-; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
; AVX512-FCP-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,2,3,6,7,4,5,6,7,4,5,12,13,14,15]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
-; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm0, %xmm6
-; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm0, %xmm0
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm7 = [0,0,0,1,8,9,9,11]
-; AVX512-FCP-NEXT: vpermt2q %zmm6, %zmm7, %zmm0
+; AVX512-FCP-NEXT: vprold $16, %xmm7, %xmm3
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm5 = xmm6[1,1,2,3]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0,1],xmm3[2],xmm5[3,4],xmm3[5],xmm5[6,7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; AVX512-FCP-NEXT: vpshufb %xmm15, %xmm5, %xmm5
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm26 = [0,0,1,1,8,8,0,9]
+; AVX512-FCP-NEXT: vpermt2q %zmm3, %zmm26, %zmm5
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm3 = [65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0]
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm5 = zmm1 ^ (zmm3 & (zmm5 ^ zmm1))
+; AVX512-FCP-NEXT: vmovdqa 64(%r9), %xmm1
+; AVX512-FCP-NEXT: vmovdqa 64(%r8), %xmm6
+; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm6[4],xmm1[4],xmm6[5],xmm1[5],xmm6[6],xmm1[6],xmm6[7],xmm1[7]
+; AVX512-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [0,1,2,3,6,7,4,5,6,7,4,5,12,13,14,15]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
+; AVX512-FCP-NEXT: vpshufb %xmm2, %xmm1, %xmm6
+; AVX512-FCP-NEXT: vpshufb %xmm7, %xmm1, %xmm1
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm18 = [0,0,0,1,8,9,9,0]
+; AVX512-FCP-NEXT: vpermt2q %zmm6, %zmm18, %zmm1
; AVX512-FCP-NEXT: vpbroadcastd 64(%rax), %ymm6
; AVX512-FCP-NEXT: vpbroadcastd 68(%rax), %ymm8
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm6, %zmm29
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm29 = zmm29 ^ (zmm21 & (zmm29 ^ zmm0))
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535]
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm29 = zmm29 ^ (zmm0 & (zmm29 ^ zmm3))
+; AVX512-FCP-NEXT: vmovdqa64 %zmm30, %zmm11
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm29 = zmm29 ^ (zmm30 & (zmm29 ^ zmm1))
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535]
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm29 = zmm29 ^ (zmm1 & (zmm29 ^ zmm5))
; AVX512-FCP-NEXT: vmovdqa (%rcx), %xmm8
; AVX512-FCP-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-FCP-NEXT: vmovdqa (%rdx), %xmm10
-; AVX512-FCP-NEXT: vmovdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm8, %xmm3
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm10[1,1,2,2]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm6[0],xmm3[1],xmm6[2,3],xmm3[4],xmm6[5,6],xmm3[7]
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm10[0],xmm8[0],xmm10[1],xmm8[1],xmm10[2],xmm8[2],xmm10[3],xmm8[3]
+; AVX512-FCP-NEXT: vmovdqa (%rdx), %xmm0
+; AVX512-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm8, %xmm5
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm0[1,1,2,2]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0],xmm5[1],xmm6[2,3],xmm5[4],xmm6[5,6],xmm5[7]
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3]
; AVX512-FCP-NEXT: vpshufb %xmm14, %xmm6, %xmm6
-; AVX512-FCP-NEXT: vpermt2q %zmm3, %zmm16, %zmm6
-; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm9
-; AVX512-FCP-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-FCP-NEXT: vmovdqa (%rsi), %xmm10
-; AVX512-FCP-NEXT: vmovdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-FCP-NEXT: vprold $16, %xmm10, %xmm3
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm8 = xmm9[1,1,2,3]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm8[0,1],xmm3[2],xmm8[3,4],xmm3[5],xmm8[6,7]
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3]
-; AVX512-FCP-NEXT: vpshufb %xmm2, %xmm8, %xmm8
-; AVX512-FCP-NEXT: vmovdqa64 %xmm2, %xmm31
-; AVX512-FCP-NEXT: vpermt2q %zmm3, %zmm25, %zmm8
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm8 = zmm6 ^ (zmm1 & (zmm8 ^ zmm6))
-; AVX512-FCP-NEXT: vmovdqa (%r9), %xmm1
-; AVX512-FCP-NEXT: vmovdqa (%r8), %xmm3
-; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; AVX512-FCP-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm1, %xmm3
-; AVX512-FCP-NEXT: vpshufb %xmm4, %xmm1, %xmm1
-; AVX512-FCP-NEXT: vmovdqa64 %xmm4, %xmm27
-; AVX512-FCP-NEXT: vpermt2q %zmm3, %zmm7, %zmm1
-; AVX512-FCP-NEXT: vpbroadcastd (%rax), %ymm3
+; AVX512-FCP-NEXT: vpermt2q %zmm6, %zmm22, %zmm5
+; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm0
+; AVX512-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-FCP-NEXT: vmovdqa (%rsi), %xmm4
+; AVX512-FCP-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-FCP-NEXT: vprold $16, %xmm4, %xmm6
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm8 = xmm0[1,1,2,3]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm8[0,1],xmm6[2],xmm8[3,4],xmm6[5],xmm8[6,7]
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; AVX512-FCP-NEXT: vpshufb %xmm15, %xmm8, %xmm8
+; AVX512-FCP-NEXT: vpermt2q %zmm6, %zmm26, %zmm8
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm8 = zmm5 ^ (zmm3 & (zmm8 ^ zmm5))
+; AVX512-FCP-NEXT: vmovdqa (%r9), %xmm3
+; AVX512-FCP-NEXT: vmovdqa (%r8), %xmm5
+; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
+; AVX512-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
+; AVX512-FCP-NEXT: vpshufb %xmm2, %xmm3, %xmm5
+; AVX512-FCP-NEXT: vpshufb %xmm7, %xmm3, %xmm3
+; AVX512-FCP-NEXT: vmovdqa64 %xmm7, %xmm24
+; AVX512-FCP-NEXT: vpermt2q %zmm5, %zmm18, %zmm3
+; AVX512-FCP-NEXT: vpbroadcastd (%rax), %ymm5
; AVX512-FCP-NEXT: vpbroadcastd 4(%rax), %ymm6
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm3, %zmm26
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm26 = zmm26 ^ (zmm21 & (zmm26 ^ zmm1))
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm26 = zmm26 ^ (zmm0 & (zmm26 ^ zmm8))
-; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm9, %ymm0
-; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm22 # 32-byte Reload
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm1 = ymm22[2,2,2,2,6,6,6,6]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6,7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13,14,15]
-; AVX512-FCP-NEXT: vpshufb %ymm15, %ymm9, %ymm1
-; AVX512-FCP-NEXT: vmovdqa %ymm9, %ymm11
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm22[1,1,1,1,5,5,5,5]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7,8,9],ymm3[10],ymm1[11,12],ymm3[13],ymm1[14,15]
-; AVX512-FCP-NEXT: vpermt2q %zmm0, %zmm30, %zmm1
-; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm5, %zmm30
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm30 = zmm30 ^ (zmm11 & (zmm30 ^ zmm3))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm30 = zmm30 ^ (zmm1 & (zmm30 ^ zmm8))
+; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX512-FCP-NEXT: vmovdqa64 %ymm21, %ymm0
+; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm7, %ymm1
; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm21 # 32-byte Reload
; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm21[2,2,2,2,6,6,6,6]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm0[0,1],ymm3[2],ymm0[3,4],ymm3[5],ymm0[6,7,8,9],ymm3[10],ymm0[11,12],ymm3[13],ymm0[14,15]
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm15[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm6 = ymm21[0,1,1,3,4,5,5,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm6[0,1],ymm0[2],ymm6[3,4],ymm0[5],ymm6[6,7,8,9],ymm0[10],ymm6[11,12],ymm0[13],ymm6[14,15]
-; AVX512-FCP-NEXT: vpermt2q %zmm3, %zmm13, %zmm0
-; AVX512-FCP-NEXT: vmovdqa 96(%rcx), %xmm10
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6,7,8],ymm1[9],ymm3[10,11],ymm1[12],ymm3[13,14,15]
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm7, %ymm3
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm5 = ymm21[1,1,1,1,5,5,5,5]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0,1],ymm5[2],ymm3[3,4],ymm5[5],ymm3[6,7,8,9],ymm5[10],ymm3[11,12],ymm5[13],ymm3[14,15]
+; AVX512-FCP-NEXT: vpermt2q %zmm1, %zmm19, %zmm3
+; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm9, %ymm1
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm18 # 32-byte Reload
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm5 = ymm18[2,2,2,2,6,6,6,6]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm1[0,1],ymm5[2],ymm1[3,4],ymm5[5],ymm1[6,7,8,9],ymm5[10],ymm1[11,12],ymm5[13],ymm1[14,15]
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm9[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm6 = ymm18[0,1,1,3,4,5,5,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm6[0,1],ymm1[2],ymm6[3,4],ymm1[5],ymm6[6,7,8,9],ymm1[10],ymm6[11,12],ymm1[13],ymm6[14,15]
+; AVX512-FCP-NEXT: vpermt2q %zmm5, %zmm13, %zmm1
+; AVX512-FCP-NEXT: vmovdqa 96(%rcx), %xmm11
; AVX512-FCP-NEXT: vmovdqa 96(%rdx), %xmm13
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm13[0],xmm10[0],xmm13[1],xmm10[1],xmm13[2],xmm10[2],xmm13[3],xmm10[3]
-; AVX512-FCP-NEXT: vpshufb %xmm14, %xmm3, %xmm3
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm24 = [0,2,2,3,8,9,9,11]
-; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512-FCP-NEXT: vpermt2q %zmm3, %zmm24, %zmm5
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [u,u,u,u,u,u,u,u,u,u,u,u,14,15,12,13,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm18, %ymm3
-; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm3, %ymm3
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm7 = ymm19[3,3,3,3,7,7,7,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm8 = ymm7[0,1,2],ymm3[3],ymm7[4,5],ymm3[6],ymm7[7,8,9,10],ymm3[11],ymm7[12,13],ymm3[14],ymm7[15]
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm1 ^ (mem & (zmm0 ^ zmm1))
-; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX512-FCP-NEXT: vprold $16, %ymm4, %ymm1
-; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm16 # 32-byte Reload
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm16[1,2,2,3,5,6,6,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm3[0,1],ymm1[2],ymm3[3,4],ymm1[5],ymm3[6,7,8,9],ymm1[10],ymm3[11,12],ymm1[13],ymm3[14,15]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm4, %ymm17
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm4 = ymm16[0,0,2,1,4,4,6,5]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7,8,9,10],ymm4[11],ymm3[12,13],ymm4[14],ymm3[15]
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm2 = [2,2,3,3,10,9,11,10]
-; AVX512-FCP-NEXT: vpermt2q %zmm1, %zmm2, %zmm3
-; AVX512-FCP-NEXT: vmovdqa 96(%rsi), %xmm4
-; AVX512-FCP-NEXT: vmovdqa 96(%rdi), %xmm6
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3]
-; AVX512-FCP-NEXT: vmovdqa64 %xmm31, %xmm9
-; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm1, %xmm1
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm13[0],xmm11[0],xmm13[1],xmm11[1],xmm13[2],xmm11[2],xmm13[3],xmm11[3]
+; AVX512-FCP-NEXT: vpshufb %xmm14, %xmm5, %xmm5
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm6 = [0,2,2,3,8,9,9,0]
+; AVX512-FCP-NEXT: vpermt2q %zmm5, %zmm6, %zmm12
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [u,u,u,u,u,u,u,u,u,u,u,u,14,15,12,13,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm27, %ymm2
+; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm2, %ymm5
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm8 = ymm28[3,3,3,3,7,7,7,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm8 = ymm8[0,1,2],ymm5[3],ymm8[4,5],ymm5[6],ymm8[7,8,9,10],ymm5[11],ymm8[12,13],ymm5[14],ymm8[15]
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm1 = zmm3 ^ (mem & (zmm1 ^ zmm3))
+; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX512-FCP-NEXT: vprold $16, %ymm5, %ymm2
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm20 # 32-byte Reload
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm20[1,2,2,3,5,6,6,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7,8,9],ymm2[10],ymm3[11,12],ymm2[13],ymm3[14,15]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm5, %ymm31
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm5 = ymm20[0,0,2,1,4,4,6,5]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0,1,2],ymm5[3],ymm3[4,5],ymm5[6],ymm3[7,8,9,10],ymm5[11],ymm3[12,13],ymm5[14],ymm3[15]
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm0 = [2,2,3,3,10,9,11,10]
+; AVX512-FCP-NEXT: vpermt2q %zmm2, %zmm0, %zmm3
+; AVX512-FCP-NEXT: vmovdqa 96(%rsi), %xmm2
+; AVX512-FCP-NEXT: vmovdqa 96(%rdi), %xmm4
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; AVX512-FCP-NEXT: vpshufb %xmm15, %xmm5, %xmm5
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm19 = [2,1,3,3,8,8,9,9]
-; AVX512-FCP-NEXT: vpermt2q %zmm1, %zmm19, %zmm8
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm30 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm8 = zmm5 ^ (zmm30 & (zmm8 ^ zmm5))
-; AVX512-FCP-NEXT: vbroadcasti32x4 {{.*#+}} ymm18 = [26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
-; AVX512-FCP-NEXT: # ymm18 = mem[0,1,2,3,0,1,2,3]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm18, %ymm1
-; AVX512-FCP-NEXT: vmovdqa64 %ymm28, %ymm2
-; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm2, %ymm1
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm2 = ymm23[3,3,3,3,7,7,7,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
-; AVX512-FCP-NEXT: vpermd (%rax), %zmm20, %zmm20
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm20 = zmm20 ^ (mem & (zmm20 ^ zmm3))
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm20 = zmm20 ^ (mem & (zmm20 ^ zmm0))
+; AVX512-FCP-NEXT: vpermt2q %zmm5, %zmm19, %zmm8
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm8 = zmm12 ^ (zmm17 & (zmm8 ^ zmm12))
+; AVX512-FCP-NEXT: vbroadcasti32x4 {{.*#+}} ymm22 = [26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
+; AVX512-FCP-NEXT: # ymm22 = mem[0,1,2,3,0,1,2,3]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm22, %ymm0
+; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm5, %ymm0
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm5 = ymm25[3,3,3,3,7,7,7,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm12 = ymm0[0,1],ymm5[2],ymm0[3,4],ymm5[5],ymm0[6,7,8,9],ymm5[10],ymm0[11,12],ymm5[13],ymm0[14,15]
+; AVX512-FCP-NEXT: vpermd %zmm16, %zmm23, %zmm16
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm16 = zmm16 ^ (mem & (zmm16 ^ zmm3))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm16 = zmm16 ^ (mem & (zmm16 ^ zmm1))
; AVX512-FCP-NEXT: vmovdqa 32(%rcx), %xmm1
-; AVX512-FCP-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-FCP-NEXT: vmovdqa 32(%rdx), %xmm0
; AVX512-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512-FCP-NEXT: vpshufb %xmm14, %xmm0, %xmm3
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm15[14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u]
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm2 = ymm21[3,3,3,3,7,7,7,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6,7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13,14,15]
-; AVX512-FCP-NEXT: vpermt2q %zmm3, %zmm24, %zmm0
-; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm11, %ymm1
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm2 = ymm22[3,3,3,3,7,7,7,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm15 = ymm2[0,1,2],ymm1[3],ymm2[4,5],ymm1[6],ymm2[7,8,9,10],ymm1[11],ymm2[12,13],ymm1[14],ymm2[15]
-; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %xmm12
-; AVX512-FCP-NEXT: vmovdqa 32(%rsi), %xmm11
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3]
-; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm1, %xmm1
-; AVX512-FCP-NEXT: vmovdqa64 %xmm31, %xmm24
-; AVX512-FCP-NEXT: vpermt2q %zmm1, %zmm19, %zmm15
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm19 = [2,2,2,3,8,8,8,9]
-; AVX512-FCP-NEXT: vmovdqa 96(%r9), %xmm5
-; AVX512-FCP-NEXT: vmovdqa 96(%r8), %xmm3
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm9 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3]
-; AVX512-FCP-NEXT: vmovdqa64 %xmm27, %xmm21
-; AVX512-FCP-NEXT: vmovdqa64 %xmm27, %xmm1
-; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm9, %xmm14
-; AVX512-FCP-NEXT: vpermt2q %zmm14, %zmm19, %zmm7
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm15 = zmm0 ^ (zmm30 & (zmm15 ^ zmm0))
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm27 = [6,7,3,3,7,7,6,7]
-; AVX512-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm27, %ymm0 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: vpbroadcastd 96(%rax), %ymm23
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm23, %zmm0, %zmm23
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm30 = [65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535]
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm23 = zmm23 ^ (zmm30 & (zmm23 ^ zmm7))
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm31 = [0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535]
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm23 = zmm23 ^ (zmm31 & (zmm23 ^ zmm8))
-; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
-; AVX512-FCP-NEXT: vprold $16, %xmm4, %xmm0
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm4 = xmm6[1,1,2,3]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm4[0,1],xmm0[2],xmm4[3,4],xmm0[5],xmm4[6,7]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm18, %ymm1
-; AVX512-FCP-NEXT: vmovdqa64 %ymm17, %ymm4
-; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm4, %ymm1
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm4 = ymm16[3,3,3,3,7,7,7,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm4[2],ymm1[3,4],ymm4[5],ymm1[6,7,8,9],ymm4[10],ymm1[11,12],ymm4[13],ymm1[14,15]
-; AVX512-FCP-NEXT: vmovdqa 32(%r9), %xmm7
+; AVX512-FCP-NEXT: vmovdqa64 %xmm1, %xmm28
+; AVX512-FCP-NEXT: vpshufb %xmm14, %xmm0, %xmm5
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm9[14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u]
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm18[3,3,3,3,7,7,7,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6,7,8],ymm1[9],ymm3[10,11],ymm1[12],ymm3[13,14,15]
+; AVX512-FCP-NEXT: vpermt2q %zmm5, %zmm6, %zmm0
+; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm7, %ymm1
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm21[3,3,3,3,7,7,7,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm14 = ymm3[0,1,2],ymm1[3],ymm3[4,5],ymm1[6],ymm3[7,8,9,10],ymm1[11],ymm3[12,13],ymm1[14],ymm3[15]
+; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %xmm10
+; AVX512-FCP-NEXT: vmovdqa 32(%rsi), %xmm9
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3]
+; AVX512-FCP-NEXT: vpshufb %xmm15, %xmm1, %xmm1
+; AVX512-FCP-NEXT: vmovdqa64 %xmm15, %xmm25
+; AVX512-FCP-NEXT: vpermt2q %zmm1, %zmm19, %zmm14
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm21 = [2,2,2,3,8,8,8,9]
+; AVX512-FCP-NEXT: vmovdqa 96(%r9), %xmm6
+; AVX512-FCP-NEXT: vmovdqa 96(%r8), %xmm5
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; AVX512-FCP-NEXT: vmovdqa64 %xmm24, %xmm18
+; AVX512-FCP-NEXT: vmovdqa64 %xmm24, %xmm1
+; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm7, %xmm15
+; AVX512-FCP-NEXT: vpermt2q %zmm15, %zmm21, %zmm12
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm14 = zmm0 ^ (zmm17 & (zmm14 ^ zmm0))
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm15 = [6,7,3,3,7,7,6,7]
+; AVX512-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm0 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: vpbroadcastd 96(%rax), %ymm19
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm19, %zmm0, %zmm19
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm24 = [65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535]
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm19 = zmm19 ^ (zmm24 & (zmm19 ^ zmm12))
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm23 = [0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535]
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm19 = zmm19 ^ (zmm23 & (zmm19 ^ zmm8))
+; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; AVX512-FCP-NEXT: vprold $16, %xmm2, %xmm0
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm2 = xmm4[1,1,2,3]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3,4],xmm0[5],xmm2[6,7]
+; AVX512-FCP-NEXT: vmovdqa64 %ymm22, %ymm1
+; AVX512-FCP-NEXT: vmovdqa64 %ymm31, %ymm2
+; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm2, %ymm1
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm2 = ymm20[3,3,3,3,7,7,7,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
+; AVX512-FCP-NEXT: vmovdqa 32(%r9), %xmm8
; AVX512-FCP-NEXT: vmovdqa 32(%r8), %xmm4
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3]
-; AVX512-FCP-NEXT: vmovdqa64 %xmm21, %xmm8
-; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm6, %xmm8
-; AVX512-FCP-NEXT: vmovdqa64 %xmm6, %xmm16
-; AVX512-FCP-NEXT: vpermt2q %zmm8, %zmm19, %zmm1
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[4,5,2,3,4,5,6,7,8,9,10,11,10,11,8,9]
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm22 = [0,0,2,1,8,8,9,11]
-; AVX512-FCP-NEXT: vpermt2q %zmm2, %zmm22, %zmm0
-; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm13[4],xmm10[4],xmm13[5],xmm10[5],xmm13[6],xmm10[6],xmm13[7],xmm10[7]
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} xmm18 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
-; AVX512-FCP-NEXT: vmovdqa64 %xmm18, %xmm8
-; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm10, %xmm10
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3]
+; AVX512-FCP-NEXT: vmovdqa64 %xmm18, %xmm12
+; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm2, %xmm12
+; AVX512-FCP-NEXT: vpermt2q %zmm12, %zmm21, %zmm1
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [4,5,2,3,4,5,6,7,8,9,10,11,10,11,8,9]
+; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm3, %xmm3
+; AVX512-FCP-NEXT: vmovdqa64 %xmm12, %xmm31
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm17 = [0,0,0,1,8,8,9,0]
+; AVX512-FCP-NEXT: vpermt2q %zmm3, %zmm17, %zmm0
+; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm13[4],xmm11[4],xmm13[5],xmm11[5],xmm13[6],xmm11[6],xmm13[7],xmm11[7]
+; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} xmm22 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
+; AVX512-FCP-NEXT: vmovdqa64 %xmm22, %xmm12
+; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm11, %xmm11
; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm13 = xmm13[1,1,2,2]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm13 = xmm13[0],xmm10[1],xmm13[2,3],xmm10[4],xmm13[5,6],xmm10[7]
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,2,3,3,4,5,6,7]
-; AVX512-FCP-NEXT: vpermt2q %zmm2, %zmm25, %zmm13
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm28 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm13 = zmm0 ^ (zmm28 & (zmm13 ^ zmm0))
-; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm13[0],xmm11[1],xmm13[2,3],xmm11[4],xmm13[5,6],xmm11[7]
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[0,2,3,3,4,5,6,7]
+; AVX512-FCP-NEXT: vmovdqa64 %zmm26, %zmm21
+; AVX512-FCP-NEXT: vpermt2q %zmm3, %zmm26, %zmm11
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm26 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm11 = zmm0 ^ (zmm26 & (zmm11 ^ zmm0))
+; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm14 = ymm14 ^ (ymm30 & (ymm14 ^ ymm0))
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm20 # 32-byte Reload
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm20 = ymm20 ^ (ymm24 & (ymm20 ^ ymm0))
; AVX512-FCP-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; AVX512-FCP-NEXT: vmovdqa64 %xmm24, %xmm8
-; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm3, %xmm3
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} xmm21 = [0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
-; AVX512-FCP-NEXT: vmovdqa64 %xmm21, %xmm2
-; AVX512-FCP-NEXT: vpshufb %xmm2, %xmm9, %xmm6
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm24 = [0,1,1,3,8,8,9,9]
-; AVX512-FCP-NEXT: vpermt2q %zmm3, %zmm24, %zmm6
-; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512-FCP-NEXT: vpshufhw {{.*#+}} ymm3 = ymm2[0,1,2,3,5,5,7,6,8,9,10,11,13,13,15,14]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[3,3,3,3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25,22,23,22,23,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,2,2]
-; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm2, %xmm10
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm10[0,0,1,1]
-; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm10, %xmm10
-; AVX512-FCP-NEXT: vmovdqa %xmm8, %xmm5
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm10[0,0,1,1]
-; AVX512-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm27, %ymm10 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: vpbroadcastd 32(%rax), %ymm17
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm17, %zmm10, %zmm10
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm10 = zmm10 ^ (zmm30 & (zmm10 ^ zmm1))
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} xmm18 = [0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
+; AVX512-FCP-NEXT: vmovdqa64 %xmm18, %xmm5
+; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm7, %xmm5
+; AVX512-FCP-NEXT: vmovdqa64 %xmm25, %xmm12
+; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm3, %xmm3
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm25 = [8,9,9,0,0,0,1,1]
+; AVX512-FCP-NEXT: vpermt2q %zmm5, %zmm25, %zmm3
+; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX512-FCP-NEXT: vpshufhw {{.*#+}} ymm5 = ymm6[0,1,2,3,5,5,7,6,8,9,10,11,13,13,15,14]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[3,3,3,3]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25,22,23,22,23,u,u,u,u,u,u,u,u]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,2,2]
+; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm7, %xmm7
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm7[0,0,1,1]
+; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm7, %xmm7
+; AVX512-FCP-NEXT: vmovdqa64 %xmm12, %xmm27
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm7[0,0,1,1]
+; AVX512-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm7 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: vpbroadcastd 32(%rax), %ymm15
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm15, %zmm7, %zmm7
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm7 = zmm7 ^ (zmm24 & (zmm7 ^ zmm1))
; AVX512-FCP-NEXT: vpbroadcastd 100(%rax), %ymm1
-; AVX512-FCP-NEXT: vpbroadcastd 104(%rax), %ymm17
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm17, %zmm1, %zmm19
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm19 = zmm19 ^ (zmm17 & (zmm19 ^ zmm6))
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm10 = zmm10 ^ (zmm31 & (zmm10 ^ zmm15))
+; AVX512-FCP-NEXT: vpbroadcastd 104(%rax), %ymm15
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm15, %zmm1, %zmm15
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm24 = [65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm15 = zmm15 ^ (zmm24 & (zmm15 ^ zmm3))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm7 = zmm7 ^ (zmm23 & (zmm7 ^ zmm14))
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm1 = [65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0]
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm19 = zmm19 ^ (zmm1 & (zmm19 ^ zmm13))
-; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm11[4],xmm12[4],xmm11[5],xmm12[5],xmm11[6],xmm12[6],xmm11[7],xmm12[7]
-; AVX512-FCP-NEXT: vprold $16, %xmm11, %xmm11
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm12 = xmm12[1,1,2,3]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm12[0,1],xmm11[2],xmm12[3,4],xmm11[5],xmm12[6,7]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [4,5,2,3,4,5,6,7,8,9,10,11,10,11,8,9]
-; AVX512-FCP-NEXT: vpshufb %xmm15, %xmm6, %xmm6
-; AVX512-FCP-NEXT: vpermt2q %zmm6, %zmm22, %zmm11
-; AVX512-FCP-NEXT: vmovdqa64 %xmm18, %xmm6
-; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
-; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm12, %xmm6
-; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm12 = xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm13 = xmm13[1,1,2,2]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm13[0],xmm6[1],xmm13[2,3],xmm6[4],xmm13[5,6],xmm6[7]
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm12 = xmm12[0,2,3,3,4,5,6,7]
-; AVX512-FCP-NEXT: vpermt2q %zmm12, %zmm25, %zmm6
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm11 ^ (zmm28 & (zmm6 ^ zmm11))
-; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm4[4],xmm7[4],xmm4[5],xmm7[5],xmm4[6],xmm7[6],xmm4[7],xmm7[7]
-; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm4, %xmm4
-; AVX512-FCP-NEXT: vmovdqa64 %xmm21, %xmm5
-; AVX512-FCP-NEXT: vmovdqa64 %xmm16, %xmm7
-; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm7, %xmm5
-; AVX512-FCP-NEXT: vpermt2q %zmm4, %zmm24, %zmm5
-; AVX512-FCP-NEXT: vpbroadcastd 36(%rax), %ymm4
-; AVX512-FCP-NEXT: vpbroadcastd 40(%rax), %ymm7
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm4, %zmm4
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 ^ (zmm17 & (zmm4 ^ zmm5))
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 ^ (zmm1 & (zmm4 ^ zmm6))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm15 = zmm15 ^ (zmm1 & (zmm15 ^ zmm11))
+; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm9[4],xmm10[4],xmm9[5],xmm10[5],xmm9[6],xmm10[6],xmm9[7],xmm10[7]
+; AVX512-FCP-NEXT: vprold $16, %xmm9, %xmm9
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm10 = xmm10[1,1,2,3]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm10[0,1],xmm9[2],xmm10[3,4],xmm9[5],xmm10[6,7]
+; AVX512-FCP-NEXT: vmovdqa64 %xmm31, %xmm14
+; AVX512-FCP-NEXT: vpshufb %xmm14, %xmm3, %xmm3
+; AVX512-FCP-NEXT: vpermt2q %zmm3, %zmm17, %zmm9
+; AVX512-FCP-NEXT: vmovdqa64 %xmm22, %xmm3
+; AVX512-FCP-NEXT: vmovdqa64 %xmm28, %xmm10
+; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm10, %xmm3
+; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm10 = xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
+; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm11 = xmm11[1,1,2,2]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm11[0],xmm3[1],xmm11[2,3],xmm3[4],xmm11[5,6],xmm3[7]
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm10 = xmm10[0,2,3,3,4,5,6,7]
+; AVX512-FCP-NEXT: vpermt2q %zmm10, %zmm21, %zmm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm3 = zmm9 ^ (zmm26 & (zmm3 ^ zmm9))
+; AVX512-FCP-NEXT: vmovdqa64 %xmm18, %xmm9
+; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm2, %xmm2
+; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm4[4],xmm8[4],xmm4[5],xmm8[5],xmm4[6],xmm8[6],xmm4[7],xmm8[7]
+; AVX512-FCP-NEXT: vmovdqa64 %xmm27, %xmm8
+; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm4, %xmm4
+; AVX512-FCP-NEXT: vpermt2q %zmm2, %zmm25, %zmm4
+; AVX512-FCP-NEXT: vpbroadcastd 36(%rax), %ymm2
+; AVX512-FCP-NEXT: vpbroadcastd 40(%rax), %ymm8
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm2, %zmm2
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (zmm24 & (zmm2 ^ zmm4))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (zmm1 & (zmm2 ^ zmm3))
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
; AVX512-FCP-NEXT: vpternlogd {{.*#+}} ymm0 = mem ^ (ymm1 & (ymm0 ^ mem))
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 ^ (ymm1 & (ymm3 ^ ymm0))
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm5 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535]
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm9 = ymm9 ^ (ymm5 & (ymm9 ^ ymm14))
-; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm9[0,1,2,3],zmm3[0,1,2,3]
-; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm12 = zmm12 ^ (mem & (zmm12 ^ zmm0))
-; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm0 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm0 # 64-byte Folded Reload
-; AVX512-FCP-NEXT: # zmm0 = (zmm0 & zmm5) | mem
-; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm2 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm2 # 64-byte Folded Reload
-; AVX512-FCP-NEXT: # zmm2 = (zmm2 & zmm5) | mem
-; AVX512-FCP-NEXT: vmovdqa (%rsp), %xmm3 # 16-byte Reload
-; AVX512-FCP-NEXT: vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
-; AVX512-FCP-NEXT: # xmm3 = xmm3[4],mem[4],xmm3[5],mem[5],xmm3[6],mem[6],xmm3[7],mem[7]
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm5 = ymm5 ^ (ymm1 & (ymm5 ^ ymm0))
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm4 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535]
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm6 ^ (ymm4 & (ymm6 ^ ymm20))
+; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm6[0,1,2,3],zmm5[0,1,2,3]
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm3 = zmm3 ^ (mem & (zmm3 ^ zmm0))
+; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm0 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm0 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: # zmm0 = (zmm0 & zmm4) | mem
+; AVX512-FCP-NEXT: vmovdqa64 %zmm4, %zmm5
+; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm4 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm4 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: # zmm4 = (zmm4 & zmm5) | mem
; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
; AVX512-FCP-NEXT: vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
; AVX512-FCP-NEXT: # xmm5 = xmm5[4],mem[4],xmm5[5],mem[5],xmm5[6],mem[6],xmm5[7],mem[7]
; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX512-FCP-NEXT: vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
+; AVX512-FCP-NEXT: vpunpckhwd (%rsp), %xmm6, %xmm6 # 16-byte Folded Reload
; AVX512-FCP-NEXT: # xmm6 = xmm6[4],mem[4],xmm6[5],mem[5],xmm6[6],mem[6],xmm6[7],mem[7]
-; AVX512-FCP-NEXT: vpshufb %xmm15, %xmm3, %xmm3
-; AVX512-FCP-NEXT: vpshufb %xmm15, %xmm6, %xmm6
-; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} ymm7 = ymm7[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512-FCP-NEXT: vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: # ymm8 = mem[0,1,1,3,4,5,5,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm8[0,1],ymm7[2],ymm8[3,4],ymm7[5],ymm8[6,7,8,9],ymm7[10],ymm8[11,12],ymm7[13],ymm8[14,15]
-; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
-; AVX512-FCP-NEXT: vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: # ymm9 = mem[1,1,1,1,5,5,5,5]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm8 = ymm8[0,1],ymm9[2],ymm8[3,4],ymm9[5],ymm8[6,7,8,9],ymm9[10],ymm8[11,12],ymm9[13],ymm8[14,15]
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [0,1,4,5,4,5,5,7]
-; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512-FCP-NEXT: vpermd %ymm11, %ymm9, %ymm9
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[12,13],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm11[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm11[16,17],zero,zero
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm13 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535]
-; AVX512-FCP-NEXT: vpandn %ymm9, %ymm13, %ymm9
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm9, %zmm11, %zmm9
-; AVX512-FCP-NEXT: vpshuflw $248, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
-; AVX512-FCP-NEXT: # xmm11 = mem[0,2,3,3,4,5,6,7]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,0,2,1]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,1,3]
+; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; AVX512-FCP-NEXT: vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload
+; AVX512-FCP-NEXT: # xmm8 = xmm8[4],mem[4],xmm8[5],mem[5],xmm8[6],mem[6],xmm8[7],mem[7]
+; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX512-FCP-NEXT: vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm9 # 16-byte Folded Reload
+; AVX512-FCP-NEXT: # xmm9 = xmm9[4],mem[4],xmm9[5],mem[5],xmm9[6],mem[6],xmm9[7],mem[7]
+; AVX512-FCP-NEXT: vmovdqa64 %xmm31, %xmm10
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm6, %xmm6
+; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm9, %xmm9
+; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} ymm10 = ymm10[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512-FCP-NEXT: vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: # ymm11 = mem[0,1,1,3,4,5,5,7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm10 = ymm11[0,1],ymm10[2],ymm11[3,4],ymm10[5],ymm11[6,7,8,9],ymm10[10],ymm11[11,12],ymm10[13],ymm11[14,15]
+; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
+; AVX512-FCP-NEXT: vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: # ymm12 = mem[1,1,1,1,5,5,5,5]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm11 = ymm11[0,1],ymm12[2],ymm11[3,4],ymm12[5],ymm11[6,7,8,9],ymm12[10],ymm11[11,12],ymm12[13],ymm11[14,15]
+; AVX512-FCP-NEXT: vmovdqa 32(%rax), %ymm12
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm13 = [0,1,4,5,4,5,5,7]
+; AVX512-FCP-NEXT: vpermd %ymm12, %ymm13, %ymm13
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm12[12,13],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm12[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm12[16,17],zero,zero
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm14 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535]
+; AVX512-FCP-NEXT: vpandn %ymm13, %ymm14, %ymm13
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm12, %zmm12
; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm5[0,2,3,3,4,5,6,7]
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,0,2,1]
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,1,3]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,1,3,2]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,2,3]
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm8 = xmm8[0,2,3,3,4,5,6,7]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,2,1]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,0,1,3]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,1,3,2]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,2,3]
; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm14 = zmm14 ^ (mem & (zmm14 ^ zmm13))
-; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm11 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm3 = zmm11 ^ (zmm28 & (zmm3 ^ zmm11))
; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm5 # 32-byte Folded Reload
; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm6 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm5 ^ (zmm28 & (zmm6 ^ zmm5))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm5 ^ (zmm26 & (zmm6 ^ zmm5))
+; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm5 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm8 # 32-byte Folded Reload
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm8 = zmm5 ^ (zmm26 & (zmm8 ^ zmm5))
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535]
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm3 = zmm0 ^ (zmm5 & (zmm3 ^ zmm0))
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm2 ^ (zmm5 & (zmm6 ^ zmm2))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm0 ^ (zmm5 & (zmm6 ^ zmm0))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm8 = zmm4 ^ (zmm5 & (zmm8 ^ zmm4))
; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm0, %zmm0
-; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm2, %zmm2
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm2 = zmm0 ^ (mem & (zmm2 ^ zmm0))
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm0, %zmm0
+; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm4, %zmm4
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm0 ^ (mem & (zmm4 ^ zmm0))
; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT: vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm0 # 64-byte Folded Reload
; AVX512-FCP-NEXT: # zmm0 = zmm0 | (zmm1 & mem)
-; AVX512-FCP-NEXT: vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm9 # 64-byte Folded Reload
-; AVX512-FCP-NEXT: # zmm9 = zmm9 | (zmm1 & mem)
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm9 = zmm9 ^ (mem & (zmm9 ^ zmm2))
+; AVX512-FCP-NEXT: vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm12 # 64-byte Folded Reload
+; AVX512-FCP-NEXT: # zmm12 = zmm12 | (zmm1 & mem)
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm12 = zmm12 ^ (mem & (zmm12 ^ zmm4))
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512-FCP-NEXT: vmovdqa64 %zmm9, 320(%rax)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm4, 256(%rax)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm10, 192(%rax)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm20, 128(%rax)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm26, (%rax)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm12, 320(%rax)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm2, 256(%rax)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm7, 192(%rax)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm16, 128(%rax)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm30, (%rax)
; AVX512-FCP-NEXT: vmovdqa64 %zmm29, 448(%rax)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm19, 704(%rax)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm23, 640(%rax)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm15, 704(%rax)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm19, 640(%rax)
; AVX512-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512-FCP-NEXT: vmovaps %zmm1, 576(%rax)
; AVX512-FCP-NEXT: vmovdqa64 %zmm14, 384(%rax)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm6, 64(%rax)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm3, 512(%rax)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm12, 832(%rax)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm8, 64(%rax)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm6, 512(%rax)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm3, 832(%rax)
; AVX512-FCP-NEXT: vmovdqa64 %zmm0, 768(%rax)
-; AVX512-FCP-NEXT: addq $1576, %rsp # imm = 0x628
+; AVX512-FCP-NEXT: addq $1544, %rsp # imm = 0x608
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
; AVX512DQ-LABEL: store_i16_stride7_vf64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: subq $2840, %rsp # imm = 0xB18
+; AVX512DQ-NEXT: subq $2648, %rsp # imm = 0xA58
; AVX512DQ-NEXT: vmovdqa 96(%rcx), %ymm6
-; AVX512DQ-NEXT: vmovdqa 96(%rdx), %ymm13
+; AVX512DQ-NEXT: vmovdqa 96(%rdx), %ymm15
; AVX512DQ-NEXT: vmovdqa 96(%rdi), %ymm7
; AVX512DQ-NEXT: vmovdqa 96(%rsi), %ymm8
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128]
; AVX512DQ-NEXT: vpshufb %ymm0, %ymm6, %ymm2
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm1 = [u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u]
-; AVX512DQ-NEXT: vpshufb %ymm1, %ymm13, %ymm3
-; AVX512DQ-NEXT: vporq %ymm2, %ymm3, %ymm17
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm10 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512DQ-NEXT: vpshufb %ymm10, %ymm8, %ymm2
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm11 = [12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19]
-; AVX512DQ-NEXT: vpshufb %ymm11, %ymm7, %ymm3
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm15, %ymm3
; AVX512DQ-NEXT: vporq %ymm2, %ymm3, %ymm18
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm8, %ymm2
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm12 = [12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19]
+; AVX512DQ-NEXT: vpshufb %ymm12, %ymm7, %ymm4
+; AVX512DQ-NEXT: vporq %ymm2, %ymm4, %ymm19
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
; AVX512DQ-NEXT: vmovdqa 64(%r9), %ymm2
; AVX512DQ-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpshufb %ymm12, %ymm2, %ymm2
-; AVX512DQ-NEXT: vmovdqa 64(%r8), %ymm3
-; AVX512DQ-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-NEXT: vpshufb %ymm11, %ymm2, %ymm2
+; AVX512DQ-NEXT: vmovdqa 64(%r8), %ymm4
+; AVX512DQ-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm9 = [u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u]
-; AVX512DQ-NEXT: vpshufb %ymm9, %ymm3, %ymm3
-; AVX512DQ-NEXT: vpor %ymm2, %ymm3, %ymm2
+; AVX512DQ-NEXT: vpshufb %ymm9, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512DQ-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vmovdqa 64(%rcx), %ymm3
-; AVX512DQ-NEXT: vpshufb %ymm0, %ymm3, %ymm2
-; AVX512DQ-NEXT: vmovdqa64 %ymm3, %ymm26
-; AVX512DQ-NEXT: vmovdqa 64(%rdx), %ymm4
-; AVX512DQ-NEXT: vpshufb %ymm1, %ymm4, %ymm3
-; AVX512DQ-NEXT: vmovdqa64 %ymm4, %ymm27
-; AVX512DQ-NEXT: vpor %ymm2, %ymm3, %ymm2
+; AVX512DQ-NEXT: vmovdqa 64(%rcx), %ymm4
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm4, %ymm2
+; AVX512DQ-NEXT: vmovdqa64 %ymm4, %ymm29
+; AVX512DQ-NEXT: vmovdqa 64(%rdx), %ymm5
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm5, %ymm4
+; AVX512DQ-NEXT: vmovdqa64 %ymm5, %ymm28
+; AVX512DQ-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512DQ-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vmovdqa 64(%rsi), %ymm3
-; AVX512DQ-NEXT: vpshufb %ymm10, %ymm3, %ymm2
-; AVX512DQ-NEXT: vmovdqa64 %ymm3, %ymm23
-; AVX512DQ-NEXT: vmovdqa 64(%rdi), %ymm4
-; AVX512DQ-NEXT: vpshufb %ymm11, %ymm4, %ymm3
-; AVX512DQ-NEXT: vmovdqa64 %ymm4, %ymm22
-; AVX512DQ-NEXT: vpor %ymm2, %ymm3, %ymm2
+; AVX512DQ-NEXT: vmovdqa 64(%rsi), %ymm4
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm2
+; AVX512DQ-NEXT: vmovdqa64 %ymm4, %ymm24
+; AVX512DQ-NEXT: vmovdqa 64(%rdi), %ymm5
+; AVX512DQ-NEXT: vpshufb %ymm12, %ymm5, %ymm4
+; AVX512DQ-NEXT: vmovdqa64 %ymm5, %ymm23
+; AVX512DQ-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512DQ-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-NEXT: vmovdqa (%r9), %ymm2
; AVX512DQ-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpshufb %ymm12, %ymm2, %ymm2
-; AVX512DQ-NEXT: vmovdqa (%r8), %ymm3
-; AVX512DQ-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpshufb %ymm9, %ymm3, %ymm3
-; AVX512DQ-NEXT: vpor %ymm2, %ymm3, %ymm2
+; AVX512DQ-NEXT: vpshufb %ymm11, %ymm2, %ymm2
+; AVX512DQ-NEXT: vmovdqa (%r8), %ymm4
+; AVX512DQ-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-NEXT: vpshufb %ymm9, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512DQ-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-NEXT: vmovdqa (%rcx), %ymm2
; AVX512DQ-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-NEXT: vpshufb %ymm0, %ymm2, %ymm2
-; AVX512DQ-NEXT: vmovdqa (%rdx), %ymm3
-; AVX512DQ-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpshufb %ymm1, %ymm3, %ymm3
-; AVX512DQ-NEXT: vpor %ymm2, %ymm3, %ymm2
+; AVX512DQ-NEXT: vmovdqa (%rdx), %ymm4
+; AVX512DQ-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512DQ-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-NEXT: vmovdqa (%rsi), %ymm2
; AVX512DQ-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpshufb %ymm10, %ymm2, %ymm2
-; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512DQ-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpshufb %ymm11, %ymm3, %ymm3
-; AVX512DQ-NEXT: vpor %ymm2, %ymm3, %ymm2
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm4
+; AVX512DQ-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-NEXT: vpshufb %ymm12, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512DQ-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-NEXT: vmovdqa 32(%rcx), %ymm4
; AVX512DQ-NEXT: vpshufb %ymm0, %ymm4, %ymm0
@@ -13924,28 +13934,28 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT: vmovdqa 32(%rsi), %ymm5
-; AVX512DQ-NEXT: vpshufb %ymm10, %ymm5, %ymm0
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm5, %ymm0
; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm3
-; AVX512DQ-NEXT: vpshufb %ymm11, %ymm3, %ymm1
+; AVX512DQ-NEXT: vpshufb %ymm12, %ymm3, %ymm1
; AVX512DQ-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT: vmovdqa 32(%r8), %ymm1
; AVX512DQ-NEXT: vpshufb %ymm9, %ymm1, %ymm9
; AVX512DQ-NEXT: vmovdqa 32(%r9), %ymm0
-; AVX512DQ-NEXT: vpshufb %ymm12, %ymm0, %ymm10
+; AVX512DQ-NEXT: vpshufb %ymm11, %ymm0, %ymm10
; AVX512DQ-NEXT: vpor %ymm10, %ymm9, %ymm9
; AVX512DQ-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27,22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27]
; AVX512DQ-NEXT: # ymm10 = mem[0,1,0,1]
; AVX512DQ-NEXT: vpshufb %ymm10, %ymm6, %ymm9
-; AVX512DQ-NEXT: vmovdqa64 %ymm10, %ymm31
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm10 = ymm13[2,2,2,2,6,6,6,6]
+; AVX512DQ-NEXT: vmovdqa64 %ymm10, %ymm27
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm10 = ymm15[2,2,2,2,6,6,6,6]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm9 = ymm9[0,1],ymm10[2],ymm9[3,4],ymm10[5],ymm9[6,7,8,9],ymm10[10],ymm9[11,12],ymm10[13],ymm9[14,15]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,2,3]
; AVX512DQ-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm9 = ymm6[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm9 = ymm9[2,2,2,2,6,6,6,6]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm10 = ymm13[3,3,3,3,7,7,7,7]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm10 = ymm15[3,3,3,3,7,7,7,7]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm9 = ymm10[0],ymm9[1],ymm10[2,3],ymm9[4],ymm10[5,6,7,8],ymm9[9],ymm10[10,11],ymm9[12],ymm10[13,14,15]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,2,2,3]
; AVX512DQ-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -13963,12 +13973,11 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-NEXT: vmovdqa 96(%r9), %ymm9
; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm10 = ymm9[0,1,2,3,5,5,7,6,8,9,10,11,13,13,15,14]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm10 = ymm10[3,3,3,3]
-; AVX512DQ-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm21 = ymm10[3,3,3,3]
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm10 = ymm9[2,1,2,3,6,5,6,7]
; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm10 = ymm10[0,0,3,3,4,5,6,7,8,8,11,11,12,13,14,15]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm30 = ymm10[2,2,2,2]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm10 = ymm13[0,1,1,3,4,5,5,7]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm20 = ymm10[2,2,2,2]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm10 = ymm15[0,1,1,3,4,5,5,7]
; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm6 = ymm6[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm6 = ymm6[0,0,0,0,4,4,4,4]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm6 = ymm10[0,1],ymm6[2],ymm10[3,4],ymm6[5],ymm10[6,7,8,9],ymm6[10],ymm10[11,12],ymm6[13],ymm10[14,15]
@@ -13979,18 +13988,17 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm7 = ymm8[0,1],ymm7[2],ymm8[3,4],ymm7[5],ymm8[6,7,8,9],ymm7[10],ymm8[11,12],ymm7[13],ymm8[14,15]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,2,3]
; AVX512DQ-NEXT: vprold $16, %ymm9, %ymm8
-; AVX512DQ-NEXT: vpshufb %ymm12, %ymm9, %ymm9
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm6, %zmm17, %zmm6
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm7, %zmm18, %zmm7
+; AVX512DQ-NEXT: vpshufb %ymm11, %ymm9, %ymm9
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm6, %zmm18, %zmm6
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm7, %zmm19, %zmm7
; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm7 = zmm6 ^ (mem & (zmm7 ^ zmm6))
; AVX512DQ-NEXT: vmovdqa 96(%r8), %ymm6
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm10 = ymm6[1,2,2,3,5,6,6,7]
; AVX512DQ-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm10 = ymm6[0,0,2,1,4,4,6,5]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm6[14,15,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm6[16,17,u,u,u,u],zero,zero
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm11 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535]
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm6 = ymm6 | (ymm7 & ymm11)
-; AVX512DQ-NEXT: vmovdqa64 %zmm11, %zmm12
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm12 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535]
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm6 = ymm6 | (ymm7 & ymm12)
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm11 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm9 = ymm9 | (ymm6 & ymm11)
; AVX512DQ-NEXT: vextracti64x4 $1, %zmm7, %ymm6
@@ -14003,33 +14011,30 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm6 = zmm9[0,1,2,3],zmm6[0,1,2,3]
; AVX512DQ-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm18 = [0,5,0,0,0,6,0,0,6,0,0,0,7,0,0,7]
; AVX512DQ-NEXT: vmovdqa 96(%rax), %ymm6
-; AVX512DQ-NEXT: vpermd %zmm6, %zmm18, %zmm7
-; AVX512DQ-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm7 = ymm6[0,1,1,3,4,5,5,7]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm11 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
-; AVX512DQ-NEXT: vpshufb %ymm11, %ymm6, %ymm6
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm10 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
+; AVX512DQ-NEXT: vpshufb %ymm10, %ymm6, %ymm6
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,2,2,3]
; AVX512DQ-NEXT: vpandn %ymm7, %ymm12, %ymm7
-; AVX512DQ-NEXT: vmovdqa64 %zmm12, %zmm19
+; AVX512DQ-NEXT: vmovdqa64 %zmm12, %zmm17
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm6
; AVX512DQ-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm28 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535]
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm30 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535]
; AVX512DQ-NEXT: vpbroadcastd 72(%rax), %ymm6
-; AVX512DQ-NEXT: vpandnq %ymm6, %ymm28, %ymm6
+; AVX512DQ-NEXT: vpandnq %ymm6, %ymm30, %ymm6
; AVX512DQ-NEXT: vmovdqa 64(%rax), %ymm7
; AVX512DQ-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpshufb %ymm11, %ymm7, %ymm7
+; AVX512DQ-NEXT: vpshufb %ymm10, %ymm7, %ymm7
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm6
; AVX512DQ-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT: vmovdqa 64(%r9), %xmm7
; AVX512DQ-NEXT: vmovdqa 64(%r8), %xmm8
; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
-; AVX512DQ-NEXT: vmovdqa64 %xmm8, %xmm17
-; AVX512DQ-NEXT: vmovdqa64 %xmm7, %xmm20
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm15 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512DQ-NEXT: vpshufb %xmm15, %xmm6, %xmm6
+; AVX512DQ-NEXT: vmovdqa64 %xmm8, %xmm18
+; AVX512DQ-NEXT: vmovdqa64 %xmm7, %xmm22
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm14 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
+; AVX512DQ-NEXT: vpshufb %xmm14, %xmm6, %xmm6
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,1,1]
; AVX512DQ-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT: vmovdqa 64(%rcx), %xmm9
@@ -14038,75 +14043,75 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm8 = xmm8[0,2,3,3,4,5,6,7]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,2,1]
; AVX512DQ-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vmovdqa 64(%rdi), %xmm14
-; AVX512DQ-NEXT: vmovdqa 64(%rsi), %xmm10
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} xmm8 = xmm10[4],xmm14[4],xmm10[5],xmm14[5],xmm10[6],xmm14[6],xmm10[7],xmm14[7]
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm8 = xmm8[2,1,2,3,4,5,6,7]
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,5,5,4]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,1,3]
-; AVX512DQ-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vpbroadcastd 8(%rax), %ymm8
-; AVX512DQ-NEXT: vpandnq %ymm8, %ymm28, %ymm8
-; AVX512DQ-NEXT: vmovdqa (%rax), %ymm12
-; AVX512DQ-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpshufb %ymm11, %ymm12, %ymm13
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm13, %zmm8, %zmm8
-; AVX512DQ-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vmovdqa 64(%rdi), %xmm11
+; AVX512DQ-NEXT: vmovdqa 64(%rsi), %xmm8
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} xmm12 = xmm8[4],xmm11[4],xmm8[5],xmm11[5],xmm8[6],xmm11[6],xmm8[7],xmm11[7]
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm12 = xmm12[2,1,2,3,4,5,6,7]
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,5,5,4]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,0,1,3]
+; AVX512DQ-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vpbroadcastd 8(%rax), %ymm12
+; AVX512DQ-NEXT: vpandnq %ymm12, %ymm30, %ymm12
+; AVX512DQ-NEXT: vmovdqa (%rax), %ymm13
+; AVX512DQ-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-NEXT: vpshufb %ymm10, %ymm13, %ymm13
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm13, %zmm12, %zmm12
+; AVX512DQ-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT: vmovdqa (%r9), %xmm6
-; AVX512DQ-NEXT: vmovdqa (%r8), %xmm12
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} xmm8 = xmm12[4],xmm6[4],xmm12[5],xmm6[5],xmm12[6],xmm6[6],xmm12[7],xmm6[7]
-; AVX512DQ-NEXT: vmovdqa64 %xmm12, %xmm29
-; AVX512DQ-NEXT: vmovdqa64 %xmm6, %xmm24
-; AVX512DQ-NEXT: vpshufb %xmm15, %xmm8, %xmm8
-; AVX512DQ-NEXT: vmovdqa64 %xmm15, %xmm25
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,1,1]
-; AVX512DQ-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vmovdqa (%rcx), %xmm8
+; AVX512DQ-NEXT: vmovdqa (%r8), %xmm13
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} xmm12 = xmm13[4],xmm6[4],xmm13[5],xmm6[5],xmm13[6],xmm6[6],xmm13[7],xmm6[7]
+; AVX512DQ-NEXT: vmovdqa64 %xmm13, %xmm25
+; AVX512DQ-NEXT: vmovdqa64 %xmm6, %xmm26
+; AVX512DQ-NEXT: vpshufb %xmm14, %xmm12, %xmm12
+; AVX512DQ-NEXT: vmovdqa64 %xmm14, %xmm31
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,0,1,1]
+; AVX512DQ-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vmovdqa (%rcx), %xmm15
; AVX512DQ-NEXT: vmovdqa (%rdx), %xmm13
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} xmm15 = xmm13[4],xmm8[4],xmm13[5],xmm8[5],xmm13[6],xmm8[6],xmm13[7],xmm8[7]
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm15 = xmm15[0,2,3,3,4,5,6,7]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm12 = ymm15[0,0,2,1]
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} xmm12 = xmm13[4],xmm15[4],xmm13[5],xmm15[5],xmm13[6],xmm15[6],xmm13[7],xmm15[7]
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm12 = xmm12[0,2,3,3,4,5,6,7]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,0,2,1]
; AVX512DQ-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm6
-; AVX512DQ-NEXT: vmovdqa (%rsi), %xmm12
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} xmm15 = xmm12[4],xmm6[4],xmm12[5],xmm6[5],xmm12[6],xmm6[6],xmm12[7],xmm6[7]
-; AVX512DQ-NEXT: vmovdqa64 %xmm12, %xmm21
+; AVX512DQ-NEXT: vmovdqa (%rsi), %xmm14
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} xmm12 = xmm14[4],xmm6[4],xmm14[5],xmm6[5],xmm14[6],xmm6[6],xmm14[7],xmm6[7]
+; AVX512DQ-NEXT: vmovdqa64 %xmm14, %xmm19
; AVX512DQ-NEXT: vmovdqa64 %xmm6, %xmm16
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm15 = xmm15[2,1,2,3,4,5,6,7]
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,4,5,5,4]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm12 = ymm15[0,0,1,3]
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm12 = xmm12[2,1,2,3,4,5,6,7]
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,5,5,4]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,0,1,3]
; AVX512DQ-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm15 = ymm4[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm15 = ymm15[0,0,0,0,4,4,4,4]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm12 = ymm2[0,1,1,3,4,5,5,7]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm12 = ymm12[0,1],ymm15[2],ymm12[3,4],ymm15[5],ymm12[6,7,8,9],ymm15[10],ymm12[11,12],ymm15[13],ymm12[14,15]
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm12 = ymm4[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm12 = ymm12[0,0,0,0,4,4,4,4]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm14 = ymm2[0,1,1,3,4,5,5,7]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm12 = ymm14[0,1],ymm12[2],ymm14[3,4],ymm12[5],ymm14[6,7,8,9],ymm12[10],ymm14[11,12],ymm12[13],ymm14[14,15]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,1,3,2]
; AVX512DQ-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm12 = ymm5[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm12 = ymm12[0,0,2,1,4,4,6,5]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm15 = ymm3[1,1,1,1,5,5,5,5]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm12 = ymm12[0,1],ymm15[2],ymm12[3,4],ymm15[5],ymm12[6,7,8,9],ymm15[10],ymm12[11,12],ymm15[13],ymm12[14,15]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm14 = ymm3[1,1,1,1,5,5,5,5]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm12 = ymm12[0,1],ymm14[2],ymm12[3,4],ymm14[5],ymm12[6,7,8,9],ymm14[10],ymm12[11,12],ymm14[13],ymm12[14,15]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,2,2,3]
; AVX512DQ-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vmovdqa 32(%rax), %ymm15
-; AVX512DQ-NEXT: vpshufb %ymm11, %ymm15, %ymm11
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm12 = ymm15[0,1,1,3,4,5,5,7]
+; AVX512DQ-NEXT: vmovdqa 32(%rax), %ymm12
+; AVX512DQ-NEXT: vpshufb %ymm10, %ymm12, %ymm10
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm12 = ymm12[0,1,1,3,4,5,5,7]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,2,2,3]
-; AVX512DQ-NEXT: vpandnq %ymm12, %ymm19, %ymm12
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm12, %zmm11, %zmm11
-; AVX512DQ-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm11 = ymm0[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm11 = ymm11[0,0,0,0,4,4,4,4]
+; AVX512DQ-NEXT: vpandnq %ymm12, %ymm17, %ymm12
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm12, %zmm10, %zmm10
+; AVX512DQ-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm10 = ymm0[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm10 = ymm10[0,0,0,0,4,4,4,4]
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm12 = ymm1[0,0,2,1,4,4,6,5]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm11 = ymm11[0,1,2],ymm12[3],ymm11[4,5],ymm12[6],ymm11[7,8,9,10],ymm12[11],ymm11[12,13],ymm12[14],ymm11[15]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3]
-; AVX512DQ-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vmovdqa64 %ymm31, %ymm6
-; AVX512DQ-NEXT: vpshufb %ymm6, %ymm4, %ymm11
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm10 = ymm10[0,1,2],ymm12[3],ymm10[4,5],ymm12[6],ymm10[7,8,9,10],ymm12[11],ymm10[12,13],ymm12[14],ymm10[15]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,2,3,3]
+; AVX512DQ-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vmovdqa64 %ymm27, %ymm14
+; AVX512DQ-NEXT: vpshufb %ymm14, %ymm4, %ymm10
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm12 = ymm2[2,2,2,2,6,6,6,6]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm11 = ymm11[0,1],ymm12[2],ymm11[3,4],ymm12[5],ymm11[6,7,8,9],ymm12[10],ymm11[11,12],ymm12[13],ymm11[14,15]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,2,3]
-; AVX512DQ-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm10 = ymm10[0,1],ymm12[2],ymm10[3,4],ymm12[5],ymm10[6,7,8,9],ymm12[10],ymm10[11,12],ymm12[13],ymm10[14,15]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,2,2,3]
+; AVX512DQ-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[3,3,3,3,7,7,7,7]
; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm4 = ymm4[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,2,2,2,6,6,6,6]
@@ -14136,49 +14141,47 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpermd %zmm15, %zmm18, %zmm0
-; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vmovdqa64 %ymm23, %ymm12
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm0 = ymm12[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
+; AVX512DQ-NEXT: vmovdqa64 %ymm24, %ymm6
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm0 = ymm6[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,2,1,4,4,6,5]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm22[1,1,1,1,5,5,5,5]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm23[1,1,1,1,5,5,5,5]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm0 = ymm12[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm0 = ymm6[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,2,2,2,6,6,6,6]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm22[2,2,2,2,6,6,6,6]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm23[2,2,2,2,6,6,6,6]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6,7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13,14,15]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vmovdqa64 %ymm26, %ymm2
+; AVX512DQ-NEXT: vmovdqa64 %ymm29, %ymm2
; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm0 = ymm2[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm27[0,1,1,3,4,5,5,7]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm28[0,1,1,3,4,5,5,7]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7,8,9],ymm0[10],ymm1[11,12],ymm0[13],ymm1[14,15]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,3,2]
; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vpshufb %ymm6, %ymm2, %ymm0
-; AVX512DQ-NEXT: vmovdqa64 %ymm26, %ymm3
-; AVX512DQ-NEXT: vmovdqa64 %ymm31, %ymm11
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm27[2,2,2,2,6,6,6,6]
+; AVX512DQ-NEXT: vpshufb %ymm14, %ymm2, %ymm0
+; AVX512DQ-NEXT: vmovdqa64 %ymm29, %ymm3
+; AVX512DQ-NEXT: vmovdqa64 %ymm27, %ymm10
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm28[2,2,2,2,6,6,6,6]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm0 = ymm5[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
-; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm6[0,0,2,1,4,4,6,5]
+; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm12[0,0,2,1,4,4,6,5]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7,8,9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT: vprold $16, %ymm5, %ymm0
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm6[1,2,2,3,5,6,6,7]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm12[1,2,2,3,5,6,6,7]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7,8,9],ymm0[10],ymm1[11,12],ymm0[13],ymm1[14,15]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,3,2]
; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm0 = ymm27[3,3,3,3,7,7,7,7]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm0 = ymm28[3,3,3,3,7,7,7,7]
; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm1 = ymm3[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6,7,8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13,14,15]
@@ -14190,8 +14193,8 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,3,2,4,5,6,7]
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
; AVX512DQ-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = ymm22[3,3,3,3,7,7,7,7]
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm3 = ymm12[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = ymm23[3,3,3,3,7,7,7,7]
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm3 = ymm6[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[2,2,2,2,6,6,6,6]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3],ymm2[4,5],ymm3[6],ymm2[7,8,9,10],ymm3[11],ymm2[12,13],ymm3[14],ymm2[15]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,1,3,3]
@@ -14199,11 +14202,11 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-NEXT: vmovdqa 96(%rsi), %xmm2
; AVX512DQ-NEXT: vmovdqa 96(%rdi), %xmm3
; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX512DQ-NEXT: vmovdqa64 %xmm25, %xmm12
-; AVX512DQ-NEXT: vpshufb %xmm12, %xmm4, %xmm4
+; AVX512DQ-NEXT: vmovdqa64 %xmm31, %xmm6
+; AVX512DQ-NEXT: vpshufb %xmm6, %xmm4, %xmm4
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1]
; AVX512DQ-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm4 = ymm6[3,3,3,3,7,7,7,7]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm4 = ymm12[3,3,3,3,7,7,7,7]
; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm5 = ymm5[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm5 = ymm5[2,2,2,3,6,6,6,7]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7,8,9],ymm4[10],ymm5[11,12],ymm4[13],ymm5[14,15]
@@ -14214,8 +14217,8 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
; AVX512DQ-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-NEXT: vpbroadcastq {{.*#+}} xmm6 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
-; AVX512DQ-NEXT: vpshufb %xmm6, %xmm0, %xmm2
+; AVX512DQ-NEXT: vpbroadcastq {{.*#+}} xmm5 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
+; AVX512DQ-NEXT: vpshufb %xmm5, %xmm0, %xmm2
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,2,2]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2,3],xmm2[4],xmm3[5,6],xmm2[7]
; AVX512DQ-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -14226,379 +14229,384 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX512DQ-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX512DQ-NEXT: vpshufb %xmm12, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm18 = [16,18,19,19,19,19,0,0,0,1,0,1,2,3,2,3]
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm1 = xmm2[0,1,2,3,4,5,7,6]
-; AVX512DQ-NEXT: vpermt2d %zmm1, %zmm18, %zmm0
-; AVX512DQ-NEXT: vpbroadcastd 100(%rax), %ymm1
-; AVX512DQ-NEXT: vpbroadcastd 104(%rax), %ymm2
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm31
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm31 = zmm31 ^ (mem & (zmm31 ^ zmm0))
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3]
-; AVX512DQ-NEXT: vpshufb %xmm6, %xmm9, %xmm1
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm2 = xmm7[1,1,2,2]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2,3],xmm1[4],xmm2[5,6],xmm1[7]
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,1,3,2,4,5,6,7]
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm2 = [16,16,17,17,17,17,0,0,0,1,0,1,2,3,2,3]
-; AVX512DQ-NEXT: vpermt2d %zmm0, %zmm2, %zmm1
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm14[0],xmm10[0],xmm14[1],xmm10[1],xmm14[2],xmm10[2],xmm14[3],xmm10[3]
-; AVX512DQ-NEXT: vpshufb %xmm12, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,1]
-; AVX512DQ-NEXT: vprold $16, %xmm10, %xmm3
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm4 = xmm14[1,1,2,3]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1],xmm3[2],xmm4[3,4],xmm3[5],xmm4[6,7]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,2,1]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm14
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm0 = [65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0]
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm14 = zmm1 ^ (zmm0 & (zmm14 ^ zmm1))
-; AVX512DQ-NEXT: vmovdqa64 %xmm17, %xmm1
-; AVX512DQ-NEXT: vmovdqa64 %xmm20, %xmm3
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,4,5,7,6]
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,2,4,5,6,7]
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,1,0,1,0,1,1,3,16,18,19,19,19,19,0,0]
-; AVX512DQ-NEXT: vpermt2d %zmm3, %zmm4, %zmm1
-; AVX512DQ-NEXT: vpbroadcastd 64(%rax), %ymm3
-; AVX512DQ-NEXT: vpbroadcastd 68(%rax), %ymm5
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm5, %zmm3, %zmm25
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm19 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm25 = zmm25 ^ (zmm19 & (zmm25 ^ zmm1))
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm13[0],xmm8[0],xmm13[1],xmm8[1],xmm13[2],xmm8[2],xmm13[3],xmm8[3]
-; AVX512DQ-NEXT: vpshufb %xmm6, %xmm8, %xmm3
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm5 = xmm13[1,1,2,2]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0],xmm3[1],xmm5[2,3],xmm3[4],xmm5[5,6],xmm3[7]
+; AVX512DQ-NEXT: vmovdqa64 %xmm31, %xmm4
+; AVX512DQ-NEXT: vpshufb %xmm4, %xmm0, %xmm1
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm17 = [16,18,19,19,19,19,0,0,0,1,0,1,2,3,2,3]
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,7,6]
+; AVX512DQ-NEXT: vpermt2d %zmm2, %zmm17, %zmm1
+; AVX512DQ-NEXT: vpbroadcastd 100(%rax), %ymm2
+; AVX512DQ-NEXT: vpbroadcastd 104(%rax), %ymm3
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm1))
+; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3]
+; AVX512DQ-NEXT: vmovdqa %xmm5, %xmm12
+; AVX512DQ-NEXT: vpshufb %xmm5, %xmm9, %xmm2
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm3 = xmm7[1,1,2,2]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2,3],xmm2[4],xmm3[5,6],xmm2[7]
; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,2,4,5,6,7]
-; AVX512DQ-NEXT: vpermt2d %zmm1, %zmm2, %zmm3
-; AVX512DQ-NEXT: vmovdqa64 %xmm16, %xmm5
-; AVX512DQ-NEXT: vmovdqa64 %xmm21, %xmm2
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
-; AVX512DQ-NEXT: vpshufb %xmm12, %xmm1, %xmm1
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm3 = [16,16,17,17,17,17,0,0,0,1,0,1,2,3,2,3]
+; AVX512DQ-NEXT: vpermt2d %zmm1, %zmm3, %zmm2
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm11[0],xmm8[0],xmm11[1],xmm8[1],xmm11[2],xmm8[2],xmm11[3],xmm8[3]
+; AVX512DQ-NEXT: vpshufb %xmm4, %xmm1, %xmm1
+; AVX512DQ-NEXT: vmovdqa64 %xmm31, %xmm0
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,1,1]
-; AVX512DQ-NEXT: vprold $16, %xmm21, %xmm2
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm5 = xmm16[1,1,2,3]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm2 = xmm5[0,1],xmm2[2],xmm5[3,4],xmm2[5],xmm5[6,7]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,0,2,1]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm5
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm5 = zmm3 ^ (zmm0 & (zmm5 ^ zmm3))
-; AVX512DQ-NEXT: vmovdqa64 %xmm29, %xmm0
-; AVX512DQ-NEXT: vmovdqa64 %xmm24, %xmm1
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,5,7,6]
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,1,3,2,4,5,6,7]
-; AVX512DQ-NEXT: vpermt2d %zmm1, %zmm4, %zmm0
-; AVX512DQ-NEXT: vpbroadcastd (%rax), %ymm1
-; AVX512DQ-NEXT: vpbroadcastd 4(%rax), %ymm2
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm20
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm20 = zmm20 ^ (zmm19 & (zmm20 ^ zmm0))
+; AVX512DQ-NEXT: vprold $16, %xmm8, %xmm4
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm5 = xmm11[1,1,2,3]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1],xmm4[2],xmm5[3,4],xmm4[5],xmm5[6,7]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,2,1]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm4, %zmm1, %zmm14
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm1 = [65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0]
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm14 = zmm2 ^ (zmm1 & (zmm14 ^ zmm2))
+; AVX512DQ-NEXT: vmovdqa64 %xmm18, %xmm2
+; AVX512DQ-NEXT: vmovdqa64 %xmm22, %xmm4
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm4 = xmm2[0,1,2,3,4,5,7,6]
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,3,2,4,5,6,7]
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm5 = [0,1,0,1,0,1,1,3,16,18,19,19,19,19,0,0]
+; AVX512DQ-NEXT: vpermt2d %zmm4, %zmm5, %zmm2
+; AVX512DQ-NEXT: vpbroadcastd 64(%rax), %ymm4
+; AVX512DQ-NEXT: vpbroadcastd 68(%rax), %ymm6
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm6, %zmm4, %zmm23
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm18 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm23 = zmm23 ^ (zmm18 & (zmm23 ^ zmm2))
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm13[0],xmm15[0],xmm13[1],xmm15[1],xmm13[2],xmm15[2],xmm13[3],xmm15[3]
+; AVX512DQ-NEXT: vpshufb %xmm12, %xmm15, %xmm4
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm6 = xmm13[1,1,2,2]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0],xmm4[1],xmm6[2,3],xmm4[4],xmm6[5,6],xmm4[7]
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,3,2,4,5,6,7]
+; AVX512DQ-NEXT: vpermt2d %zmm2, %zmm3, %zmm4
+; AVX512DQ-NEXT: vmovdqa64 %xmm16, %xmm6
+; AVX512DQ-NEXT: vmovdqa64 %xmm19, %xmm3
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
+; AVX512DQ-NEXT: vpshufb %xmm0, %xmm2, %xmm2
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,0,1,1]
+; AVX512DQ-NEXT: vprold $16, %xmm19, %xmm3
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm6 = xmm16[1,1,2,3]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm3 = xmm6[0,1],xmm3[2],xmm6[3,4],xmm3[5],xmm6[6,7]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,2,1]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm7
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm7 = zmm4 ^ (zmm1 & (zmm7 ^ zmm4))
+; AVX512DQ-NEXT: vmovdqa64 %xmm25, %xmm1
+; AVX512DQ-NEXT: vmovdqa64 %xmm26, %xmm2
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm2 = xmm1[0,1,2,3,4,5,7,6]
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,2,4,5,6,7]
+; AVX512DQ-NEXT: vpermt2d %zmm2, %zmm5, %zmm1
+; AVX512DQ-NEXT: vpbroadcastd (%rax), %ymm2
+; AVX512DQ-NEXT: vpbroadcastd 4(%rax), %ymm3
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm19
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm19 = zmm19 ^ (zmm18 & (zmm19 ^ zmm1))
; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm0 = ymm6[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,2,1,4,4,6,5]
-; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm10[1,1,1,1,5,5,5,5]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
-; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm0 = ymm6[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,2,2,2,6,6,6,6]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm10[2,2,2,2,6,6,6,6]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6,7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13,14,15]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm0 = ymm9[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm1 = ymm6[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,0,2,1,4,4,6,5]
+; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = ymm11[1,1,1,1,5,5,5,5]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
+; AVX512DQ-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm1 = ymm6[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = ymm11[2,2,2,2,6,6,6,6]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6,7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13,14,15]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX512DQ-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm3[0,1,1,3,4,5,5,7]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7,8,9],ymm0[10],ymm1[11,12],ymm0[13],ymm1[14,15]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,3,2]
-; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT: vpshufb %ymm11, %ymm9, %ymm0
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm3[2,2,2,2,6,6,6,6]
-; AVX512DQ-NEXT: vmovdqa %ymm3, %ymm11
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
-; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm0 = ymm7[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm1 = ymm3[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,0,0,0,4,4,4,4]
+; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = ymm12[0,1,1,3,4,5,5,7]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7,8,9],ymm1[10],ymm2[11,12],ymm1[13],ymm2[14,15]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm28 = ymm1[2,1,3,2]
+; AVX512DQ-NEXT: vpshufb %ymm10, %ymm3, %ymm1
+; AVX512DQ-NEXT: vmovdqa %ymm3, %ymm10
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = ymm12[2,2,2,2,6,6,6,6]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = ymm8[0,0,2,1,4,4,6,5]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm3 = ymm0[0,1,2],ymm2[3],ymm0[4,5],ymm2[6],ymm0[7,8,9,10],ymm2[11],ymm0[12,13],ymm2[14],ymm0[15]
-; AVX512DQ-NEXT: vprold $16, %ymm7, %ymm0
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = ymm8[1,2,2,3,5,6,6,7]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm4 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7,8,9],ymm0[10],ymm2[11,12],ymm0[13],ymm2[14,15]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm0 = ymm11[3,3,3,3,7,7,7,7]
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm2 = ymm9[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[2,2,2,2,6,6,6,6]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4],ymm0[5,6,7,8],ymm2[9],ymm0[10,11],ymm2[12],ymm0[13,14,15]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm0 = ymm10[3,3,3,3,7,7,7,7]
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm1 = ymm8[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,0,0,0,4,4,4,4]
+; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm3 = ymm9[0,0,2,1,4,4,6,5]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm4 = ymm1[0,1,2],ymm3[3],ymm1[4,5],ymm3[6],ymm1[7,8,9,10],ymm3[11],ymm1[12,13],ymm3[14],ymm1[15]
+; AVX512DQ-NEXT: vprold $16, %ymm8, %ymm1
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm3 = ymm9[1,2,2,3,5,6,6,7]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm5 = ymm3[0,1],ymm1[2],ymm3[3,4],ymm1[5],ymm3[6,7,8,9],ymm1[10],ymm3[11,12],ymm1[13],ymm3[14,15]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm12[3,3,3,3,7,7,7,7]
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm3 = ymm10[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[2,2,2,2,6,6,6,6]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm3 = ymm1[0],ymm3[1],ymm1[2,3],ymm3[4],ymm1[5,6,7,8],ymm3[9],ymm1[10,11],ymm3[12],ymm1[13,14,15]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm11[3,3,3,3,7,7,7,7]
; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm6 = ymm6[2,2,2,2,6,6,6,6]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1,2],ymm6[3],ymm0[4,5],ymm6[6],ymm0[7,8,9,10],ymm6[11],ymm0[12,13],ymm6[14],ymm0[15]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm6 = ymm8[3,3,3,3,7,7,7,7]
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm7 = ymm7[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm7 = ymm7[2,2,2,3,6,6,6,7]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm6 = ymm7[0,1],ymm6[2],ymm7[3,4],ymm6[5],ymm7[6,7,8,9],ymm6[10],ymm7[11,12],ymm6[13],ymm7[14,15]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1,2],ymm6[3],ymm1[4,5],ymm6[6],ymm1[7,8,9,10],ymm6[11],ymm1[12,13],ymm6[14],ymm1[15]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm6 = ymm9[3,3,3,3,7,7,7,7]
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm8 = ymm8[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm8 = ymm8[2,2,2,3,6,6,6,7]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm6 = ymm8[0,1],ymm6[2],ymm8[3,4],ymm6[5],ymm8[6,7,8,9],ymm6[10],ymm8[11,12],ymm6[13],ymm8[14,15]
; AVX512DQ-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm22 = ymm1[2,2,2,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm26 = ymm3[2,2,3,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm24 = ymm4[2,1,3,2]
-; AVX512DQ-NEXT: vmovdqa 32(%rdi), %xmm9
-; AVX512DQ-NEXT: vmovdqa 32(%rsi), %xmm10
-; AVX512DQ-NEXT: vprold $16, %xmm10, %xmm1
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm3 = xmm9[1,1,2,3]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1],xmm1[2],xmm3[3,4],xmm1[5],xmm3[6,7]
-; AVX512DQ-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm21 = ymm2[0,2,2,3]
-; AVX512DQ-NEXT: vmovdqa 32(%r9), %xmm1
-; AVX512DQ-NEXT: vmovdqa 32(%r8), %xmm2
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3]
-; AVX512DQ-NEXT: vpshufb %xmm12, %xmm2, %xmm3
-; AVX512DQ-NEXT: vpshufb %xmm12, %xmm1, %xmm1
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm2 = xmm6[0,1,2,3,4,5,7,6]
-; AVX512DQ-NEXT: vmovdqa64 %xmm6, %xmm27
-; AVX512DQ-NEXT: vpermt2d %zmm2, %zmm18, %zmm1
-; AVX512DQ-NEXT: vpbroadcastd 36(%rax), %ymm2
-; AVX512DQ-NEXT: vpbroadcastd 40(%rax), %ymm4
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm4, %zmm2, %zmm13
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm13 = zmm13 ^ (mem & (zmm13 ^ zmm1))
-; AVX512DQ-NEXT: vmovdqa 32(%rcx), %xmm7
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm25 = ymm2[2,2,2,3]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm29 = ymm4[2,2,3,3]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm27 = ymm5[2,1,3,2]
+; AVX512DQ-NEXT: vmovdqa 32(%rdi), %xmm10
+; AVX512DQ-NEXT: vmovdqa 32(%rsi), %xmm11
+; AVX512DQ-NEXT: vprold $16, %xmm11, %xmm2
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm4 = xmm10[1,1,2,3]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0,1],xmm2[2],xmm4[3,4],xmm2[5],xmm4[6,7]
+; AVX512DQ-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm26 = ymm3[0,2,2,3]
+; AVX512DQ-NEXT: vmovdqa 32(%r9), %xmm2
+; AVX512DQ-NEXT: vmovdqa 32(%r8), %xmm3
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3]
+; AVX512DQ-NEXT: vpshufb %xmm0, %xmm3, %xmm4
+; AVX512DQ-NEXT: vpshufb %xmm0, %xmm2, %xmm2
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm3 = xmm6[0,1,2,3,4,5,7,6]
+; AVX512DQ-NEXT: vmovdqa64 %xmm6, %xmm24
+; AVX512DQ-NEXT: vpermt2d %zmm3, %zmm17, %zmm2
+; AVX512DQ-NEXT: vpbroadcastd 36(%rax), %ymm0
+; AVX512DQ-NEXT: vpbroadcastd 40(%rax), %ymm3
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm13
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm13 = zmm13 ^ (mem & (zmm13 ^ zmm2))
+; AVX512DQ-NEXT: vmovdqa 32(%rcx), %xmm8
; AVX512DQ-NEXT: vmovdqa 32(%rdx), %xmm6
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm4 = xmm4[0,1,3,2,4,5,6,7]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm12 = xmm4[0,0,1,1]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm15 = ymm0[2,1,3,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm8 = ymm3[0,0,1,1]
-; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 32-byte Folded Reload
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3]
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,1,3,2,4,5,6,7]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm12 = xmm0[0,0,1,1]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm17 = ymm1[2,1,3,3]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm15 = ymm4[0,0,1,1]
; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm1 # 32-byte Folded Reload
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm4 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 ^ (zmm4 & (zmm1 ^ zmm3))
-; AVX512DQ-NEXT: vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm3 = mem[2,1,3,2]
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm29 = [65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535]
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 ^ (ymm29 & (ymm3 ^ ymm1))
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm30 = ymm30 ^ (ymm28 & (ymm30 ^ ymm3))
-; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 32-byte Folded Reload
-; AVX512DQ-NEXT: vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm28, %zmm3 # 64-byte Folded Reload
-; AVX512DQ-NEXT: # zmm3 = (zmm3 & zmm28) | mem
; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
-; AVX512DQ-NEXT: vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm28, %zmm0 # 64-byte Folded Reload
-; AVX512DQ-NEXT: # zmm0 = (zmm0 & zmm28) | mem
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm9 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (zmm9 & (zmm0 ^ zmm1))
+; AVX512DQ-NEXT: vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
+; AVX512DQ-NEXT: # ymm1 = mem[2,1,3,2]
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm31 = [65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535]
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm1 = ymm1 ^ (ymm31 & (ymm1 ^ ymm0))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm20 = ymm20 ^ (ymm30 & (ymm20 ^ ymm1))
+; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
+; AVX512DQ-NEXT: vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm30, %zmm1 # 64-byte Folded Reload
+; AVX512DQ-NEXT: # zmm1 = (zmm1 & zmm30) | mem
; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm23 # 32-byte Folded Reload
+; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm4 # 32-byte Folded Reload
+; AVX512DQ-NEXT: vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm30, %zmm4 # 64-byte Folded Reload
+; AVX512DQ-NEXT: # zmm4 = (zmm4 & zmm30) | mem
; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm18 # 32-byte Folded Reload
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm28 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm18 = zmm23 ^ (zmm28 & (zmm18 ^ zmm23))
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535]
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm18 = zmm3 ^ (zmm2 & (zmm18 ^ zmm3))
+; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 32-byte Folded Reload
; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 32-byte Folded Reload
-; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm23 # 32-byte Folded Reload
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm23 = zmm3 ^ (zmm28 & (zmm23 ^ zmm3))
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm23 = zmm0 ^ (zmm2 & (zmm23 ^ zmm0))
-; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
-; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm17 # 32-byte Folded Reload
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm17 = zmm0 ^ (mem & (zmm17 ^ zmm0))
-; AVX512DQ-NEXT: vextracti64x4 $1, %zmm1, %ymm0
-; AVX512DQ-NEXT: vpternlogd {{.*#+}} ymm0 = mem ^ (ymm19 & (ymm0 ^ mem))
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
-; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = ymm2 ^ (ymm1 & (ymm2 ^ ymm0))
-; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512DQ-NEXT: vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm19 # 64-byte Folded Reload
-; AVX512DQ-NEXT: # zmm19 = zmm19 | (zmm1 & mem)
-; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-NEXT: vshufi64x2 $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
-; AVX512DQ-NEXT: # zmm0 = zmm0[0,1,2,3],mem[0,1,2,3]
-; AVX512DQ-NEXT: vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm0 # 64-byte Folded Reload
-; AVX512DQ-NEXT: # zmm0 = (zmm0 & zmm1) | mem
-; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm30[0,1,2,3],zmm2[0,1,2,3]
-; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0]
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm30 = zmm30 ^ (zmm2 & (zmm30 ^ zmm1))
+; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm30 # 32-byte Folded Reload
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm22 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm30 = zmm2 ^ (zmm22 & (zmm30 ^ zmm2))
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535]
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm30 = zmm1 ^ (zmm2 & (zmm30 ^ zmm1))
; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
-; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm11 = zmm11 ^ (zmm2 & (zmm11 ^ zmm1))
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535]
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm25 = zmm25 ^ (zmm1 & (zmm25 ^ zmm14))
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm20 = zmm20 ^ (zmm1 & (zmm20 ^ zmm5))
+; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm5 # 32-byte Folded Reload
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm5 = zmm1 ^ (zmm22 & (zmm5 ^ zmm1))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm5 = zmm4 ^ (zmm2 & (zmm5 ^ zmm4))
; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 32-byte Folded Reload
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535]
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm2 = zmm1 ^ (zmm5 & (zmm2 ^ zmm1))
+; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm16 # 32-byte Folded Reload
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm16 = zmm1 ^ (mem & (zmm16 ^ zmm1))
+; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} ymm0 = mem ^ (ymm18 & (ymm0 ^ mem))
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm21 = ymm21 ^ (ymm1 & (ymm21 ^ ymm0))
+; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512DQ-NEXT: vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm18 # 64-byte Folded Reload
+; AVX512DQ-NEXT: # zmm18 = zmm18 | (zmm1 & mem)
+; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-NEXT: vshufi64x2 $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
+; AVX512DQ-NEXT: # zmm3 = zmm0[0,1,2,3],mem[0,1,2,3]
+; AVX512DQ-NEXT: vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm3 # 64-byte Folded Reload
+; AVX512DQ-NEXT: # zmm3 = (zmm3 & zmm1) | mem
+; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm20[0,1,2,3],zmm21[0,1,2,3]
+; AVX512DQ-NEXT: vmovdqa64 64(%rax), %zmm2
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm21 = [0,13,0,0,0,14,0,0,14,0,0,0,15,0,0,15]
+; AVX512DQ-NEXT: vpermd %zmm2, %zmm21, %zmm20
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm4 = [65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0]
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm20 = zmm20 ^ (zmm4 & (zmm20 ^ zmm1))
+; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm1 # 32-byte Folded Reload
+; AVX512DQ-NEXT: vmovdqa64 (%rax), %zmm0
+; AVX512DQ-NEXT: vpermd %zmm0, %zmm21, %zmm21
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm21 = zmm21 ^ (zmm4 & (zmm21 ^ zmm1))
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535]
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm23 = zmm23 ^ (zmm1 & (zmm23 ^ zmm14))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm19 = zmm19 ^ (zmm1 & (zmm19 ^ zmm7))
; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
-; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm22, %zmm3, %zmm22
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm22 = zmm1 ^ (zmm5 & (zmm22 ^ zmm1))
+; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm7 # 32-byte Folded Reload
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm14 = [0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535]
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm7 = zmm1 ^ (zmm14 & (zmm7 ^ zmm1))
; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm14 = [0,0,4,0,0,0,5,0,0,5,0,0,0,6,0,0]
-; AVX512DQ-NEXT: vpermd 64(%rax), %zmm14, %zmm5
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm3 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535]
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 ^ (zmm3 & (zmm5 ^ zmm1))
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm24, %zmm26, %zmm1
-; AVX512DQ-NEXT: vpermd (%rax), %zmm14, %zmm14
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm14 = zmm14 ^ (zmm3 & (zmm14 ^ zmm1))
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0]
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 ^ (zmm1 & (zmm5 ^ zmm2))
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm14 = zmm14 ^ (zmm1 & (zmm14 ^ zmm22))
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm25, %zmm28, %zmm25
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm25 = zmm1 ^ (zmm14 & (zmm25 ^ zmm1))
; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
+; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm14 # 32-byte Folded Reload
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm28 = [0,0,4,0,0,0,5,0,0,5,0,0,0,6,0,0]
+; AVX512DQ-NEXT: vpermd %zmm2, %zmm28, %zmm1
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm4 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535]
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 ^ (zmm4 & (zmm1 ^ zmm14))
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm27, %zmm29, %zmm2
+; AVX512DQ-NEXT: vpermd %zmm0, %zmm28, %zmm14
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm14 = zmm14 ^ (zmm4 & (zmm14 ^ zmm2))
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0]
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 ^ (zmm0 & (zmm1 ^ zmm7))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm14 = zmm14 ^ (zmm0 & (zmm14 ^ zmm25))
+; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 32-byte Folded Reload
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (zmm4 & (zmm2 ^ zmm1))
-; AVX512DQ-NEXT: vpermq $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm1 = mem[0,1,1,3]
-; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm1
-; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm22 # 32-byte Folded Reload
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm22 = zmm1 ^ (zmm4 & (zmm22 ^ zmm1))
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm12[0,1,1,3]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm21, %zmm1
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm8, %zmm15, %zmm8
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm8 = zmm1 ^ (zmm4 & (zmm8 ^ zmm1))
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm4 = xmm7[u,u,4,5,u,u,u,u,6,7,u,u,u,u,8,9]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm9 = xmm6[1,1,2,2]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm4 = xmm9[0],xmm4[1],xmm9[2,3],xmm4[4],xmm9[5,6],xmm4[7]
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
-; AVX512DQ-NEXT: vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm7 = mem[2,2,2,3]
-; AVX512DQ-NEXT: vpshuflw $180, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
-; AVX512DQ-NEXT: # xmm9 = mem[0,1,3,2,4,5,6,7]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[0,1,1,3]
-; AVX512DQ-NEXT: vpshufd $254, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm10 = mem[2,3,3,3,6,7,7,7]
-; AVX512DQ-NEXT: vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm16 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm16 = mem[0,0,2,1]
-; AVX512DQ-NEXT: vpshuflw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
-; AVX512DQ-NEXT: # xmm12 = mem[2,1,2,3,4,5,6,7]
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,5,5,4]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,0,1,3]
-; AVX512DQ-NEXT: vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm21 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm21 = mem[0,0,1,1]
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (zmm9 & (zmm2 ^ zmm0))
+; AVX512DQ-NEXT: vpermq $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
+; AVX512DQ-NEXT: # ymm0 = mem[0,1,1,3]
+; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm4, %zmm0
+; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm7 # 32-byte Folded Reload
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm7 = zmm0 ^ (zmm9 & (zmm7 ^ zmm0))
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm12[0,1,1,3]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm26, %zmm0
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm15, %zmm17, %zmm12
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm12 = zmm0 ^ (zmm9 & (zmm12 ^ zmm0))
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm9 = xmm8[u,u,4,5,u,u,u,u,6,7,u,u,u,u,8,9]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm10 = xmm6[1,1,2,2]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm9 = xmm10[0],xmm9[1],xmm10[2,3],xmm9[4],xmm10[5,6],xmm9[7]
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7]
+; AVX512DQ-NEXT: vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
+; AVX512DQ-NEXT: # ymm8 = mem[2,2,2,3]
+; AVX512DQ-NEXT: vpshuflw $180, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
+; AVX512DQ-NEXT: # xmm10 = mem[0,1,3,2,4,5,6,7]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm10 = xmm10[0,1,1,3]
+; AVX512DQ-NEXT: vpshufd $254, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
+; AVX512DQ-NEXT: # ymm11 = mem[2,3,3,3,6,7,7,7]
+; AVX512DQ-NEXT: vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm25 # 32-byte Folded Reload
+; AVX512DQ-NEXT: # ymm25 = mem[0,0,2,1]
+; AVX512DQ-NEXT: vpshuflw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
+; AVX512DQ-NEXT: # xmm4 = mem[2,1,2,3,4,5,6,7]
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,5,4]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm27 = ymm4[0,0,1,3]
+; AVX512DQ-NEXT: vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm17 # 32-byte Folded Reload
+; AVX512DQ-NEXT: # ymm17 = mem[0,0,1,1]
; AVX512DQ-NEXT: vpshuflw $248, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
; AVX512DQ-NEXT: # xmm15 = mem[0,2,3,3,4,5,6,7]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm15 = ymm15[0,0,2,1]
-; AVX512DQ-NEXT: vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm24 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm24 = mem[2,2,2,3]
-; AVX512DQ-NEXT: vmovdqa64 %xmm27, %xmm3
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[0,1,3,2,4,5,6,7]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,1,1,3]
-; AVX512DQ-NEXT: vpshufd $254, {{[-0-9]+}}(%r{{[sb]}}p), %ymm26 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm26 = mem[2,3,3,3,6,7,7,7]
-; AVX512DQ-NEXT: vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm27 # 32-byte Folded Reload
-; AVX512DQ-NEXT: # ymm27 = mem[0,0,2,1]
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7]
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,4]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,1,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1]
+; AVX512DQ-NEXT: vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm26 # 32-byte Folded Reload
+; AVX512DQ-NEXT: # ymm26 = mem[2,2,2,3]
+; AVX512DQ-NEXT: vmovdqa64 %xmm24, %xmm4
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm4 = xmm4[0,1,3,2,4,5,6,7]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[0,1,1,3]
+; AVX512DQ-NEXT: vpshufd $254, {{[-0-9]+}}(%r{{[sb]}}p), %ymm28 # 32-byte Folded Reload
+; AVX512DQ-NEXT: # ymm28 = mem[2,3,3,3,6,7,7,7]
+; AVX512DQ-NEXT: vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm29 # 32-byte Folded Reload
+; AVX512DQ-NEXT: # ymm29 = mem[0,0,2,1]
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,4]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,3]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,0,1,1]
; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm6 = xmm6[0,2,3,3,4,5,6,7]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,2,1]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,0,0,1]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm9, %zmm7, %zmm7
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm9 = ymm10[2,1,3,2]
-; AVX512DQ-NEXT: vpbroadcastd 96(%rax), %ymm10
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm9 = zmm9 ^ (zmm29 & (zmm9 ^ zmm7))
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm24, %zmm3
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm26[2,1,3,2]
-; AVX512DQ-NEXT: vpbroadcastd 32(%rax), %ymm10
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm10, %zmm7, %zmm7
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm7 = zmm7 ^ (zmm29 & (zmm7 ^ zmm3))
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535]
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm9 = zmm9 ^ (zmm3 & (zmm9 ^ zmm22))
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm7 = zmm7 ^ (zmm3 & (zmm7 ^ zmm8))
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm12, %zmm16, %zmm3
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm15, %zmm21, %zmm8
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm8 = zmm3 ^ (zmm28 & (zmm8 ^ zmm3))
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm27, %zmm1
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm6, %zmm4, %zmm3
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm3 = zmm1 ^ (zmm28 & (zmm3 ^ zmm1))
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm1 = [65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0]
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm31 = zmm31 ^ (zmm1 & (zmm31 ^ zmm8))
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm13 = zmm13 ^ (zmm1 & (zmm13 ^ zmm3))
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm17))
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm11 = zmm11 ^ (mem & (zmm11 ^ zmm2))
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,0,1]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm10, %zmm8, %zmm8
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm10 = ymm11[2,1,3,2]
+; AVX512DQ-NEXT: vpbroadcastd 96(%rax), %ymm11
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm10
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm10 = zmm10 ^ (zmm31 & (zmm10 ^ zmm8))
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm4, %zmm26, %zmm4
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm8 = ymm28[2,1,3,2]
+; AVX512DQ-NEXT: vpbroadcastd 32(%rax), %ymm11
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm11, %zmm8, %zmm8
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm8 = zmm8 ^ (zmm31 & (zmm8 ^ zmm4))
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535]
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm10 = zmm10 ^ (zmm4 & (zmm10 ^ zmm7))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm8 = zmm8 ^ (zmm4 & (zmm8 ^ zmm12))
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm27, %zmm25, %zmm4
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm15, %zmm17, %zmm7
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm7 = zmm4 ^ (zmm22 & (zmm7 ^ zmm4))
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm29, %zmm0
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm6, %zmm9, %zmm4
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm4 = zmm0 ^ (zmm22 & (zmm4 ^ zmm0))
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm0 = [65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0]
+; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm6 = zmm6 ^ (zmm0 & (zmm6 ^ zmm7))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm13 = zmm13 ^ (zmm0 & (zmm13 ^ zmm4))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm3 = zmm3 ^ (mem & (zmm3 ^ zmm16))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm21 = zmm21 ^ (mem & (zmm21 ^ zmm2))
; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-NEXT: vmovdqa64 %zmm0, 320(%rax)
+; AVX512DQ-NEXT: vmovdqa64 %zmm3, 320(%rax)
; AVX512DQ-NEXT: vmovdqa64 %zmm13, 256(%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm7, 192(%rax)
+; AVX512DQ-NEXT: vmovdqa64 %zmm8, 192(%rax)
; AVX512DQ-NEXT: vmovdqa64 %zmm14, 128(%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm23, 64(%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm20, (%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm25, 448(%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm31, 704(%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm9, 640(%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm5, 576(%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm18, 512(%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm11, 384(%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm19, 768(%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm30, 832(%rax)
-; AVX512DQ-NEXT: addq $2840, %rsp # imm = 0xB18
+; AVX512DQ-NEXT: vmovdqa64 %zmm5, 64(%rax)
+; AVX512DQ-NEXT: vmovdqa64 %zmm19, (%rax)
+; AVX512DQ-NEXT: vmovdqa64 %zmm23, 448(%rax)
+; AVX512DQ-NEXT: vmovdqa64 %zmm6, 704(%rax)
+; AVX512DQ-NEXT: vmovdqa64 %zmm10, 640(%rax)
+; AVX512DQ-NEXT: vmovdqa64 %zmm1, 576(%rax)
+; AVX512DQ-NEXT: vmovdqa64 %zmm30, 512(%rax)
+; AVX512DQ-NEXT: vmovdqa64 %zmm21, 384(%rax)
+; AVX512DQ-NEXT: vmovdqa64 %zmm18, 768(%rax)
+; AVX512DQ-NEXT: vmovdqa64 %zmm20, 832(%rax)
+; AVX512DQ-NEXT: addq $2648, %rsp # imm = 0xA58
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQ-FCP-LABEL: store_i16_stride7_vf64:
; AVX512DQ-FCP: # %bb.0:
-; AVX512DQ-FCP-NEXT: subq $1576, %rsp # imm = 0x628
-; AVX512DQ-FCP-NEXT: vmovdqa 96(%rcx), %ymm4
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm4, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqa 96(%rdx), %ymm5
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm5, %ymm1
-; AVX512DQ-FCP-NEXT: vporq %ymm0, %ymm1, %ymm16
+; AVX512DQ-FCP-NEXT: subq $1544, %rsp # imm = 0x608
+; AVX512DQ-FCP-NEXT: vmovdqa 96(%rcx), %ymm3
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm3, %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqa 96(%rdx), %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm1, %ymm2
+; AVX512DQ-FCP-NEXT: vporq %ymm0, %ymm2, %ymm16
; AVX512DQ-FCP-NEXT: vmovdqa 96(%rsi), %ymm2
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm2, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqa 96(%rdi), %ymm3
+; AVX512DQ-FCP-NEXT: vmovdqa 96(%rdi), %ymm7
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm1
-; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm1, %ymm7
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
-; AVX512DQ-FCP-NEXT: vmovdqa 64(%r9), %ymm1
-; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm1, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm1, %ymm25
+; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm7, %ymm4
+; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm4, %ymm5
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
+; AVX512DQ-FCP-NEXT: vmovdqa 64(%r9), %ymm9
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm9, %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm9, %ymm25
; AVX512DQ-FCP-NEXT: vmovdqa 64(%r8), %ymm13
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm13, %ymm1
+; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm13, %ymm9
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm13, %ymm26
-; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm9, %ymm0
; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-FCP-NEXT: vmovdqa 64(%rcx), %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm0, %ymm14
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm28
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm14
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm29
; AVX512DQ-FCP-NEXT: vmovdqa 64(%rdx), %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm0, %ymm15
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm27
+; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm0, %ymm15
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm18
; AVX512DQ-FCP-NEXT: vpor %ymm14, %ymm15, %ymm14
; AVX512DQ-FCP-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-FCP-NEXT: vmovdqa 64(%rsi), %ymm0
; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm0, %ymm14
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm23
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm27
; AVX512DQ-FCP-NEXT: vmovdqa 64(%rdi), %ymm0
; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm0, %ymm15
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm29
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm28
; AVX512DQ-FCP-NEXT: vpor %ymm14, %ymm15, %ymm14
; AVX512DQ-FCP-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-FCP-NEXT: vmovdqa (%r9), %ymm0
; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm14
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm0, %ymm14
; AVX512DQ-FCP-NEXT: vmovdqa (%r8), %ymm0
; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm0, %ymm15
@@ -14606,10 +14614,10 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-FCP-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %ymm0
; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm0, %ymm14
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm14
; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %ymm0
; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm0, %ymm15
+; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm0, %ymm15
; AVX512DQ-FCP-NEXT: vpor %ymm14, %ymm15, %ymm14
; AVX512DQ-FCP-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %ymm0
@@ -14621,562 +14629,568 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-FCP-NEXT: vpor %ymm14, %ymm15, %ymm14
; AVX512DQ-FCP-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rcx), %ymm15
-; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm15, %ymm8
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm15, %ymm6
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdx), %ymm13
-; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm13, %ymm9
-; AVX512DQ-FCP-NEXT: vpor %ymm8, %ymm9, %ymm8
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm13, %ymm8
+; AVX512DQ-FCP-NEXT: vpor %ymm6, %ymm8, %ymm6
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rsi), %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm0, %ymm8
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm1, %ymm9
-; AVX512DQ-FCP-NEXT: vpor %ymm8, %ymm9, %ymm8
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm0, %ymm6
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm9
+; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm9, %ymm8
+; AVX512DQ-FCP-NEXT: vpor %ymm6, %ymm8, %ymm6
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT: vmovdqa 32(%r8), %ymm8
-; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm8, %ymm10
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%r9), %ymm9
-; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm9, %ymm11
-; AVX512DQ-FCP-NEXT: vporq %ymm11, %ymm10, %ymm20
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} ymm10 = ymm4[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm12 = ymm5[0,1,1,3,4,5,5,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm10 = ymm12[0,1],ymm10[2],ymm12[3,4],ymm10[5],ymm12[6,7,8,9],ymm10[10],ymm12[11,12],ymm10[13],ymm12[14,15]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,1,3,2]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm8, %ymm6
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%r9), %ymm10
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm10, %ymm11
+; AVX512DQ-FCP-NEXT: vporq %ymm11, %ymm6, %ymm21
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} ymm11 = ymm3[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm12 = ymm1[0,1,1,3,4,5,5,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm11 = ymm12[0,1],ymm11[2],ymm12[3,4],ymm11[5],ymm12[6,7,8,9],ymm11[10],ymm12[11,12],ymm11[13],ymm12[14,15]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,1,3,2]
; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm14 = [18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
; AVX512DQ-FCP-NEXT: # ymm14 = mem[0,1,0,1]
; AVX512DQ-FCP-NEXT: vpshufb %ymm14, %ymm2, %ymm12
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm14, %ymm17
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm14 = ymm3[1,1,1,1,5,5,5,5]
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm14 = ymm7[1,1,1,1,5,5,5,5]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm12 = ymm12[0,1],ymm14[2],ymm12[3,4],ymm14[5],ymm12[6,7,8,9],ymm14[10],ymm12[11,12],ymm14[13],ymm12[14,15]
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,2,2,3]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm16, %zmm10
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm7, %zmm12
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm12 = zmm10 ^ (mem & (zmm12 ^ zmm10))
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm16, %zmm11
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm5, %zmm12
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm12 = zmm11 ^ (mem & (zmm12 ^ zmm11))
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm14 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535]
-; AVX512DQ-FCP-NEXT: vmovdqa 96(%r8), %ymm7
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm7[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm7[14,15,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm7[16,17,u,u,u,u],zero,zero
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm10 = ymm10 | (ymm12 & ymm14)
-; AVX512DQ-FCP-NEXT: vmovdqa 96(%r9), %ymm11
-; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm11, %ymm6
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa 96(%r8), %ymm5
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm5[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm5[14,15,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm5[16,17,u,u,u,u],zero,zero
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm11 = ymm11 | (ymm12 & ymm14)
+; AVX512DQ-FCP-NEXT: vmovdqa 96(%r9), %ymm6
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm6, %ymm4
+; AVX512DQ-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm16 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm6 | (ymm10 & ymm16)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm16, %zmm21
-; AVX512DQ-FCP-NEXT: vextracti64x4 $1, %zmm12, %ymm10
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm4 = ymm4 | (ymm11 & ymm16)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm16, %zmm30
+; AVX512DQ-FCP-NEXT: vextracti64x4 $1, %zmm12, %ymm11
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm12 = [0,4,0,0,0,5,0,0]
-; AVX512DQ-FCP-NEXT: vpermd %ymm7, %ymm12, %ymm12
+; AVX512DQ-FCP-NEXT: vpermd %ymm5, %ymm12, %ymm12
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm16 = [65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0]
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm12 = ymm12 ^ (ymm16 & (ymm12 ^ ymm10))
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm16, %zmm18
-; AVX512DQ-FCP-NEXT: vprold $16, %ymm11, %ymm10
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,2,2,2]
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm12 = ymm12 ^ (ymm16 & (ymm12 ^ ymm11))
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm16, %zmm20
+; AVX512DQ-FCP-NEXT: vprold $16, %ymm6, %ymm11
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,2,2]
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm16 = [65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm10 = ymm10 ^ (ymm16 & (ymm10 ^ ymm12))
-; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm6 = zmm6[0,1,2,3],zmm10[0,1,2,3]
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm11 = ymm11 ^ (ymm16 & (ymm11 ^ ymm12))
+; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm4 = zmm4[0,1,2,3],zmm11[0,1,2,3]
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm10 = [0,1,4,5,4,5,5,7]
-; AVX512DQ-FCP-NEXT: vmovdqa 96(%rax), %ymm6
-; AVX512DQ-FCP-NEXT: vpermd %ymm6, %ymm10, %ymm10
-; AVX512DQ-FCP-NEXT: vpandn %ymm10, %ymm14, %ymm10
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm14, %zmm16
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm14 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm14, %ymm6, %ymm12
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm12, %zmm10
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [u,u,u,u,u,u,u,u,u,u,u,u,14,15,12,13,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm2, %ymm10
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm12 = ymm3[3,3,3,3,7,7,7,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm10 = ymm12[0,1,2],ymm10[3],ymm12[4,5],ymm10[6],ymm12[7,8,9,10],ymm10[11],ymm12[12,13],ymm10[14],ymm12[15]
-; AVX512DQ-FCP-NEXT: vpmovsxdq {{.*#+}} ymm12 = [151522058,0,421010202,421010202]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm2, %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm12, %ymm24
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[2,2,2,2,6,6,6,6]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6,7,8],ymm2[9],ymm3[10,11],ymm2[12],ymm3[13,14,15]
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm3 = [0,2,2,3,10,9,11,11]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm10, %zmm3, %zmm2
-; AVX512DQ-FCP-NEXT: vpmovsxdq {{.*#+}} ymm12 = [218894094,0,488382238,488382238]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm4, %ymm10
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm12, %ymm22
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm12 = ymm5[3,3,3,3,7,7,7,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm10 = ymm12[0],ymm10[1],ymm12[2,3],ymm10[4],ymm12[5,6,7,8],ymm10[9],ymm12[10,11],ymm10[12],ymm12[13,14,15]
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm12 = [22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27,22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27]
-; AVX512DQ-FCP-NEXT: # ymm12 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm11 = [0,1,4,5,4,5,5,7]
+; AVX512DQ-FCP-NEXT: vmovdqa 96(%rax), %ymm4
+; AVX512DQ-FCP-NEXT: vpermd %ymm4, %ymm11, %ymm11
+; AVX512DQ-FCP-NEXT: vpandn %ymm11, %ymm14, %ymm11
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm14, %zmm22
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm4, %ymm4
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm5 = ymm5[2,2,2,2,6,6,6,6]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7,8,9],ymm5[10],ymm4[11,12],ymm5[13],ymm4[14,15]
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm30 = [2,2,2,3,8,10,10,11]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm10, %zmm30, %zmm4
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm19 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm2 ^ (zmm19 & (zmm4 ^ zmm2))
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm4, %zmm4
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [5,0,0,0,6,0,0,6]
-; AVX512DQ-FCP-NEXT: vpermd %ymm7, %ymm2, %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm14 = [u,u,u,u,u,u,u,u,u,u,u,u,14,15,12,13,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm14, %ymm2, %ymm4
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm11 = ymm7[3,3,3,3,7,7,7,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm11[0,1,2],ymm4[3],ymm11[4,5],ymm4[6],ymm11[7,8,9,10],ymm4[11],ymm11[12,13],ymm4[14],ymm11[15]
+; AVX512DQ-FCP-NEXT: vpmovsxdq {{.*#+}} ymm6 = [151522058,0,421010202,421010202]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm2, %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm6, %ymm23
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm7 = ymm7[2,2,2,2,6,6,6,6]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm7[0],ymm2[1],ymm7[2,3],ymm2[4],ymm7[5,6,7,8],ymm2[9],ymm7[10,11],ymm2[12],ymm7[13,14,15]
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm7 = [0,2,2,3,10,9,11,11]
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm4, %zmm7, %zmm2
+; AVX512DQ-FCP-NEXT: vpmovsxdq {{.*#+}} ymm11 = [218894094,0,488382238,488382238]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm4
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm11, %ymm16
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm11 = ymm1[3,3,3,3,7,7,7,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm11[0],ymm4[1],ymm11[2,3],ymm4[4],ymm11[5,6,7,8],ymm4[9],ymm11[10,11],ymm4[12],ymm11[13,14,15]
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27,22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27]
+; AVX512DQ-FCP-NEXT: # ymm6 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm3, %ymm3
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm6, %ymm24
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm3[0,1],ymm1[2],ymm3[3,4],ymm1[5],ymm3[6,7,8,9],ymm1[10],ymm3[11,12],ymm1[13],ymm3[14,15]
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm19 = [2,2,2,3,8,10,10,11]
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm4, %zmm19, %zmm1
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm11 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm1 = zmm2 ^ (zmm11 & (zmm1 ^ zmm2))
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [5,0,0,0,6,0,0,6]
+; AVX512DQ-FCP-NEXT: vpermd %ymm5, %ymm1, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm3 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535]
+; AVX512DQ-FCP-NEXT: vpbroadcastd 72(%rax), %ymm1
+; AVX512DQ-FCP-NEXT: vpandn %ymm1, %ymm3, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa 64(%rax), %ymm2
; AVX512DQ-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm2 = [6,5,0,0,7,6,0,7,6,5,0,0,7,6,0,7]
-; AVX512DQ-FCP-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FCP-NEXT: vpermd %zmm6, %zmm2, %zmm4
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm6 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535]
-; AVX512DQ-FCP-NEXT: vpbroadcastd 72(%rax), %ymm4
-; AVX512DQ-FCP-NEXT: vpandn %ymm4, %ymm6, %ymm4
-; AVX512DQ-FCP-NEXT: vmovdqa 64(%rax), %ymm5
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpshufb %ymm14, %ymm5, %ymm5
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm4, %zmm4
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vpbroadcastd 8(%rax), %ymm4
-; AVX512DQ-FCP-NEXT: vpandn %ymm4, %ymm6, %ymm4
-; AVX512DQ-FCP-NEXT: vmovdqa (%rax), %ymm5
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpshufb %ymm14, %ymm5, %ymm5
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm4, %zmm4
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vpbroadcastd {{.*#+}} ymm10 = [18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm9, %ymm4
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm5 = ymm8[0,0,2,1,4,4,6,5]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0,1,2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7,8,9,10],ymm5[11],ymm4[12,13],ymm5[14],ymm4[15]
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm5 = [0,1,0,3,10,10,11,11]
-; AVX512DQ-FCP-NEXT: vpermi2q %zmm4, %zmm20, %zmm5
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm2, %ymm2
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vpbroadcastd 8(%rax), %ymm1
+; AVX512DQ-FCP-NEXT: vpandn %ymm1, %ymm3, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa (%rax), %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm2, %ymm2
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vpbroadcastd {{.*#+}} ymm6 = [18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm10, %ymm1
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm2 = ymm8[0,0,2,1,4,4,6,5]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7,8,9,10],ymm2[11],ymm1[12,13],ymm2[14],ymm1[15]
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm2 = [0,1,0,3,10,10,11,11]
+; AVX512DQ-FCP-NEXT: vpermi2q %zmm1, %zmm21, %zmm2
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm0, %ymm4
-; AVX512DQ-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm5 = ymm1[3,3,3,3,7,7,7,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm5[0,1,2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7,8,9,10],ymm4[11],ymm5[12,13],ymm4[14],ymm5[15]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm24, %ymm11
-; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm0, %ymm5
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm6 = ymm1[2,2,2,2,6,6,6,6]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2,3],ymm5[4],ymm6[5,6,7,8],ymm5[9],ymm6[10,11],ymm5[12],ymm6[13,14,15]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm4, %zmm3, %zmm5
+; AVX512DQ-FCP-NEXT: vpshufb %ymm14, %ymm0, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm2 = ymm9[3,3,3,3,7,7,7,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5],ymm1[6],ymm2[7,8,9,10],ymm1[11],ymm2[12,13],ymm1[14],ymm2[15]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm23, %ymm5
+; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm0, %ymm2
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm9[2,2,2,2,6,6,6,6]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6,7,8],ymm2[9],ymm3[10,11],ymm2[12],ymm3[13,14,15]
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm1, %zmm7, %zmm2
; AVX512DQ-FCP-NEXT: vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm22, %ymm7
-; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm15, %ymm3
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm16, %ymm7
+; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm15, %ymm1
; AVX512DQ-FCP-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm4 = ymm13[3,3,3,3,7,7,7,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6,7,8],ymm3[9],ymm4[10,11],ymm3[12],ymm4[13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm15, %ymm4
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm6 = ymm13[2,2,2,2,6,6,6,6]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm4[0,1],ymm6[2],ymm4[3,4],ymm6[5],ymm4[6,7,8,9],ymm6[10],ymm4[11,12],ymm6[13],ymm4[14,15]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm3, %zmm30, %zmm0
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm5 ^ (zmm19 & (zmm0 ^ zmm5))
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm13[3,3,3,3,7,7,7,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6,7,8],ymm1[9],ymm3[10,11],ymm1[12],ymm3[13,14,15]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm24, %ymm12
+; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm15, %ymm3
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm4 = ymm13[2,2,2,2,6,6,6,6]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7,8,9],ymm4[10],ymm3[11,12],ymm4[13],ymm3[14,15]
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm1, %zmm19, %zmm0
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm2 ^ (zmm11 & (zmm0 ^ zmm2))
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm4 = ymm8[3,3,3,3,7,7,7,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7,8,9],ymm4[10],ymm3[11,12],ymm4[13],ymm3[14,15]
-; AVX512DQ-FCP-NEXT: vprold $16, %ymm9, %ymm4
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm5 = ymm8[1,2,2,3,5,6,6,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7,8,9],ymm4[10],ymm5[11,12],ymm4[13],ymm5[14,15]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm2 = ymm8[3,3,3,3,7,7,7,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
+; AVX512DQ-FCP-NEXT: vprold $16, %ymm10, %ymm2
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm8[1,2,2,3,5,6,6,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7,8,9],ymm2[10],ymm3[11,12],ymm2[13],ymm3[14,15]
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm13 = [2,1,3,2,10,10,10,11]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm3, %zmm13, %zmm4
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rax), %ymm0
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm1, %zmm13, %zmm2
+; AVX512DQ-FCP-NEXT: vmovdqa64 (%rax), %zmm16
+; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [14,13,8,8,15,14,8,15,14,13,8,8,15,14,8,15]
+; AVX512DQ-FCP-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vpermd %zmm0, %zmm2, %zmm0
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (zmm18 & (zmm0 ^ zmm4))
+; AVX512DQ-FCP-NEXT: vpermd %zmm16, %zmm0, %zmm0
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (zmm20 & (zmm0 ^ zmm2))
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm23, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm0, %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm29, %ymm19
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm29[2,2,2,2,6,6,6,6]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6,7,8],ymm2[9],ymm3[10,11],ymm2[12],ymm3[13,14,15]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm17, %ymm15
-; AVX512DQ-FCP-NEXT: vpshufb %ymm15, %ymm0, %ymm3
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm23, %ymm18
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm4 = ymm29[1,1,1,1,5,5,5,5]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7,8,9],ymm4[10],ymm3[11,12],ymm4[13],ymm3[14,15]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm2, %zmm30, %zmm3
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm28, %ymm0
-; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm0, %ymm2
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm4 = ymm27[2,2,2,2,6,6,6,6]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7,8,9],ymm4[10],ymm2[11,12],ymm4[13],ymm2[14,15]
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} ymm4 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm5 = ymm27[0,1,1,3,4,5,5,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7,8,9],ymm4[10],ymm5[11,12],ymm4[13],ymm5[14,15]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm2, %zmm13, %zmm6
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm3 ^ (mem & (zmm6 ^ zmm3))
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm25, %ymm12
-; AVX512DQ-FCP-NEXT: vprold $16, %ymm25, %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm26, %ymm23
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm27, %ymm0
+; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm0, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm23, %ymm21
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm2 = ymm28[2,2,2,2,6,6,6,6]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6,7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13,14,15]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm17, %ymm9
+; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm0, %ymm2
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm28[1,1,1,1,5,5,5,5]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7,8,9],ymm3[10],ymm2[11,12],ymm3[13],ymm2[14,15]
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm1, %zmm19, %zmm3
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm29, %ymm8
+; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm8, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm24, %ymm10
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm2 = ymm18[2,2,2,2,6,6,6,6]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm8[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm4 = ymm18[0,1,1,3,4,5,5,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm4[0,1],ymm2[2],ymm4[3,4],ymm2[5],ymm4[6,7,8,9],ymm2[10],ymm4[11,12],ymm2[13],ymm4[14,15]
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm1, %zmm13, %zmm5
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm5 = zmm3 ^ (mem & (zmm5 ^ zmm3))
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm25, %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqu64 %ymm25, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT: vprold $16, %ymm25, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm26, %ymm25
; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm26[1,2,2,3,5,6,6,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7,8,9],ymm2[10],ymm3[11,12],ymm2[13],ymm3[14,15]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm12, %ymm3
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm25, %ymm28
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm5 = ymm26[0,0,2,1,4,4,6,5]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0,1,2],ymm5[3],ymm3[4,5],ymm5[6],ymm3[7,8,9,10],ymm5[11],ymm3[12,13],ymm5[14],ymm3[15]
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm1 = [2,2,3,3,10,9,11,10]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm2, %zmm1, %zmm3
-; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm20 = [0,5,4,0,0,6,5,0,0,5,4,0,0,6,5,0]
-; AVX512DQ-FCP-NEXT: # zmm20 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FCP-NEXT: vpermd 64(%rax), %zmm20, %zmm1
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 ^ (zmm16 & (zmm1 ^ zmm3))
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 ^ (mem & (zmm1 ^ zmm6))
-; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm0, %ymm1
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm0 = ymm27[3,3,3,3,7,7,7,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6,7,8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13,14,15]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm3[0,1],ymm1[2],ymm3[3,4],ymm1[5],ymm3[6,7,8,9],ymm1[10],ymm3[11,12],ymm1[13],ymm3[14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm3
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm4 = ymm26[0,0,2,1,4,4,6,5]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7,8,9,10],ymm4[11],ymm3[12,13],ymm4[14],ymm3[15]
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm0 = [2,2,3,3,10,9,11,10]
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm1, %zmm0, %zmm3
+; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rax), %zmm0
+; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm23 = [0,5,4,0,0,6,5,0,0,5,4,0,0,6,5,0]
+; AVX512DQ-FCP-NEXT: # zmm23 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FCP-NEXT: vpermd %zmm0, %zmm23, %zmm0
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (zmm22 & (zmm0 ^ zmm3))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm5))
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa 64(%rcx), %xmm0
-; AVX512DQ-FCP-NEXT: vmovdqa 64(%rdx), %xmm3
-; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} xmm9 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm0, %xmm1
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm3[1,1,2,2]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm6[0],xmm1[1],xmm6[2,3],xmm1[4],xmm6[5,6],xmm1[7]
-; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm8, %ymm1
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm0 = ymm18[3,3,3,3,7,7,7,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm12 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6,7,8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13,14,15]
+; AVX512DQ-FCP-NEXT: vmovdqa 64(%rcx), %xmm5
+; AVX512DQ-FCP-NEXT: vmovdqa 64(%rdx), %xmm6
+; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} xmm4 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm5, %xmm1
; AVX512DQ-FCP-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm3 = xmm6[1,1,2,2]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0],xmm1[1],xmm3[2,3],xmm1[4],xmm3[5,6],xmm1[7]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [0,1,2,3,0,1,2,3,6,7,4,5,6,7,4,5]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm14, %xmm0, %xmm0
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm16 = [0,1,1,3,8,8,9,9]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm1, %zmm16, %zmm0
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm14, %xmm3, %xmm3
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm22 = [8,9,9,0,0,0,1,1]
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm3, %zmm22, %zmm1
; AVX512DQ-FCP-NEXT: vmovdqa 64(%rdi), %xmm6
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm6, (%rsp) # 16-byte Spill
; AVX512DQ-FCP-NEXT: vmovdqa 64(%rsi), %xmm7
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm7, (%rsp) # 16-byte Spill
-; AVX512DQ-FCP-NEXT: vprold $16, %xmm7, %xmm1
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm3 = xmm6[1,1,2,3]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1],xmm1[2],xmm3[3,4],xmm1[5],xmm3[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm2, %xmm3, %xmm3
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm25 = [0,0,1,1,8,8,10,9]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm1, %zmm25, %zmm3
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm1 = [65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0]
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm3 = zmm0 ^ (zmm1 & (zmm3 ^ zmm0))
-; AVX512DQ-FCP-NEXT: vmovdqa 64(%r9), %xmm0
-; AVX512DQ-FCP-NEXT: vmovdqa 64(%r8), %xmm6
-; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
; AVX512DQ-FCP-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,2,3,6,7,4,5,6,7,4,5,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm0, %xmm6
-; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm0, %xmm0
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm7 = [0,0,0,1,8,9,9,11]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm6, %zmm7, %zmm0
+; AVX512DQ-FCP-NEXT: vprold $16, %xmm7, %xmm3
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm5 = xmm6[1,1,2,3]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0,1],xmm3[2],xmm5[3,4],xmm3[5],xmm5[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm15, %xmm5, %xmm5
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm26 = [0,0,1,1,8,8,0,9]
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm3, %zmm26, %zmm5
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm3 = [65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0]
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm5 = zmm1 ^ (zmm3 & (zmm5 ^ zmm1))
+; AVX512DQ-FCP-NEXT: vmovdqa 64(%r9), %xmm1
+; AVX512DQ-FCP-NEXT: vmovdqa 64(%r8), %xmm6
+; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm6[4],xmm1[4],xmm6[5],xmm1[5],xmm6[6],xmm1[6],xmm6[7],xmm1[7]
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [0,1,2,3,6,7,4,5,6,7,4,5,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm2, %xmm1, %xmm6
+; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm1, %xmm1
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm18 = [0,0,0,1,8,9,9,0]
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm6, %zmm18, %zmm1
; AVX512DQ-FCP-NEXT: vpbroadcastd 64(%rax), %ymm6
; AVX512DQ-FCP-NEXT: vpbroadcastd 68(%rax), %ymm8
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm6, %zmm29
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm29 = zmm29 ^ (zmm21 & (zmm29 ^ zmm0))
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535]
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm29 = zmm29 ^ (zmm0 & (zmm29 ^ zmm3))
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm30, %zmm11
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm29 = zmm29 ^ (zmm30 & (zmm29 ^ zmm1))
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535]
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm29 = zmm29 ^ (zmm1 & (zmm29 ^ zmm5))
; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %xmm8
; AVX512DQ-FCP-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %xmm10
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm8, %xmm3
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm10[1,1,2,2]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm6[0],xmm3[1],xmm6[2,3],xmm3[4],xmm6[5,6],xmm3[7]
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm10[0],xmm8[0],xmm10[1],xmm8[1],xmm10[2],xmm8[2],xmm10[3],xmm8[3]
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %xmm0
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm8, %xmm5
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm0[1,1,2,2]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0],xmm5[1],xmm6[2,3],xmm5[4],xmm6[5,6],xmm5[7]
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3]
; AVX512DQ-FCP-NEXT: vpshufb %xmm14, %xmm6, %xmm6
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm3, %zmm16, %zmm6
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm9
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %xmm10
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FCP-NEXT: vprold $16, %xmm10, %xmm3
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm8 = xmm9[1,1,2,3]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm8[0,1],xmm3[2],xmm8[3,4],xmm3[5],xmm8[6,7]
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm2, %xmm8, %xmm8
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm2, %xmm31
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm3, %zmm25, %zmm8
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm8 = zmm6 ^ (zmm1 & (zmm8 ^ zmm6))
-; AVX512DQ-FCP-NEXT: vmovdqa (%r9), %xmm1
-; AVX512DQ-FCP-NEXT: vmovdqa (%r8), %xmm3
-; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm1, %xmm3
-; AVX512DQ-FCP-NEXT: vpshufb %xmm4, %xmm1, %xmm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm4, %xmm27
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm3, %zmm7, %zmm1
-; AVX512DQ-FCP-NEXT: vpbroadcastd (%rax), %ymm3
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm6, %zmm22, %zmm5
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm0
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %xmm4
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FCP-NEXT: vprold $16, %xmm4, %xmm6
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm8 = xmm0[1,1,2,3]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm8[0,1],xmm6[2],xmm8[3,4],xmm6[5],xmm8[6,7]
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm15, %xmm8, %xmm8
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm6, %zmm26, %zmm8
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm8 = zmm5 ^ (zmm3 & (zmm8 ^ zmm5))
+; AVX512DQ-FCP-NEXT: vmovdqa (%r9), %xmm3
+; AVX512DQ-FCP-NEXT: vmovdqa (%r8), %xmm5
+; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm2, %xmm3, %xmm5
+; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm3, %xmm3
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm7, %xmm24
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm5, %zmm18, %zmm3
+; AVX512DQ-FCP-NEXT: vpbroadcastd (%rax), %ymm5
; AVX512DQ-FCP-NEXT: vpbroadcastd 4(%rax), %ymm6
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm3, %zmm26
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm26 = zmm26 ^ (zmm21 & (zmm26 ^ zmm1))
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm26 = zmm26 ^ (zmm0 & (zmm26 ^ zmm8))
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm9, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm22 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm1 = ymm22[2,2,2,2,6,6,6,6]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6,7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm15, %ymm9, %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm9, %ymm11
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm22[1,1,1,1,5,5,5,5]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7,8,9],ymm3[10],ymm1[11,12],ymm3[13],ymm1[14,15]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm0, %zmm30, %zmm1
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm5, %zmm30
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm30 = zmm30 ^ (zmm11 & (zmm30 ^ zmm3))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm30 = zmm30 ^ (zmm1 & (zmm30 ^ zmm8))
+; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm21, %ymm0
+; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm7, %ymm1
; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm21 # 32-byte Reload
; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm21[2,2,2,2,6,6,6,6]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm0[0,1],ymm3[2],ymm0[3,4],ymm3[5],ymm0[6,7,8,9],ymm3[10],ymm0[11,12],ymm3[13],ymm0[14,15]
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm15[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm6 = ymm21[0,1,1,3,4,5,5,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm6[0,1],ymm0[2],ymm6[3,4],ymm0[5],ymm6[6,7,8,9],ymm0[10],ymm6[11,12],ymm0[13],ymm6[14,15]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm3, %zmm13, %zmm0
-; AVX512DQ-FCP-NEXT: vmovdqa 96(%rcx), %xmm10
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6,7,8],ymm1[9],ymm3[10,11],ymm1[12],ymm3[13,14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm7, %ymm3
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm5 = ymm21[1,1,1,1,5,5,5,5]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0,1],ymm5[2],ymm3[3,4],ymm5[5],ymm3[6,7,8,9],ymm5[10],ymm3[11,12],ymm5[13],ymm3[14,15]
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm1, %zmm19, %zmm3
+; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm9, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm18 # 32-byte Reload
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm5 = ymm18[2,2,2,2,6,6,6,6]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm1[0,1],ymm5[2],ymm1[3,4],ymm5[5],ymm1[6,7,8,9],ymm5[10],ymm1[11,12],ymm5[13],ymm1[14,15]
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm9[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm6 = ymm18[0,1,1,3,4,5,5,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm6[0,1],ymm1[2],ymm6[3,4],ymm1[5],ymm6[6,7,8,9],ymm1[10],ymm6[11,12],ymm1[13],ymm6[14,15]
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm5, %zmm13, %zmm1
+; AVX512DQ-FCP-NEXT: vmovdqa 96(%rcx), %xmm11
; AVX512DQ-FCP-NEXT: vmovdqa 96(%rdx), %xmm13
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm13[0],xmm10[0],xmm13[1],xmm10[1],xmm13[2],xmm10[2],xmm13[3],xmm10[3]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm14, %xmm3, %xmm3
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm24 = [0,2,2,3,8,9,9,11]
-; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm3, %zmm24, %zmm5
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [u,u,u,u,u,u,u,u,u,u,u,u,14,15,12,13,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm18, %ymm3
-; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm3, %ymm3
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm7 = ymm19[3,3,3,3,7,7,7,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm8 = ymm7[0,1,2],ymm3[3],ymm7[4,5],ymm3[6],ymm7[7,8,9,10],ymm3[11],ymm7[12,13],ymm3[14],ymm7[15]
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm1 ^ (mem & (zmm0 ^ zmm1))
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vprold $16, %ymm4, %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm16 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm16[1,2,2,3,5,6,6,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm3[0,1],ymm1[2],ymm3[3,4],ymm1[5],ymm3[6,7,8,9],ymm1[10],ymm3[11,12],ymm1[13],ymm3[14,15]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm4, %ymm17
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm4 = ymm16[0,0,2,1,4,4,6,5]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7,8,9,10],ymm4[11],ymm3[12,13],ymm4[14],ymm3[15]
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm2 = [2,2,3,3,10,9,11,10]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm1, %zmm2, %zmm3
-; AVX512DQ-FCP-NEXT: vmovdqa 96(%rsi), %xmm4
-; AVX512DQ-FCP-NEXT: vmovdqa 96(%rdi), %xmm6
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm31, %xmm9
-; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm1, %xmm1
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm13[0],xmm11[0],xmm13[1],xmm11[1],xmm13[2],xmm11[2],xmm13[3],xmm11[3]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm14, %xmm5, %xmm5
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm6 = [0,2,2,3,8,9,9,0]
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm5, %zmm6, %zmm12
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [u,u,u,u,u,u,u,u,u,u,u,u,14,15,12,13,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm27, %ymm2
+; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm2, %ymm5
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm8 = ymm28[3,3,3,3,7,7,7,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm8 = ymm8[0,1,2],ymm5[3],ymm8[4,5],ymm5[6],ymm8[7,8,9,10],ymm5[11],ymm8[12,13],ymm5[14],ymm8[15]
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm1 = zmm3 ^ (mem & (zmm1 ^ zmm3))
+; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX512DQ-FCP-NEXT: vprold $16, %ymm5, %ymm2
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm20 # 32-byte Reload
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm20[1,2,2,3,5,6,6,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7,8,9],ymm2[10],ymm3[11,12],ymm2[13],ymm3[14,15]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm5, %ymm31
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm5 = ymm20[0,0,2,1,4,4,6,5]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0,1,2],ymm5[3],ymm3[4,5],ymm5[6],ymm3[7,8,9,10],ymm5[11],ymm3[12,13],ymm5[14],ymm3[15]
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm0 = [2,2,3,3,10,9,11,10]
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm2, %zmm0, %zmm3
+; AVX512DQ-FCP-NEXT: vmovdqa 96(%rsi), %xmm2
+; AVX512DQ-FCP-NEXT: vmovdqa 96(%rdi), %xmm4
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm15, %xmm5, %xmm5
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm19 = [2,1,3,3,8,8,9,9]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm1, %zmm19, %zmm8
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm30 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm8 = zmm5 ^ (zmm30 & (zmm8 ^ zmm5))
-; AVX512DQ-FCP-NEXT: vbroadcasti32x4 {{.*#+}} ymm18 = [26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
-; AVX512DQ-FCP-NEXT: # ymm18 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm18, %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm28, %ymm2
-; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm2, %ymm1
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm2 = ymm23[3,3,3,3,7,7,7,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
-; AVX512DQ-FCP-NEXT: vpermd (%rax), %zmm20, %zmm20
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm20 = zmm20 ^ (mem & (zmm20 ^ zmm3))
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm20 = zmm20 ^ (mem & (zmm20 ^ zmm0))
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm5, %zmm19, %zmm8
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm8 = zmm12 ^ (zmm17 & (zmm8 ^ zmm12))
+; AVX512DQ-FCP-NEXT: vbroadcasti32x4 {{.*#+}} ymm22 = [26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
+; AVX512DQ-FCP-NEXT: # ymm22 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm22, %ymm0
+; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm5, %ymm0
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm5 = ymm25[3,3,3,3,7,7,7,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm12 = ymm0[0,1],ymm5[2],ymm0[3,4],ymm5[5],ymm0[6,7,8,9],ymm5[10],ymm0[11,12],ymm5[13],ymm0[14,15]
+; AVX512DQ-FCP-NEXT: vpermd %zmm16, %zmm23, %zmm16
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm16 = zmm16 ^ (mem & (zmm16 ^ zmm3))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm16 = zmm16 ^ (mem & (zmm16 ^ zmm1))
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rcx), %xmm1
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdx), %xmm0
; AVX512DQ-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm14, %xmm0, %xmm3
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm15[14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm2 = ymm21[3,3,3,3,7,7,7,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6,7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13,14,15]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm3, %zmm24, %zmm0
-; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm11, %ymm1
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm2 = ymm22[3,3,3,3,7,7,7,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm15 = ymm2[0,1,2],ymm1[3],ymm2[4,5],ymm1[6],ymm2[7,8,9,10],ymm1[11],ymm2[12,13],ymm1[14],ymm2[15]
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %xmm12
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rsi), %xmm11
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm1, %xmm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm31, %xmm24
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm1, %zmm19, %zmm15
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm19 = [2,2,2,3,8,8,8,9]
-; AVX512DQ-FCP-NEXT: vmovdqa 96(%r9), %xmm5
-; AVX512DQ-FCP-NEXT: vmovdqa 96(%r8), %xmm3
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm9 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm27, %xmm21
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm27, %xmm1
-; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm9, %xmm14
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm14, %zmm19, %zmm7
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm15 = zmm0 ^ (zmm30 & (zmm15 ^ zmm0))
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm27 = [6,7,3,3,7,7,6,7]
-; AVX512DQ-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm27, %ymm0 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: vpbroadcastd 96(%rax), %ymm23
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm23, %zmm0, %zmm23
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm30 = [65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535]
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm23 = zmm23 ^ (zmm30 & (zmm23 ^ zmm7))
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm31 = [0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535]
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm23 = zmm23 ^ (zmm31 & (zmm23 ^ zmm8))
-; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
-; AVX512DQ-FCP-NEXT: vprold $16, %xmm4, %xmm0
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm4 = xmm6[1,1,2,3]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm4[0,1],xmm0[2],xmm4[3,4],xmm0[5],xmm4[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm18, %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm17, %ymm4
-; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm4, %ymm1
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm4 = ymm16[3,3,3,3,7,7,7,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm4[2],ymm1[3,4],ymm4[5],ymm1[6,7,8,9],ymm4[10],ymm1[11,12],ymm4[13],ymm1[14,15]
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%r9), %xmm7
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm1, %xmm28
+; AVX512DQ-FCP-NEXT: vpshufb %xmm14, %xmm0, %xmm5
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm9[14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm18[3,3,3,3,7,7,7,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6,7,8],ymm1[9],ymm3[10,11],ymm1[12],ymm3[13,14,15]
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm5, %zmm6, %zmm0
+; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm7, %ymm1
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm21[3,3,3,3,7,7,7,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm14 = ymm3[0,1,2],ymm1[3],ymm3[4,5],ymm1[6],ymm3[7,8,9,10],ymm1[11],ymm3[12,13],ymm1[14],ymm3[15]
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %xmm10
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rsi), %xmm9
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm15, %xmm1, %xmm1
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm15, %xmm25
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm1, %zmm19, %zmm14
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm21 = [2,2,2,3,8,8,8,9]
+; AVX512DQ-FCP-NEXT: vmovdqa 96(%r9), %xmm6
+; AVX512DQ-FCP-NEXT: vmovdqa 96(%r8), %xmm5
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm24, %xmm18
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm24, %xmm1
+; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm7, %xmm15
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm15, %zmm21, %zmm12
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm14 = zmm0 ^ (zmm17 & (zmm14 ^ zmm0))
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm15 = [6,7,3,3,7,7,6,7]
+; AVX512DQ-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm0 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vpbroadcastd 96(%rax), %ymm19
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm19, %zmm0, %zmm19
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm24 = [65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535]
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm19 = zmm19 ^ (zmm24 & (zmm19 ^ zmm12))
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm23 = [0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535]
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm19 = zmm19 ^ (zmm23 & (zmm19 ^ zmm8))
+; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; AVX512DQ-FCP-NEXT: vprold $16, %xmm2, %xmm0
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm2 = xmm4[1,1,2,3]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3,4],xmm0[5],xmm2[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm22, %ymm1
+; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm31, %ymm2
+; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm2, %ymm1
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm2 = ymm20[3,3,3,3,7,7,7,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%r9), %xmm8
; AVX512DQ-FCP-NEXT: vmovdqa 32(%r8), %xmm4
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm21, %xmm8
-; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm6, %xmm8
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm6, %xmm16
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm8, %zmm19, %zmm1
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[4,5,2,3,4,5,6,7,8,9,10,11,10,11,8,9]
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm22 = [0,0,2,1,8,8,9,11]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm2, %zmm22, %zmm0
-; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm13[4],xmm10[4],xmm13[5],xmm10[5],xmm13[6],xmm10[6],xmm13[7],xmm10[7]
-; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} xmm18 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm18, %xmm8
-; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm10, %xmm10
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm18, %xmm12
+; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm2, %xmm12
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm12, %zmm21, %zmm1
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [4,5,2,3,4,5,6,7,8,9,10,11,10,11,8,9]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm3, %xmm3
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm12, %xmm31
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm17 = [0,0,0,1,8,8,9,0]
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm3, %zmm17, %zmm0
+; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm13[4],xmm11[4],xmm13[5],xmm11[5],xmm13[6],xmm11[6],xmm13[7],xmm11[7]
+; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} xmm22 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm22, %xmm12
+; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm11, %xmm11
; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm13 = xmm13[1,1,2,2]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm13 = xmm13[0],xmm10[1],xmm13[2,3],xmm10[4],xmm13[5,6],xmm10[7]
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,2,3,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm2, %zmm25, %zmm13
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm28 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm13 = zmm0 ^ (zmm28 & (zmm13 ^ zmm0))
-; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm13[0],xmm11[1],xmm13[2,3],xmm11[4],xmm13[5,6],xmm11[7]
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[0,2,3,3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm26, %zmm21
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm3, %zmm26, %zmm11
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm26 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm11 = zmm0 ^ (zmm26 & (zmm11 ^ zmm0))
+; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm14 = ymm14 ^ (ymm30 & (ymm14 ^ ymm0))
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm20 # 32-byte Reload
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm20 = ymm20 ^ (ymm24 & (ymm20 ^ ymm0))
; AVX512DQ-FCP-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm24, %xmm8
-; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm3, %xmm3
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} xmm21 = [0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm21, %xmm2
-; AVX512DQ-FCP-NEXT: vpshufb %xmm2, %xmm9, %xmm6
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm24 = [0,1,1,3,8,8,9,9]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm3, %zmm24, %zmm6
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} ymm3 = ymm2[0,1,2,3,5,5,7,6,8,9,10,11,13,13,15,14]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[3,3,3,3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25,22,23,22,23,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,2,2]
-; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm2, %xmm10
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm10[0,0,1,1]
-; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm10, %xmm10
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm8, %xmm5
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm10[0,0,1,1]
-; AVX512DQ-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm27, %ymm10 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: vpbroadcastd 32(%rax), %ymm17
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm17, %zmm10, %zmm10
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm10 = zmm10 ^ (zmm30 & (zmm10 ^ zmm1))
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} xmm18 = [0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm18, %xmm5
+; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm7, %xmm5
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm25, %xmm12
+; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm3, %xmm3
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm25 = [8,9,9,0,0,0,1,1]
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm5, %zmm25, %zmm3
+; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} ymm5 = ymm6[0,1,2,3,5,5,7,6,8,9,10,11,13,13,15,14]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[3,3,3,3]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25,22,23,22,23,u,u,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,2,2]
+; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm7, %xmm7
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm7[0,0,1,1]
+; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm7, %xmm7
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm12, %xmm27
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm7[0,0,1,1]
+; AVX512DQ-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm7 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vpbroadcastd 32(%rax), %ymm15
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm15, %zmm7, %zmm7
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm7 = zmm7 ^ (zmm24 & (zmm7 ^ zmm1))
; AVX512DQ-FCP-NEXT: vpbroadcastd 100(%rax), %ymm1
-; AVX512DQ-FCP-NEXT: vpbroadcastd 104(%rax), %ymm17
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm17, %zmm1, %zmm19
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm19 = zmm19 ^ (zmm17 & (zmm19 ^ zmm6))
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm10 = zmm10 ^ (zmm31 & (zmm10 ^ zmm15))
+; AVX512DQ-FCP-NEXT: vpbroadcastd 104(%rax), %ymm15
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm15, %zmm1, %zmm15
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm24 = [65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm15 = zmm15 ^ (zmm24 & (zmm15 ^ zmm3))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm7 = zmm7 ^ (zmm23 & (zmm7 ^ zmm14))
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm1 = [65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0]
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm19 = zmm19 ^ (zmm1 & (zmm19 ^ zmm13))
-; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm11[4],xmm12[4],xmm11[5],xmm12[5],xmm11[6],xmm12[6],xmm11[7],xmm12[7]
-; AVX512DQ-FCP-NEXT: vprold $16, %xmm11, %xmm11
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm12 = xmm12[1,1,2,3]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm11 = xmm12[0,1],xmm11[2],xmm12[3,4],xmm11[5],xmm12[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [4,5,2,3,4,5,6,7,8,9,10,11,10,11,8,9]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm15, %xmm6, %xmm6
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm6, %zmm22, %zmm11
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm18, %xmm6
-; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm12, %xmm6
-; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm12 = xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm13 = xmm13[1,1,2,2]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm13[0],xmm6[1],xmm13[2,3],xmm6[4],xmm13[5,6],xmm6[7]
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm12 = xmm12[0,2,3,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm12, %zmm25, %zmm6
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm11 ^ (zmm28 & (zmm6 ^ zmm11))
-; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm4[4],xmm7[4],xmm4[5],xmm7[5],xmm4[6],xmm7[6],xmm4[7],xmm7[7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm4, %xmm4
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm21, %xmm5
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm16, %xmm7
-; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm7, %xmm5
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm4, %zmm24, %zmm5
-; AVX512DQ-FCP-NEXT: vpbroadcastd 36(%rax), %ymm4
-; AVX512DQ-FCP-NEXT: vpbroadcastd 40(%rax), %ymm7
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm4, %zmm4
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 ^ (zmm17 & (zmm4 ^ zmm5))
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 ^ (zmm1 & (zmm4 ^ zmm6))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm15 = zmm15 ^ (zmm1 & (zmm15 ^ zmm11))
+; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm9[4],xmm10[4],xmm9[5],xmm10[5],xmm9[6],xmm10[6],xmm9[7],xmm10[7]
+; AVX512DQ-FCP-NEXT: vprold $16, %xmm9, %xmm9
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm10 = xmm10[1,1,2,3]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm10[0,1],xmm9[2],xmm10[3,4],xmm9[5],xmm10[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm31, %xmm14
+; AVX512DQ-FCP-NEXT: vpshufb %xmm14, %xmm3, %xmm3
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm3, %zmm17, %zmm9
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm22, %xmm3
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm28, %xmm10
+; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm10, %xmm3
+; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm10 = xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
+; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm11 = xmm11[1,1,2,2]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm11[0],xmm3[1],xmm11[2,3],xmm3[4],xmm11[5,6],xmm3[7]
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm10 = xmm10[0,2,3,3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm10, %zmm21, %zmm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm3 = zmm9 ^ (zmm26 & (zmm3 ^ zmm9))
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm18, %xmm9
+; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm2, %xmm2
+; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm4[4],xmm8[4],xmm4[5],xmm8[5],xmm4[6],xmm8[6],xmm4[7],xmm8[7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm27, %xmm8
+; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm4, %xmm4
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm2, %zmm25, %zmm4
+; AVX512DQ-FCP-NEXT: vpbroadcastd 36(%rax), %ymm2
+; AVX512DQ-FCP-NEXT: vpbroadcastd 40(%rax), %ymm8
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm2, %zmm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (zmm24 & (zmm2 ^ zmm4))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (zmm1 & (zmm2 ^ zmm3))
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} ymm0 = mem ^ (ymm1 & (ymm0 ^ mem))
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 ^ (ymm1 & (ymm3 ^ ymm0))
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm5 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535]
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm9 = ymm9 ^ (ymm5 & (ymm9 ^ ymm14))
-; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm9[0,1,2,3],zmm3[0,1,2,3]
-; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm12 = zmm12 ^ (mem & (zmm12 ^ zmm0))
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm0 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm0 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # zmm0 = (zmm0 & zmm5) | mem
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm2 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm2 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # zmm2 = (zmm2 & zmm5) | mem
-; AVX512DQ-FCP-NEXT: vmovdqa (%rsp), %xmm3 # 16-byte Reload
-; AVX512DQ-FCP-NEXT: vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # xmm3 = xmm3[4],mem[4],xmm3[5],mem[5],xmm3[6],mem[6],xmm3[7],mem[7]
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm5 = ymm5 ^ (ymm1 & (ymm5 ^ ymm0))
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm4 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535]
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm6 ^ (ymm4 & (ymm6 ^ ymm20))
+; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm6[0,1,2,3],zmm5[0,1,2,3]
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm3 = zmm3 ^ (mem & (zmm3 ^ zmm0))
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm0 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm0 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # zmm0 = (zmm0 & zmm4) | mem
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, %zmm5
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm4 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm4 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # zmm4 = (zmm4 & zmm5) | mem
; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
; AVX512DQ-FCP-NEXT: vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
; AVX512DQ-FCP-NEXT: # xmm5 = xmm5[4],mem[4],xmm5[5],mem[5],xmm5[6],mem[6],xmm5[7],mem[7]
; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX512DQ-FCP-NEXT: vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vpunpckhwd (%rsp), %xmm6, %xmm6 # 16-byte Folded Reload
; AVX512DQ-FCP-NEXT: # xmm6 = xmm6[4],mem[4],xmm6[5],mem[5],xmm6[6],mem[6],xmm6[7],mem[7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm15, %xmm3, %xmm3
-; AVX512DQ-FCP-NEXT: vpshufb %xmm15, %xmm6, %xmm6
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} ymm7 = ymm7[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512DQ-FCP-NEXT: vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # ymm8 = mem[0,1,1,3,4,5,5,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm8[0,1],ymm7[2],ymm8[3,4],ymm7[5],ymm8[6,7,8,9],ymm7[10],ymm8[11,12],ymm7[13],ymm8[14,15]
-; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
-; AVX512DQ-FCP-NEXT: vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # ymm9 = mem[1,1,1,1,5,5,5,5]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm8 = ymm8[0,1],ymm9[2],ymm8[3,4],ymm9[5],ymm8[6,7,8,9],ymm9[10],ymm8[11,12],ymm9[13],ymm8[14,15]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [0,1,4,5,4,5,5,7]
-; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vpermd %ymm11, %ymm9, %ymm9
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[12,13],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm11[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm11[16,17],zero,zero
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm13 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535]
-; AVX512DQ-FCP-NEXT: vpandn %ymm9, %ymm13, %ymm9
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm9, %zmm11, %zmm9
-; AVX512DQ-FCP-NEXT: vpshuflw $248, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # xmm11 = mem[0,2,3,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,0,2,1]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,1,3]
+; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; AVX512DQ-FCP-NEXT: vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # xmm8 = xmm8[4],mem[4],xmm8[5],mem[5],xmm8[6],mem[6],xmm8[7],mem[7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX512DQ-FCP-NEXT: vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm9 # 16-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # xmm9 = xmm9[4],mem[4],xmm9[5],mem[5],xmm9[6],mem[6],xmm9[7],mem[7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm31, %xmm10
+; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm6, %xmm6
+; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm9, %xmm9
+; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} ymm10 = ymm10[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-FCP-NEXT: vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # ymm11 = mem[0,1,1,3,4,5,5,7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm10 = ymm11[0,1],ymm10[2],ymm11[3,4],ymm10[5],ymm11[6,7,8,9],ymm10[10],ymm11[11,12],ymm10[13],ymm11[14,15]
+; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
+; AVX512DQ-FCP-NEXT: vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # ymm12 = mem[1,1,1,1,5,5,5,5]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm11 = ymm11[0,1],ymm12[2],ymm11[3,4],ymm12[5],ymm11[6,7,8,9],ymm12[10],ymm11[11,12],ymm12[13],ymm11[14,15]
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rax), %ymm12
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm13 = [0,1,4,5,4,5,5,7]
+; AVX512DQ-FCP-NEXT: vpermd %ymm12, %ymm13, %ymm13
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm12[12,13],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm12[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm12[16,17],zero,zero
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm14 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535]
+; AVX512DQ-FCP-NEXT: vpandn %ymm13, %ymm14, %ymm13
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm12, %zmm12
; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm5[0,2,3,3,4,5,6,7]
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,0,2,1]
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,1,3]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,1,3,2]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,2,3]
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm8 = xmm8[0,2,3,3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,2,1]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,0,1,3]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,1,3,2]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,2,3]
; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm14 = zmm14 ^ (mem & (zmm14 ^ zmm13))
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm11 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm3 = zmm11 ^ (zmm28 & (zmm3 ^ zmm11))
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm5 # 32-byte Folded Reload
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm6 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm5 ^ (zmm28 & (zmm6 ^ zmm5))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm5 ^ (zmm26 & (zmm6 ^ zmm5))
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm5 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm8 # 32-byte Folded Reload
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm8 = zmm5 ^ (zmm26 & (zmm8 ^ zmm5))
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535]
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm3 = zmm0 ^ (zmm5 & (zmm3 ^ zmm0))
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm2 ^ (zmm5 & (zmm6 ^ zmm2))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm0 ^ (zmm5 & (zmm6 ^ zmm0))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm8 = zmm4 ^ (zmm5 & (zmm8 ^ zmm4))
; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm0, %zmm0
-; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm2, %zmm2
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm2 = zmm0 ^ (mem & (zmm2 ^ zmm0))
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm0, %zmm0
+; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm4, %zmm4
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm0 ^ (mem & (zmm4 ^ zmm0))
; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT: vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm0 # 64-byte Folded Reload
; AVX512DQ-FCP-NEXT: # zmm0 = zmm0 | (zmm1 & mem)
-; AVX512DQ-FCP-NEXT: vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm9 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT: # zmm9 = zmm9 | (zmm1 & mem)
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm9 = zmm9 ^ (mem & (zmm9 ^ zmm2))
+; AVX512DQ-FCP-NEXT: vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm12 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT: # zmm12 = zmm12 | (zmm1 & mem)
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm12 = zmm12 ^ (mem & (zmm12 ^ zmm4))
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm9, 320(%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, 256(%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm10, 192(%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm20, 128(%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm26, (%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm12, 320(%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm2, 256(%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm7, 192(%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm16, 128(%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm30, (%rax)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm29, 448(%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm19, 704(%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm23, 640(%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm15, 704(%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm19, 640(%rax)
; AVX512DQ-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-FCP-NEXT: vmovaps %zmm1, 576(%rax)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm14, 384(%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm6, 64(%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm3, 512(%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm12, 832(%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm8, 64(%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm6, 512(%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm3, 832(%rax)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, 768(%rax)
-; AVX512DQ-FCP-NEXT: addq $1576, %rsp # imm = 0x628
+; AVX512DQ-FCP-NEXT: addq $1544, %rsp # imm = 0x608
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-4.ll
index e74521d5463a4..21b98dbb3843e 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-4.ll
@@ -1696,20 +1696,20 @@ define void @store_i8_stride4_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-NEXT: vextracti32x4 $2, %zmm1, %xmm6
; AVX512BW-NEXT: vextracti64x4 $1, %zmm3, %ymm7
; AVX512BW-NEXT: vinserti128 $1, %xmm6, %ymm7, %ymm6
-; AVX512BW-NEXT: vextracti32x4 $2, %zmm0, %xmm8
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm4, %ymm9
-; AVX512BW-NEXT: vinserti128 $1, %xmm8, %ymm9, %ymm8
+; AVX512BW-NEXT: vextracti32x4 $2, %zmm0, %xmm7
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm4, %ymm8
+; AVX512BW-NEXT: vinserti128 $1, %xmm7, %ymm8, %ymm7
; AVX512BW-NEXT: vinserti64x4 $1, %ymm5, %zmm2, %zmm2
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm4, %zmm4
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm3
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm3[2,3,6,7],zmm4[2,3,6,7]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm8, %zmm6, %zmm4
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm9[0,1,2,3],zmm0[4,5,6,7]
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm7[0,1,2,3],zmm1[4,5,6,7]
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm4, %zmm5
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm8
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm8[2,3,6,7],zmm5[2,3,6,7]
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm6
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm4[4,5,6,7],zmm0[4,5,6,7]
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm3[4,5,6,7],zmm1[4,5,6,7]
; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm1[2,3,6,7],zmm0[2,3,6,7]
-; AVX512BW-NEXT: vmovdqa64 %zmm3, 64(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm0, 192(%r8)
-; AVX512BW-NEXT: vmovdqa64 %zmm4, 128(%r8)
+; AVX512BW-NEXT: vmovdqa64 %zmm5, 64(%r8)
+; AVX512BW-NEXT: vmovdqa64 %zmm6, 128(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm2, (%r8)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -1733,20 +1733,20 @@ define void @store_i8_stride4_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP-NEXT: vextracti32x4 $2, %zmm1, %xmm6
; AVX512BW-FCP-NEXT: vextracti64x4 $1, %zmm3, %ymm7
; AVX512BW-FCP-NEXT: vinserti128 $1, %xmm6, %ymm7, %ymm6
-; AVX512BW-FCP-NEXT: vextracti32x4 $2, %zmm0, %xmm8
-; AVX512BW-FCP-NEXT: vextracti64x4 $1, %zmm4, %ymm9
-; AVX512BW-FCP-NEXT: vinserti128 $1, %xmm8, %ymm9, %ymm8
+; AVX512BW-FCP-NEXT: vextracti32x4 $2, %zmm0, %xmm7
+; AVX512BW-FCP-NEXT: vextracti64x4 $1, %zmm4, %ymm8
+; AVX512BW-FCP-NEXT: vinserti128 $1, %xmm7, %ymm8, %ymm7
; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm2, %zmm2
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm4, %zmm4
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm3
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm3[2,3,6,7],zmm4[2,3,6,7]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm6, %zmm4
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm9[0,1,2,3],zmm0[4,5,6,7]
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm7[0,1,2,3],zmm1[4,5,6,7]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm4, %zmm5
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm8
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm8[2,3,6,7],zmm5[2,3,6,7]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm6
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm4[4,5,6,7],zmm0[4,5,6,7]
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm3[4,5,6,7],zmm1[4,5,6,7]
; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm1[2,3,6,7],zmm0[2,3,6,7]
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm3, 64(%r8)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, 192(%r8)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm4, 128(%r8)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm5, 64(%r8)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm6, 128(%r8)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm2, (%r8)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
@@ -1770,20 +1770,20 @@ define void @store_i8_stride4_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-NEXT: vextracti32x4 $2, %zmm1, %xmm6
; AVX512DQ-BW-NEXT: vextracti64x4 $1, %zmm3, %ymm7
; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm6, %ymm7, %ymm6
-; AVX512DQ-BW-NEXT: vextracti32x4 $2, %zmm0, %xmm8
-; AVX512DQ-BW-NEXT: vextracti64x4 $1, %zmm4, %ymm9
-; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm8, %ymm9, %ymm8
+; AVX512DQ-BW-NEXT: vextracti32x4 $2, %zmm0, %xmm7
+; AVX512DQ-BW-NEXT: vextracti64x4 $1, %zmm4, %ymm8
+; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm7, %ymm8, %ymm7
; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm5, %zmm2, %zmm2
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm0, %zmm4, %zmm4
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm3
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm3[2,3,6,7],zmm4[2,3,6,7]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm8, %zmm6, %zmm4
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm9[0,1,2,3],zmm0[4,5,6,7]
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm7[0,1,2,3],zmm1[4,5,6,7]
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm0, %zmm4, %zmm5
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm8
+; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm8[2,3,6,7],zmm5[2,3,6,7]
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm6
+; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm4[4,5,6,7],zmm0[4,5,6,7]
+; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm3[4,5,6,7],zmm1[4,5,6,7]
; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm1[2,3,6,7],zmm0[2,3,6,7]
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm3, 64(%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, 192(%r8)
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm4, 128(%r8)
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm5, 64(%r8)
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm6, 128(%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm2, (%r8)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
@@ -1807,20 +1807,20 @@ define void @store_i8_stride4_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP-NEXT: vextracti32x4 $2, %zmm1, %xmm6
; AVX512DQ-BW-FCP-NEXT: vextracti64x4 $1, %zmm3, %ymm7
; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, %xmm6, %ymm7, %ymm6
-; AVX512DQ-BW-FCP-NEXT: vextracti32x4 $2, %zmm0, %xmm8
-; AVX512DQ-BW-FCP-NEXT: vextracti64x4 $1, %zmm4, %ymm9
-; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, %xmm8, %ymm9, %ymm8
+; AVX512DQ-BW-FCP-NEXT: vextracti32x4 $2, %zmm0, %xmm7
+; AVX512DQ-BW-FCP-NEXT: vextracti64x4 $1, %zmm4, %ymm8
+; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, %xmm7, %ymm8, %ymm7
; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm2, %zmm2
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm4, %zmm4
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm3
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm3[2,3,6,7],zmm4[2,3,6,7]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm6, %zmm4
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm9[0,1,2,3],zmm0[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm7[0,1,2,3],zmm1[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm4, %zmm5
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm8
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm8[2,3,6,7],zmm5[2,3,6,7]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm6
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm4[4,5,6,7],zmm0[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm3[4,5,6,7],zmm1[4,5,6,7]
; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm1[2,3,6,7],zmm0[2,3,6,7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm3, 64(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, 192(%r8)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm4, 128(%r8)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm5, 64(%r8)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm6, 128(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm2, (%r8)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
index d25f8cf6b0bca..302da6ef63796 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
@@ -4664,176 +4664,176 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
;
; AVX512-LABEL: store_i8_stride5_vf64:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovdqa 32(%rsi), %ymm10
+; AVX512-NEXT: vmovdqa 32(%rsi), %ymm11
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128,128]
-; AVX512-NEXT: vpshufb %ymm1, %ymm10, %ymm0
-; AVX512-NEXT: vmovdqa64 %ymm1, %ymm18
+; AVX512-NEXT: vpshufb %ymm1, %ymm11, %ymm0
+; AVX512-NEXT: vmovdqa64 %ymm1, %ymm20
; AVX512-NEXT: vmovdqa 32(%rdi), %ymm5
; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [12,13,128,15,12,13,14,128,12,13,14,15,128,u,u,u,16,128,18,19,16,17,128,19,16,17,18,128,16,17,18,19]
; AVX512-NEXT: vpshufb %ymm2, %ymm5, %ymm1
; AVX512-NEXT: vmovdqa64 %ymm2, %ymm21
-; AVX512-NEXT: vporq %ymm0, %ymm1, %ymm19
-; AVX512-NEXT: vmovdqa 32(%rdi), %xmm14
+; AVX512-NEXT: vporq %ymm0, %ymm1, %ymm17
+; AVX512-NEXT: vmovdqa 32(%rdi), %xmm12
; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11]
-; AVX512-NEXT: vpshufb %xmm1, %xmm14, %xmm0
+; AVX512-NEXT: vpshufb %xmm1, %xmm12, %xmm0
; AVX512-NEXT: vmovdqa64 %xmm1, %xmm28
-; AVX512-NEXT: vmovdqa 32(%rsi), %xmm13
+; AVX512-NEXT: vmovdqa 32(%rsi), %xmm10
; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
-; AVX512-NEXT: vpshufb %xmm2, %xmm13, %xmm1
+; AVX512-NEXT: vpshufb %xmm2, %xmm10, %xmm1
; AVX512-NEXT: vmovdqa64 %xmm2, %xmm29
-; AVX512-NEXT: vporq %xmm0, %xmm1, %xmm20
+; AVX512-NEXT: vporq %xmm0, %xmm1, %xmm19
; AVX512-NEXT: vmovdqa 32(%rcx), %ymm15
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm7 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
-; AVX512-NEXT: vpshufb %ymm7, %ymm15, %ymm0
-; AVX512-NEXT: vmovdqa 32(%rdx), %ymm12
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
+; AVX512-NEXT: vpshufb %ymm8, %ymm15, %ymm0
+; AVX512-NEXT: vmovdqa 32(%rdx), %ymm13
; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u]
-; AVX512-NEXT: vpshufb %ymm3, %ymm12, %ymm1
+; AVX512-NEXT: vpshufb %ymm3, %ymm13, %ymm1
; AVX512-NEXT: vporq %ymm0, %ymm1, %ymm22
; AVX512-NEXT: vmovdqa 32(%rcx), %xmm6
; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
; AVX512-NEXT: vpshufb %xmm1, %xmm6, %xmm0
; AVX512-NEXT: vmovdqa64 %xmm1, %xmm30
-; AVX512-NEXT: vmovdqa 32(%rdx), %xmm8
+; AVX512-NEXT: vmovdqa 32(%rdx), %xmm7
; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
-; AVX512-NEXT: vpshufb %xmm2, %xmm8, %xmm1
+; AVX512-NEXT: vpshufb %xmm2, %xmm7, %xmm1
; AVX512-NEXT: vmovdqa64 %xmm2, %xmm31
; AVX512-NEXT: vporq %xmm0, %xmm1, %xmm23
-; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128,128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128]
-; AVX512-NEXT: # ymm9 = mem[0,1,0,1]
-; AVX512-NEXT: vpshufb %ymm9, %ymm5, %ymm0
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm14 = [128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128,128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128]
+; AVX512-NEXT: # ymm14 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm14, %ymm5, %ymm0
; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128]
; AVX512-NEXT: # ymm4 = mem[0,1,0,1]
; AVX512-NEXT: vpshufb %ymm4, %ymm5, %ymm1
; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm24
; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [27,0,128,26,128,28,0,128,0,128,29,128,31,0,128,30,27,0,128,26,128,28,0,128,0,128,29,128,31,0,128,30]
; AVX512-NEXT: # ymm5 = mem[0,1,0,1]
-; AVX512-NEXT: vpshufb %ymm5, %ymm10, %ymm1
+; AVX512-NEXT: vpshufb %ymm5, %ymm11, %ymm1
; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [19,128,21,0,128,20,128,22,128,24,0,128,23,128,25,0,19,128,21,0,128,20,128,22,128,24,0,128,23,128,25,0]
; AVX512-NEXT: # ymm2 = mem[0,1,0,1]
-; AVX512-NEXT: vpshufb %ymm2, %ymm10, %ymm10
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm10, %zmm26
-; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128]
-; AVX512-NEXT: # ymm10 = mem[0,1,0,1]
-; AVX512-NEXT: vpshufb %ymm10, %ymm12, %ymm1
+; AVX512-NEXT: vpshufb %ymm2, %ymm11, %ymm11
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm11, %zmm26
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128]
+; AVX512-NEXT: # ymm11 = mem[0,1,0,1]
+; AVX512-NEXT: vpshufb %ymm11, %ymm13, %ymm1
; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128]
; AVX512-NEXT: # ymm0 = mem[0,1,0,1]
-; AVX512-NEXT: vpshufb %ymm0, %ymm15, %ymm11
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm11, %zmm25
+; AVX512-NEXT: vpshufb %ymm0, %ymm15, %ymm9
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm9, %zmm25
; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [25,128,27,0,128,26,128,28,128,30,0,128,29,128,31,0,25,128,27,0,128,26,128,28,128,30,0,128,29,128,31,0]
; AVX512-NEXT: # ymm1 = mem[0,1,0,1]
-; AVX512-NEXT: vpshufb %ymm1, %ymm15, %ymm11
+; AVX512-NEXT: vpshufb %ymm1, %ymm15, %ymm9
; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm15 = [18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25]
; AVX512-NEXT: # ymm15 = mem[0,1,0,1]
-; AVX512-NEXT: vpshufb %ymm15, %ymm12, %ymm12
-; AVX512-NEXT: vinserti64x4 $1, %ymm11, %zmm12, %zmm27
-; AVX512-NEXT: vmovdqa (%rcx), %ymm12
-; AVX512-NEXT: vpshufb %ymm7, %ymm12, %ymm7
-; AVX512-NEXT: vmovdqa (%rdx), %ymm11
-; AVX512-NEXT: vpshufb %ymm3, %ymm11, %ymm3
-; AVX512-NEXT: vporq %ymm7, %ymm3, %ymm16
-; AVX512-NEXT: vpshufb %ymm0, %ymm12, %ymm0
-; AVX512-NEXT: vpshufb %ymm15, %ymm11, %ymm3
-; AVX512-NEXT: vporq %ymm0, %ymm3, %ymm17
+; AVX512-NEXT: vpshufb %ymm15, %ymm13, %ymm13
+; AVX512-NEXT: vinserti64x4 $1, %ymm9, %zmm13, %zmm27
+; AVX512-NEXT: vmovdqa (%rcx), %ymm13
+; AVX512-NEXT: vpshufb %ymm8, %ymm13, %ymm8
+; AVX512-NEXT: vmovdqa (%rdx), %ymm9
+; AVX512-NEXT: vpshufb %ymm3, %ymm9, %ymm3
+; AVX512-NEXT: vporq %ymm8, %ymm3, %ymm16
+; AVX512-NEXT: vpshufb %ymm0, %ymm13, %ymm0
+; AVX512-NEXT: vpshufb %ymm15, %ymm9, %ymm3
+; AVX512-NEXT: vporq %ymm0, %ymm3, %ymm18
; AVX512-NEXT: vmovdqa (%rsi), %ymm3
-; AVX512-NEXT: vmovdqa64 %ymm18, %ymm0
+; AVX512-NEXT: vmovdqa64 %ymm20, %ymm0
; AVX512-NEXT: vpshufb %ymm0, %ymm3, %ymm0
-; AVX512-NEXT: vmovdqa (%rdi), %ymm7
+; AVX512-NEXT: vmovdqa (%rdi), %ymm8
; AVX512-NEXT: vmovdqa64 %ymm21, %ymm15
-; AVX512-NEXT: vpshufb %ymm15, %ymm7, %ymm15
-; AVX512-NEXT: vporq %ymm0, %ymm15, %ymm18
-; AVX512-NEXT: vpshufb %ymm4, %ymm7, %ymm0
+; AVX512-NEXT: vpshufb %ymm15, %ymm8, %ymm15
+; AVX512-NEXT: vpor %ymm0, %ymm15, %ymm15
+; AVX512-NEXT: vpshufb %ymm4, %ymm8, %ymm0
; AVX512-NEXT: vpshufb %ymm2, %ymm3, %ymm2
-; AVX512-NEXT: vpor %ymm0, %ymm2, %ymm4
-; AVX512-NEXT: vpshufb %ymm10, %ymm11, %ymm0
-; AVX512-NEXT: vpshufb %ymm1, %ymm12, %ymm1
-; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm1
-; AVX512-NEXT: vpshufb %ymm9, %ymm7, %ymm0
+; AVX512-NEXT: vpor %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: vpshufb %ymm11, %ymm9, %ymm2
+; AVX512-NEXT: vpshufb %ymm1, %ymm13, %ymm1
+; AVX512-NEXT: vporq %ymm2, %ymm1, %ymm20
+; AVX512-NEXT: vpshufb %ymm14, %ymm8, %ymm1
; AVX512-NEXT: vpshufb %ymm5, %ymm3, %ymm2
+; AVX512-NEXT: vporq %ymm1, %ymm2, %ymm21
; AVX512-NEXT: vmovdqa (%rdi), %xmm5
-; AVX512-NEXT: vporq %ymm0, %ymm2, %ymm21
-; AVX512-NEXT: vmovdqa (%rsi), %xmm9
-; AVX512-NEXT: vmovdqa (%rcx), %xmm7
-; AVX512-NEXT: vmovdqa64 %xmm28, %xmm0
-; AVX512-NEXT: vpshufb %xmm0, %xmm5, %xmm0
+; AVX512-NEXT: vmovdqa (%rsi), %xmm8
+; AVX512-NEXT: vmovdqa64 %xmm28, %xmm1
+; AVX512-NEXT: vpshufb %xmm1, %xmm5, %xmm1
; AVX512-NEXT: vmovdqa64 %xmm29, %xmm2
-; AVX512-NEXT: vpshufb %xmm2, %xmm9, %xmm2
-; AVX512-NEXT: vpor %xmm0, %xmm2, %xmm10
-; AVX512-NEXT: vmovdqa (%rdx), %xmm0
-; AVX512-NEXT: vmovdqa 32(%r8), %ymm3
-; AVX512-NEXT: vmovdqa64 %xmm30, %xmm2
-; AVX512-NEXT: vpshufb %xmm2, %xmm7, %xmm2
-; AVX512-NEXT: vmovdqa64 %xmm31, %xmm11
-; AVX512-NEXT: vpshufb %xmm11, %xmm0, %xmm11
-; AVX512-NEXT: vpor %xmm2, %xmm11, %xmm11
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm12 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
-; AVX512-NEXT: vpshufb %ymm12, %ymm3, %ymm2
-; AVX512-NEXT: vpshufd {{.*#+}} xmm15 = mem[1,1,2,2]
-; AVX512-NEXT: vpermq {{.*#+}} ymm15 = ymm15[0,1,1,1]
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm28 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
-; AVX512-NEXT: vpandnq %ymm15, %ymm28, %ymm15
-; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm15, %zmm2
-; AVX512-NEXT: vmovdqa (%r8), %ymm15
-; AVX512-NEXT: vpshufb %ymm12, %ymm15, %ymm12
-; AVX512-NEXT: vpshufd {{.*#+}} ymm15 = ymm15[0,2,1,1,4,6,5,5]
-; AVX512-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,3,3,2]
-; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm15, %ymm15
-; AVX512-NEXT: vinserti64x4 $1, %ymm15, %zmm12, %zmm12
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3],xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7]
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm8 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
-; AVX512-NEXT: vpshufb %xmm8, %xmm6, %xmm6
+; AVX512-NEXT: vpshufb %xmm2, %xmm8, %xmm2
+; AVX512-NEXT: vpor %xmm1, %xmm2, %xmm3
+; AVX512-NEXT: vmovdqa (%rcx), %xmm1
+; AVX512-NEXT: vmovdqa (%rdx), %xmm2
+; AVX512-NEXT: vmovdqa64 %xmm30, %xmm4
+; AVX512-NEXT: vpshufb %xmm4, %xmm1, %xmm4
+; AVX512-NEXT: vmovdqa64 %xmm31, %xmm9
+; AVX512-NEXT: vpshufb %xmm9, %xmm2, %xmm9
+; AVX512-NEXT: vpor %xmm4, %xmm9, %xmm9
+; AVX512-NEXT: vmovdqa 32(%r8), %ymm4
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm11 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
+; AVX512-NEXT: vpshufb %ymm11, %ymm4, %ymm4
+; AVX512-NEXT: vpshufd {{.*#+}} xmm13 = mem[1,1,2,2]
+; AVX512-NEXT: vpermq {{.*#+}} ymm13 = ymm13[0,1,1,1]
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm14 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
+; AVX512-NEXT: vpandn %ymm13, %ymm14, %ymm13
+; AVX512-NEXT: vinserti64x4 $1, %ymm4, %zmm13, %zmm4
+; AVX512-NEXT: vmovdqa (%r8), %ymm13
+; AVX512-NEXT: vpshufb %ymm11, %ymm13, %ymm11
+; AVX512-NEXT: vpshufd {{.*#+}} ymm13 = ymm13[0,2,1,1,4,6,5,5]
+; AVX512-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,3,2]
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm13, %ymm13
+; AVX512-NEXT: vinserti64x4 $1, %ymm13, %zmm11, %zmm11
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm7 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
+; AVX512-NEXT: vpshufb %xmm7, %xmm6, %xmm6
; AVX512-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,1,1]
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm14 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
-; AVX512-NEXT: vpshufb %xmm14, %xmm13, %xmm13
-; AVX512-NEXT: vpermq {{.*#+}} ymm13 = ymm13[0,0,1,1]
-; AVX512-NEXT: vmovdqa64 (%r8), %zmm15
-; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm29 = [4,0,5,5,5,5,0,6,6,6,6,0,7,7,7,7]
-; AVX512-NEXT: vpermd %zmm3, %zmm29, %zmm29
-; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm30 = [6,6,6,0,7,7,7,7,0,16,16,16,16,0,17,17]
-; AVX512-NEXT: vpermi2d %zmm3, %zmm15, %zmm30
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm5[0],xmm9[0],xmm5[1],xmm9[1],xmm5[2],xmm9[2],xmm5[3],xmm9[3],xmm5[4],xmm9[4],xmm5[5],xmm9[5],xmm5[6],xmm9[6],xmm5[7],xmm9[7]
-; AVX512-NEXT: vpshufb %xmm14, %xmm3, %xmm3
-; AVX512-NEXT: vinserti32x4 $2, %xmm10, %zmm3, %zmm3
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3],xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
-; AVX512-NEXT: vpshufb %xmm8, %xmm0, %xmm0
-; AVX512-NEXT: vinserti32x4 $2, %xmm11, %zmm0, %zmm0
-; AVX512-NEXT: vpermq {{.*#+}} ymm5 = ymm20[0,0,1,1]
-; AVX512-NEXT: vinserti64x4 $1, %ymm19, %zmm5, %zmm5
-; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm23[0,0,1,1]
-; AVX512-NEXT: vinserti64x4 $1, %ymm22, %zmm7, %zmm7
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm8 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm7 = zmm5 ^ (zmm8 & (zmm7 ^ zmm5))
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 | (zmm7 & zmm28)
-; AVX512-NEXT: vpermq {{.*#+}} ymm5 = ymm17[2,2,3,3]
-; AVX512-NEXT: vinserti64x4 $1, %ymm5, %zmm16, %zmm5
-; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
-; AVX512-NEXT: vinserti64x4 $1, %ymm4, %zmm18, %zmm4
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm4 = zmm5 ^ (zmm8 & (zmm4 ^ zmm5))
-; AVX512-NEXT: vporq %zmm24, %zmm26, %zmm5
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm12[0],xmm10[0],xmm12[1],xmm10[1],xmm12[2],xmm10[2],xmm12[3],xmm10[3],xmm12[4],xmm10[4],xmm12[5],xmm10[5],xmm12[6],xmm10[6],xmm12[7],xmm10[7]
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm12 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
+; AVX512-NEXT: vpshufb %xmm12, %xmm10, %xmm10
+; AVX512-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,1,1]
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1],xmm5[2],xmm8[2],xmm5[3],xmm8[3],xmm5[4],xmm8[4],xmm5[5],xmm8[5],xmm5[6],xmm8[6],xmm5[7],xmm8[7]
+; AVX512-NEXT: vpshufb %xmm12, %xmm5, %xmm5
+; AVX512-NEXT: vinserti32x4 $2, %xmm3, %zmm5, %zmm3
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; AVX512-NEXT: vpshufb %xmm7, %xmm1, %xmm1
+; AVX512-NEXT: vinserti32x4 $2, %xmm9, %zmm1, %zmm1
+; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm19[0,0,1,1]
+; AVX512-NEXT: vinserti64x4 $1, %ymm17, %zmm2, %zmm2
+; AVX512-NEXT: vpermq {{.*#+}} ymm5 = ymm23[0,0,1,1]
+; AVX512-NEXT: vinserti64x4 $1, %ymm22, %zmm5, %zmm5
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm7 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm5 = zmm2 ^ (zmm7 & (zmm5 ^ zmm2))
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 | (zmm5 & zmm14)
+; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm18[2,2,3,3]
+; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm16, %zmm2
+; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm15, %zmm0
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm0 = zmm2 ^ (zmm7 & (zmm0 ^ zmm2))
+; AVX512-NEXT: vporq %zmm24, %zmm26, %zmm2
+; AVX512-NEXT: vpermq {{.*#+}} zmm2 = zmm2[2,2,3,3,6,6,7,7]
+; AVX512-NEXT: vporq %zmm25, %zmm27, %zmm5
; AVX512-NEXT: vpermq {{.*#+}} zmm5 = zmm5[2,2,3,3,6,6,7,7]
-; AVX512-NEXT: vporq %zmm25, %zmm27, %zmm7
-; AVX512-NEXT: vpermq {{.*#+}} zmm7 = zmm7[2,2,3,3,6,6,7,7]
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm7 = zmm5 ^ (zmm8 & (zmm7 ^ zmm5))
-; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
-; AVX512-NEXT: vinserti64x4 $1, %ymm6, %zmm1, %zmm1
-; AVX512-NEXT: vpermq {{.*#+}} ymm5 = ymm21[2,2,3,3]
-; AVX512-NEXT: vinserti64x4 $1, %ymm13, %zmm5, %zmm5
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm5 = zmm1 ^ (zmm8 & (zmm5 ^ zmm1))
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm29 = zmm29 ^ (mem & (zmm29 ^ zmm7))
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm12 = zmm12 | (zmm4 & mem)
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm30 = zmm30 ^ (mem & (zmm30 ^ zmm5))
-; AVX512-NEXT: vpermq {{.*#+}} zmm1 = zmm3[0,0,1,1,4,4,5,5]
-; AVX512-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,0,1,1,4,4,5,5]
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm1))
-; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm1 = [0,0,0,0,0,0,1,1,1,1,0,2,2,2,2,0]
-; AVX512-NEXT: vpermd %zmm15, %zmm1, %zmm1
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 ^ (mem & (zmm1 ^ zmm0))
-; AVX512-NEXT: vmovdqa64 %zmm12, 64(%r9)
-; AVX512-NEXT: vmovdqa64 %zmm1, (%r9)
-; AVX512-NEXT: vmovdqa64 %zmm30, 128(%r9)
-; AVX512-NEXT: vmovdqa64 %zmm29, 256(%r9)
-; AVX512-NEXT: vmovdqa64 %zmm2, 192(%r9)
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm5 = zmm2 ^ (zmm7 & (zmm5 ^ zmm2))
+; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm20[2,2,3,3]
+; AVX512-NEXT: vinserti64x4 $1, %ymm6, %zmm2, %zmm2
+; AVX512-NEXT: vpermq {{.*#+}} ymm6 = ymm21[2,2,3,3]
+; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm6, %zmm6
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm6 = zmm2 ^ (zmm7 & (zmm6 ^ zmm2))
+; AVX512-NEXT: vmovdqa64 (%r8), %zmm2
+; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm7 = [12,0,13,13,13,13,0,14,14,14,14,0,15,15,15,15]
+; AVX512-NEXT: vpermd %zmm2, %zmm7, %zmm7
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm7 = zmm7 ^ (mem & (zmm7 ^ zmm5))
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm11 = zmm11 | (zmm0 & mem)
+; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm0 = [6,6,6,0,7,7,7,7,0,8,8,8,8,0,9,9]
+; AVX512-NEXT: vpermd %zmm2, %zmm0, %zmm0
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm6))
+; AVX512-NEXT: vpermq {{.*#+}} zmm3 = zmm3[0,0,1,1,4,4,5,5]
+; AVX512-NEXT: vpermq {{.*#+}} zmm1 = zmm1[0,0,1,1,4,4,5,5]
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 ^ (mem & (zmm1 ^ zmm3))
+; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm3 = [0,0,0,0,0,0,1,1,1,1,0,2,2,2,2,0]
+; AVX512-NEXT: vpermd %zmm2, %zmm3, %zmm2
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (mem & (zmm2 ^ zmm1))
+; AVX512-NEXT: vmovdqa64 %zmm11, 64(%r9)
+; AVX512-NEXT: vmovdqa64 %zmm2, (%r9)
+; AVX512-NEXT: vmovdqa64 %zmm0, 128(%r9)
+; AVX512-NEXT: vmovdqa64 %zmm7, 256(%r9)
+; AVX512-NEXT: vmovdqa64 %zmm4, 192(%r9)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
@@ -4915,53 +4915,52 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm6, %ymm1
; AVX512-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-FCP-NEXT: vpshufb %ymm13, %ymm10, %ymm1
-; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm9, %ymm3
+; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm9, %ymm2
+; AVX512-FCP-NEXT: vpor %ymm1, %ymm2, %ymm1
; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm2
-; AVX512-FCP-NEXT: vpor %ymm1, %ymm3, %ymm1
-; AVX512-FCP-NEXT: vmovdqa (%rsi), %xmm5
-; AVX512-FCP-NEXT: vmovdqa (%rcx), %xmm3
+; AVX512-FCP-NEXT: vmovdqa (%rsi), %xmm3
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[8],zero,xmm2[u,7],zero,xmm2[9],zero,xmm2[u],zero,xmm2[u,10],zero,xmm2[12],zero,xmm2[u,11]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm6 = zero,xmm5[8,u],zero,xmm5[7],zero,xmm5[9,u,11,u],zero,xmm5[10],zero,xmm5[12,u],zero
-; AVX512-FCP-NEXT: vpor %xmm4, %xmm6, %xmm6
-; AVX512-FCP-NEXT: vmovdqa (%rdx), %xmm4
-; AVX512-FCP-NEXT: vmovdqa 32(%r8), %ymm7
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm8 = zero,xmm3[6],zero,xmm3[8,u],zero,xmm3[7],zero,xmm3[9],zero,xmm3[11,u],zero,xmm3[10],zero,xmm3[12]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm4[6],zero,xmm4[8],zero,xmm4[u,7],zero,xmm4[9],zero,xmm4[11],zero,xmm4[u,10],zero,xmm4[12],zero
-; AVX512-FCP-NEXT: vpor %xmm8, %xmm9, %xmm8
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = zero,xmm3[8,u],zero,xmm3[7],zero,xmm3[9,u,11,u],zero,xmm3[10],zero,xmm3[12,u],zero
+; AVX512-FCP-NEXT: vpor %xmm4, %xmm5, %xmm6
+; AVX512-FCP-NEXT: vmovdqa (%rcx), %xmm4
+; AVX512-FCP-NEXT: vmovdqa (%rdx), %xmm5
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = zero,xmm4[6],zero,xmm4[8,u],zero,xmm4[7],zero,xmm4[9],zero,xmm4[11,u],zero,xmm4[10],zero,xmm4[12]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm5[6],zero,xmm5[8],zero,xmm5[u,7],zero,xmm5[9],zero,xmm5[11],zero,xmm5[u,10],zero,xmm5[12],zero
+; AVX512-FCP-NEXT: vpor %xmm7, %xmm8, %xmm7
+; AVX512-FCP-NEXT: vmovdqa 32(%r8), %ymm8
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
-; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm7, %ymm10
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm11 = [1,1,2,2,2,2,2,2]
-; AVX512-FCP-NEXT: vpermd %ymm7, %ymm11, %ymm11
-; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm12 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
-; AVX512-FCP-NEXT: vpandn %ymm11, %ymm12, %ymm11
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm11, %zmm10
-; AVX512-FCP-NEXT: vmovdqa (%r8), %ymm11
-; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm11, %ymm9
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm13 = [4,0,5,5,5,5,0,6,6,6,6,0,7,7,7,7]
-; AVX512-FCP-NEXT: vpermd %ymm11, %ymm13, %ymm11
-; AVX512-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm11, %ymm11
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm9, %zmm9
-; AVX512-FCP-NEXT: vpermd %zmm7, %zmm13, %zmm7
-; AVX512-FCP-NEXT: vmovdqa64 %xmm30, %xmm11
-; AVX512-FCP-NEXT: vmovdqa64 %xmm31, %xmm13
-; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm11[0],xmm13[0],xmm11[1],xmm13[1],xmm11[2],xmm13[2],xmm11[3],xmm13[3],xmm11[4],xmm13[4],xmm11[5],xmm13[5],xmm11[6],xmm13[6],xmm11[7],xmm13[7]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm13 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
-; AVX512-FCP-NEXT: vpshufb %xmm13, %xmm11, %xmm11
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm14 = [2,2,3,3,8,8,9,9]
-; AVX512-FCP-NEXT: vpermt2q %zmm11, %zmm14, %zmm0
-; AVX512-FCP-NEXT: vmovdqa64 %xmm28, %xmm11
-; AVX512-FCP-NEXT: vmovdqa64 %xmm29, %xmm15
-; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm11[0],xmm15[0],xmm11[1],xmm15[1],xmm11[2],xmm15[2],xmm11[3],xmm15[3],xmm11[4],xmm15[4],xmm11[5],xmm15[5],xmm11[6],xmm15[6],xmm11[7],xmm15[7]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
-; AVX512-FCP-NEXT: vpshufb %xmm15, %xmm11, %xmm11
-; AVX512-FCP-NEXT: vpermt2q %zmm11, %zmm14, %zmm1
-; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
-; AVX512-FCP-NEXT: vpshufb %xmm15, %xmm2, %xmm2
-; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; AVX512-FCP-NEXT: vpshufb %xmm13, %xmm3, %xmm3
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm10 = [1,1,2,2,2,2,2,2]
+; AVX512-FCP-NEXT: vpermd %ymm8, %ymm10, %ymm10
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm8, %ymm8
+; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm11 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
+; AVX512-FCP-NEXT: vpandn %ymm10, %ymm11, %ymm10
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm10, %zmm8
+; AVX512-FCP-NEXT: vmovdqa (%r8), %ymm10
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm10, %ymm9
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm12 = [4,0,5,5,5,5,0,6]
+; AVX512-FCP-NEXT: vpermd %ymm10, %ymm12, %ymm10
+; AVX512-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm10, %ymm10
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512-FCP-NEXT: vmovdqa64 %xmm30, %xmm10
+; AVX512-FCP-NEXT: vmovdqa64 %xmm31, %xmm12
+; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm10[0],xmm12[0],xmm10[1],xmm12[1],xmm10[2],xmm12[2],xmm10[3],xmm12[3],xmm10[4],xmm12[4],xmm10[5],xmm12[5],xmm10[6],xmm12[6],xmm10[7],xmm12[7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
+; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm10, %xmm10
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm13 = [2,2,3,3,8,8,9,9]
+; AVX512-FCP-NEXT: vpermt2q %zmm10, %zmm13, %zmm0
+; AVX512-FCP-NEXT: vmovdqa64 %xmm28, %xmm10
+; AVX512-FCP-NEXT: vmovdqa64 %xmm29, %xmm14
+; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm10[0],xmm14[0],xmm10[1],xmm14[1],xmm10[2],xmm14[2],xmm10[3],xmm14[3],xmm10[4],xmm14[4],xmm10[5],xmm14[5],xmm10[6],xmm14[6],xmm10[7],xmm14[7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
+; AVX512-FCP-NEXT: vpshufb %xmm14, %xmm10, %xmm10
+; AVX512-FCP-NEXT: vpermt2q %zmm10, %zmm13, %zmm1
+; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; AVX512-FCP-NEXT: vpshufb %xmm14, %xmm2, %xmm2
+; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm3, %xmm3
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm4 = [0,0,1,1,8,8,9,9]
; AVX512-FCP-NEXT: vpermt2q %zmm6, %zmm4, %zmm2
-; AVX512-FCP-NEXT: vpermt2q %zmm8, %zmm4, %zmm3
+; AVX512-FCP-NEXT: vpermt2q %zmm7, %zmm4, %zmm3
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm3 = zmm3 ^ (mem & (zmm3 ^ zmm2))
; AVX512-FCP-NEXT: vmovdqa64 (%r8), %zmm2
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,0,0,0,0,1,1,1,1,0,2,2,2,2,0]
@@ -4973,7 +4972,7 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm22, %zmm5, %zmm5
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm6 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm5 = zmm3 ^ (zmm6 & (zmm5 ^ zmm3))
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm10 = zmm10 | (zmm5 & zmm12)
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm8 = zmm8 | (zmm5 & zmm11)
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm25[2,2,3,3]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm24, %zmm3
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm27[2,2,3,3]
@@ -4986,190 +4985,192 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm18 = zmm17 ^ (zmm3 & (zmm18 ^ zmm17))
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm1 = zmm0 ^ (zmm3 & (zmm1 ^ zmm0))
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm9 = zmm9 | (zmm5 & mem)
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm7 = zmm7 ^ (mem & (zmm7 ^ zmm18))
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm0 = [6,6,6,0,7,7,7,7,0,8,8,8,8,0,9,9]
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm0 = [12,0,13,13,13,13,0,14,14,14,14,0,15,15,15,15]
; AVX512-FCP-NEXT: vpermd %zmm2, %zmm0, %zmm0
-; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm1))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm18))
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm3 = [6,6,6,0,7,7,7,7,0,8,8,8,8,0,9,9]
+; AVX512-FCP-NEXT: vpermd %zmm2, %zmm3, %zmm2
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (mem & (zmm2 ^ zmm1))
; AVX512-FCP-NEXT: vmovdqa64 %zmm4, (%r9)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm0, 128(%r9)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm7, 256(%r9)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm2, 128(%r9)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm0, 256(%r9)
; AVX512-FCP-NEXT: vmovdqa64 %zmm9, 64(%r9)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm10, 192(%r9)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm8, 192(%r9)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
; AVX512DQ-LABEL: store_i8_stride5_vf64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vmovdqa 32(%rsi), %ymm10
+; AVX512DQ-NEXT: vmovdqa 32(%rsi), %ymm11
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128,128]
-; AVX512DQ-NEXT: vpshufb %ymm1, %ymm10, %ymm0
-; AVX512DQ-NEXT: vmovdqa64 %ymm1, %ymm18
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm11, %ymm0
+; AVX512DQ-NEXT: vmovdqa64 %ymm1, %ymm20
; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm5
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [12,13,128,15,12,13,14,128,12,13,14,15,128,u,u,u,16,128,18,19,16,17,128,19,16,17,18,128,16,17,18,19]
; AVX512DQ-NEXT: vpshufb %ymm2, %ymm5, %ymm1
; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm21
-; AVX512DQ-NEXT: vporq %ymm0, %ymm1, %ymm19
-; AVX512DQ-NEXT: vmovdqa 32(%rdi), %xmm14
+; AVX512DQ-NEXT: vporq %ymm0, %ymm1, %ymm17
+; AVX512DQ-NEXT: vmovdqa 32(%rdi), %xmm12
; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm1 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11]
-; AVX512DQ-NEXT: vpshufb %xmm1, %xmm14, %xmm0
+; AVX512DQ-NEXT: vpshufb %xmm1, %xmm12, %xmm0
; AVX512DQ-NEXT: vmovdqa64 %xmm1, %xmm28
-; AVX512DQ-NEXT: vmovdqa 32(%rsi), %xmm13
+; AVX512DQ-NEXT: vmovdqa 32(%rsi), %xmm10
; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm2 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
-; AVX512DQ-NEXT: vpshufb %xmm2, %xmm13, %xmm1
+; AVX512DQ-NEXT: vpshufb %xmm2, %xmm10, %xmm1
; AVX512DQ-NEXT: vmovdqa64 %xmm2, %xmm29
-; AVX512DQ-NEXT: vporq %xmm0, %xmm1, %xmm20
+; AVX512DQ-NEXT: vporq %xmm0, %xmm1, %xmm19
; AVX512DQ-NEXT: vmovdqa 32(%rcx), %ymm15
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm7 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
-; AVX512DQ-NEXT: vpshufb %ymm7, %ymm15, %ymm0
-; AVX512DQ-NEXT: vmovdqa 32(%rdx), %ymm12
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
+; AVX512DQ-NEXT: vpshufb %ymm8, %ymm15, %ymm0
+; AVX512DQ-NEXT: vmovdqa 32(%rdx), %ymm13
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u]
-; AVX512DQ-NEXT: vpshufb %ymm3, %ymm12, %ymm1
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm13, %ymm1
; AVX512DQ-NEXT: vporq %ymm0, %ymm1, %ymm22
; AVX512DQ-NEXT: vmovdqa 32(%rcx), %xmm6
; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm1 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
; AVX512DQ-NEXT: vpshufb %xmm1, %xmm6, %xmm0
; AVX512DQ-NEXT: vmovdqa64 %xmm1, %xmm30
-; AVX512DQ-NEXT: vmovdqa 32(%rdx), %xmm8
+; AVX512DQ-NEXT: vmovdqa 32(%rdx), %xmm7
; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm2 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
-; AVX512DQ-NEXT: vpshufb %xmm2, %xmm8, %xmm1
+; AVX512DQ-NEXT: vpshufb %xmm2, %xmm7, %xmm1
; AVX512DQ-NEXT: vmovdqa64 %xmm2, %xmm31
; AVX512DQ-NEXT: vporq %xmm0, %xmm1, %xmm23
-; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128,128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128]
-; AVX512DQ-NEXT: # ymm9 = mem[0,1,0,1]
-; AVX512DQ-NEXT: vpshufb %ymm9, %ymm5, %ymm0
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm14 = [128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128,128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128]
+; AVX512DQ-NEXT: # ymm14 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm14, %ymm5, %ymm0
; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128]
; AVX512DQ-NEXT: # ymm4 = mem[0,1,0,1]
; AVX512DQ-NEXT: vpshufb %ymm4, %ymm5, %ymm1
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm24
; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [27,0,128,26,128,28,0,128,0,128,29,128,31,0,128,30,27,0,128,26,128,28,0,128,0,128,29,128,31,0,128,30]
; AVX512DQ-NEXT: # ymm5 = mem[0,1,0,1]
-; AVX512DQ-NEXT: vpshufb %ymm5, %ymm10, %ymm1
+; AVX512DQ-NEXT: vpshufb %ymm5, %ymm11, %ymm1
; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [19,128,21,0,128,20,128,22,128,24,0,128,23,128,25,0,19,128,21,0,128,20,128,22,128,24,0,128,23,128,25,0]
; AVX512DQ-NEXT: # ymm2 = mem[0,1,0,1]
-; AVX512DQ-NEXT: vpshufb %ymm2, %ymm10, %ymm10
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm10, %zmm26
-; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128]
-; AVX512DQ-NEXT: # ymm10 = mem[0,1,0,1]
-; AVX512DQ-NEXT: vpshufb %ymm10, %ymm12, %ymm1
+; AVX512DQ-NEXT: vpshufb %ymm2, %ymm11, %ymm11
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm11, %zmm26
+; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128]
+; AVX512DQ-NEXT: # ymm11 = mem[0,1,0,1]
+; AVX512DQ-NEXT: vpshufb %ymm11, %ymm13, %ymm1
; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128]
; AVX512DQ-NEXT: # ymm0 = mem[0,1,0,1]
-; AVX512DQ-NEXT: vpshufb %ymm0, %ymm15, %ymm11
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm11, %zmm25
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm15, %ymm9
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm9, %zmm25
; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [25,128,27,0,128,26,128,28,128,30,0,128,29,128,31,0,25,128,27,0,128,26,128,28,128,30,0,128,29,128,31,0]
; AVX512DQ-NEXT: # ymm1 = mem[0,1,0,1]
-; AVX512DQ-NEXT: vpshufb %ymm1, %ymm15, %ymm11
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm15, %ymm9
; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm15 = [18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25]
; AVX512DQ-NEXT: # ymm15 = mem[0,1,0,1]
-; AVX512DQ-NEXT: vpshufb %ymm15, %ymm12, %ymm12
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm11, %zmm12, %zmm27
-; AVX512DQ-NEXT: vmovdqa (%rcx), %ymm12
-; AVX512DQ-NEXT: vpshufb %ymm7, %ymm12, %ymm7
-; AVX512DQ-NEXT: vmovdqa (%rdx), %ymm11
-; AVX512DQ-NEXT: vpshufb %ymm3, %ymm11, %ymm3
-; AVX512DQ-NEXT: vporq %ymm7, %ymm3, %ymm16
-; AVX512DQ-NEXT: vpshufb %ymm0, %ymm12, %ymm0
-; AVX512DQ-NEXT: vpshufb %ymm15, %ymm11, %ymm3
-; AVX512DQ-NEXT: vporq %ymm0, %ymm3, %ymm17
+; AVX512DQ-NEXT: vpshufb %ymm15, %ymm13, %ymm13
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm9, %zmm13, %zmm27
+; AVX512DQ-NEXT: vmovdqa (%rcx), %ymm13
+; AVX512DQ-NEXT: vpshufb %ymm8, %ymm13, %ymm8
+; AVX512DQ-NEXT: vmovdqa (%rdx), %ymm9
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm9, %ymm3
+; AVX512DQ-NEXT: vporq %ymm8, %ymm3, %ymm16
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm13, %ymm0
+; AVX512DQ-NEXT: vpshufb %ymm15, %ymm9, %ymm3
+; AVX512DQ-NEXT: vporq %ymm0, %ymm3, %ymm18
; AVX512DQ-NEXT: vmovdqa (%rsi), %ymm3
-; AVX512DQ-NEXT: vmovdqa64 %ymm18, %ymm0
+; AVX512DQ-NEXT: vmovdqa64 %ymm20, %ymm0
; AVX512DQ-NEXT: vpshufb %ymm0, %ymm3, %ymm0
-; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm7
+; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm8
; AVX512DQ-NEXT: vmovdqa64 %ymm21, %ymm15
-; AVX512DQ-NEXT: vpshufb %ymm15, %ymm7, %ymm15
-; AVX512DQ-NEXT: vporq %ymm0, %ymm15, %ymm18
-; AVX512DQ-NEXT: vpshufb %ymm4, %ymm7, %ymm0
+; AVX512DQ-NEXT: vpshufb %ymm15, %ymm8, %ymm15
+; AVX512DQ-NEXT: vpor %ymm0, %ymm15, %ymm15
+; AVX512DQ-NEXT: vpshufb %ymm4, %ymm8, %ymm0
; AVX512DQ-NEXT: vpshufb %ymm2, %ymm3, %ymm2
-; AVX512DQ-NEXT: vpor %ymm0, %ymm2, %ymm4
-; AVX512DQ-NEXT: vpshufb %ymm10, %ymm11, %ymm0
-; AVX512DQ-NEXT: vpshufb %ymm1, %ymm12, %ymm1
-; AVX512DQ-NEXT: vpor %ymm0, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpshufb %ymm9, %ymm7, %ymm0
+; AVX512DQ-NEXT: vpor %ymm0, %ymm2, %ymm0
+; AVX512DQ-NEXT: vpshufb %ymm11, %ymm9, %ymm2
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm13, %ymm1
+; AVX512DQ-NEXT: vporq %ymm2, %ymm1, %ymm20
+; AVX512DQ-NEXT: vpshufb %ymm14, %ymm8, %ymm1
; AVX512DQ-NEXT: vpshufb %ymm5, %ymm3, %ymm2
+; AVX512DQ-NEXT: vporq %ymm1, %ymm2, %ymm21
; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm5
-; AVX512DQ-NEXT: vporq %ymm0, %ymm2, %ymm21
-; AVX512DQ-NEXT: vmovdqa (%rsi), %xmm9
-; AVX512DQ-NEXT: vmovdqa (%rcx), %xmm7
-; AVX512DQ-NEXT: vmovdqa64 %xmm28, %xmm0
-; AVX512DQ-NEXT: vpshufb %xmm0, %xmm5, %xmm0
+; AVX512DQ-NEXT: vmovdqa (%rsi), %xmm8
+; AVX512DQ-NEXT: vmovdqa64 %xmm28, %xmm1
+; AVX512DQ-NEXT: vpshufb %xmm1, %xmm5, %xmm1
; AVX512DQ-NEXT: vmovdqa64 %xmm29, %xmm2
-; AVX512DQ-NEXT: vpshufb %xmm2, %xmm9, %xmm2
-; AVX512DQ-NEXT: vpor %xmm0, %xmm2, %xmm10
-; AVX512DQ-NEXT: vmovdqa (%rdx), %xmm0
-; AVX512DQ-NEXT: vmovdqa 32(%r8), %ymm3
-; AVX512DQ-NEXT: vmovdqa64 %xmm30, %xmm2
-; AVX512DQ-NEXT: vpshufb %xmm2, %xmm7, %xmm2
-; AVX512DQ-NEXT: vmovdqa64 %xmm31, %xmm11
-; AVX512DQ-NEXT: vpshufb %xmm11, %xmm0, %xmm11
-; AVX512DQ-NEXT: vpor %xmm2, %xmm11, %xmm11
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm12 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
-; AVX512DQ-NEXT: vpshufb %ymm12, %ymm3, %ymm2
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm15 = mem[1,1,2,2]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm15 = ymm15[0,1,1,1]
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm28 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
-; AVX512DQ-NEXT: vpandnq %ymm15, %ymm28, %ymm15
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm15, %zmm2
-; AVX512DQ-NEXT: vmovdqa (%r8), %ymm15
-; AVX512DQ-NEXT: vpshufb %ymm12, %ymm15, %ymm12
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm15 = ymm15[0,2,1,1,4,6,5,5]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,3,3,2]
-; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm15, %ymm15
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm15, %zmm12, %zmm12
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3],xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm8 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
-; AVX512DQ-NEXT: vpshufb %xmm8, %xmm6, %xmm6
+; AVX512DQ-NEXT: vpshufb %xmm2, %xmm8, %xmm2
+; AVX512DQ-NEXT: vpor %xmm1, %xmm2, %xmm3
+; AVX512DQ-NEXT: vmovdqa (%rcx), %xmm1
+; AVX512DQ-NEXT: vmovdqa (%rdx), %xmm2
+; AVX512DQ-NEXT: vmovdqa64 %xmm30, %xmm4
+; AVX512DQ-NEXT: vpshufb %xmm4, %xmm1, %xmm4
+; AVX512DQ-NEXT: vmovdqa64 %xmm31, %xmm9
+; AVX512DQ-NEXT: vpshufb %xmm9, %xmm2, %xmm9
+; AVX512DQ-NEXT: vpor %xmm4, %xmm9, %xmm9
+; AVX512DQ-NEXT: vmovdqa 32(%r8), %ymm4
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm11 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
+; AVX512DQ-NEXT: vpshufb %ymm11, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm13 = mem[1,1,2,2]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm13 = ymm13[0,1,1,1]
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm14 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
+; AVX512DQ-NEXT: vpandn %ymm13, %ymm14, %ymm13
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm4, %zmm13, %zmm4
+; AVX512DQ-NEXT: vmovdqa (%r8), %ymm13
+; AVX512DQ-NEXT: vpshufb %ymm11, %ymm13, %ymm11
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm13 = ymm13[0,2,1,1,4,6,5,5]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,3,2]
+; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm13, %ymm13
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm13, %zmm11, %zmm11
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm7 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
+; AVX512DQ-NEXT: vpshufb %xmm7, %xmm6, %xmm6
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,1,1]
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm14 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
-; AVX512DQ-NEXT: vpshufb %xmm14, %xmm13, %xmm13
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm13 = ymm13[0,0,1,1]
-; AVX512DQ-NEXT: vmovdqa64 (%r8), %zmm15
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm29 = [4,0,5,5,5,5,0,6,6,6,6,0,7,7,7,7]
-; AVX512DQ-NEXT: vpermd %zmm3, %zmm29, %zmm29
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm30 = [6,6,6,0,7,7,7,7,0,16,16,16,16,0,17,17]
-; AVX512DQ-NEXT: vpermi2d %zmm3, %zmm15, %zmm30
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm5[0],xmm9[0],xmm5[1],xmm9[1],xmm5[2],xmm9[2],xmm5[3],xmm9[3],xmm5[4],xmm9[4],xmm5[5],xmm9[5],xmm5[6],xmm9[6],xmm5[7],xmm9[7]
-; AVX512DQ-NEXT: vpshufb %xmm14, %xmm3, %xmm3
-; AVX512DQ-NEXT: vinserti32x4 $2, %xmm10, %zmm3, %zmm3
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3],xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
-; AVX512DQ-NEXT: vpshufb %xmm8, %xmm0, %xmm0
-; AVX512DQ-NEXT: vinserti32x4 $2, %xmm11, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm5 = ymm20[0,0,1,1]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm19, %zmm5, %zmm5
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm23[0,0,1,1]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm22, %zmm7, %zmm7
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm8 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm7 = zmm5 ^ (zmm8 & (zmm7 ^ zmm5))
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 | (zmm7 & zmm28)
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm5 = ymm17[2,2,3,3]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm5, %zmm16, %zmm5
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm4, %zmm18, %zmm4
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm4 = zmm5 ^ (zmm8 & (zmm4 ^ zmm5))
-; AVX512DQ-NEXT: vporq %zmm24, %zmm26, %zmm5
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm12[0],xmm10[0],xmm12[1],xmm10[1],xmm12[2],xmm10[2],xmm12[3],xmm10[3],xmm12[4],xmm10[4],xmm12[5],xmm10[5],xmm12[6],xmm10[6],xmm12[7],xmm10[7]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm12 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
+; AVX512DQ-NEXT: vpshufb %xmm12, %xmm10, %xmm10
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,1,1]
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1],xmm5[2],xmm8[2],xmm5[3],xmm8[3],xmm5[4],xmm8[4],xmm5[5],xmm8[5],xmm5[6],xmm8[6],xmm5[7],xmm8[7]
+; AVX512DQ-NEXT: vpshufb %xmm12, %xmm5, %xmm5
+; AVX512DQ-NEXT: vinserti32x4 $2, %xmm3, %zmm5, %zmm3
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; AVX512DQ-NEXT: vpshufb %xmm7, %xmm1, %xmm1
+; AVX512DQ-NEXT: vinserti32x4 $2, %xmm9, %zmm1, %zmm1
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm19[0,0,1,1]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm17, %zmm2, %zmm2
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm5 = ymm23[0,0,1,1]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm22, %zmm5, %zmm5
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm7 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm5 = zmm2 ^ (zmm7 & (zmm5 ^ zmm2))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 | (zmm5 & zmm14)
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm18[2,2,3,3]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm16, %zmm2
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm15, %zmm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm0 = zmm2 ^ (zmm7 & (zmm0 ^ zmm2))
+; AVX512DQ-NEXT: vporq %zmm24, %zmm26, %zmm2
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm2 = zmm2[2,2,3,3,6,6,7,7]
+; AVX512DQ-NEXT: vporq %zmm25, %zmm27, %zmm5
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm5 = zmm5[2,2,3,3,6,6,7,7]
-; AVX512DQ-NEXT: vporq %zmm25, %zmm27, %zmm7
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm7 = zmm7[2,2,3,3,6,6,7,7]
-; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm7 = zmm5 ^ (zmm8 & (zmm7 ^ zmm5))
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm6, %zmm1, %zmm1
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm5 = ymm21[2,2,3,3]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm13, %zmm5, %zmm5
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm5 = zmm1 ^ (zmm8 & (zmm5 ^ zmm1))
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm29 = zmm29 ^ (mem & (zmm29 ^ zmm7))
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm12 = zmm12 | (zmm4 & mem)
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm30 = zmm30 ^ (mem & (zmm30 ^ zmm5))
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm1 = zmm3[0,0,1,1,4,4,5,5]
-; AVX512DQ-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,0,1,1,4,4,5,5]
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm1))
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm1 = [0,0,0,0,0,0,1,1,1,1,0,2,2,2,2,0]
-; AVX512DQ-NEXT: vpermd %zmm15, %zmm1, %zmm1
-; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 ^ (mem & (zmm1 ^ zmm0))
-; AVX512DQ-NEXT: vmovdqa64 %zmm12, 64(%r9)
-; AVX512DQ-NEXT: vmovdqa64 %zmm1, (%r9)
-; AVX512DQ-NEXT: vmovdqa64 %zmm30, 128(%r9)
-; AVX512DQ-NEXT: vmovdqa64 %zmm29, 256(%r9)
-; AVX512DQ-NEXT: vmovdqa64 %zmm2, 192(%r9)
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm5 = zmm2 ^ (zmm7 & (zmm5 ^ zmm2))
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm20[2,2,3,3]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm6, %zmm2, %zmm2
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm6 = ymm21[2,2,3,3]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm10, %zmm6, %zmm6
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm6 = zmm2 ^ (zmm7 & (zmm6 ^ zmm2))
+; AVX512DQ-NEXT: vmovdqa64 (%r8), %zmm2
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm7 = [12,0,13,13,13,13,0,14,14,14,14,0,15,15,15,15]
+; AVX512DQ-NEXT: vpermd %zmm2, %zmm7, %zmm7
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm7 = zmm7 ^ (mem & (zmm7 ^ zmm5))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm11 = zmm11 | (zmm0 & mem)
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm0 = [6,6,6,0,7,7,7,7,0,8,8,8,8,0,9,9]
+; AVX512DQ-NEXT: vpermd %zmm2, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm6))
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm3 = zmm3[0,0,1,1,4,4,5,5]
+; AVX512DQ-NEXT: vpermq {{.*#+}} zmm1 = zmm1[0,0,1,1,4,4,5,5]
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 ^ (mem & (zmm1 ^ zmm3))
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm3 = [0,0,0,0,0,0,1,1,1,1,0,2,2,2,2,0]
+; AVX512DQ-NEXT: vpermd %zmm2, %zmm3, %zmm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (mem & (zmm2 ^ zmm1))
+; AVX512DQ-NEXT: vmovdqa64 %zmm11, 64(%r9)
+; AVX512DQ-NEXT: vmovdqa64 %zmm2, (%r9)
+; AVX512DQ-NEXT: vmovdqa64 %zmm0, 128(%r9)
+; AVX512DQ-NEXT: vmovdqa64 %zmm7, 256(%r9)
+; AVX512DQ-NEXT: vmovdqa64 %zmm4, 192(%r9)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -5251,53 +5252,52 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm6, %ymm1
; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-FCP-NEXT: vpshufb %ymm13, %ymm10, %ymm1
-; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm9, %ymm3
+; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm9, %ymm2
+; AVX512DQ-FCP-NEXT: vpor %ymm1, %ymm2, %ymm1
; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm2
-; AVX512DQ-FCP-NEXT: vpor %ymm1, %ymm3, %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %xmm5
-; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %xmm3
+; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %xmm3
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[8],zero,xmm2[u,7],zero,xmm2[9],zero,xmm2[u],zero,xmm2[u,10],zero,xmm2[12],zero,xmm2[u,11]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm6 = zero,xmm5[8,u],zero,xmm5[7],zero,xmm5[9,u,11,u],zero,xmm5[10],zero,xmm5[12,u],zero
-; AVX512DQ-FCP-NEXT: vpor %xmm4, %xmm6, %xmm6
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %xmm4
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%r8), %ymm7
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm8 = zero,xmm3[6],zero,xmm3[8,u],zero,xmm3[7],zero,xmm3[9],zero,xmm3[11,u],zero,xmm3[10],zero,xmm3[12]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm4[6],zero,xmm4[8],zero,xmm4[u,7],zero,xmm4[9],zero,xmm4[11],zero,xmm4[u,10],zero,xmm4[12],zero
-; AVX512DQ-FCP-NEXT: vpor %xmm8, %xmm9, %xmm8
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = zero,xmm3[8,u],zero,xmm3[7],zero,xmm3[9,u,11,u],zero,xmm3[10],zero,xmm3[12,u],zero
+; AVX512DQ-FCP-NEXT: vpor %xmm4, %xmm5, %xmm6
+; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %xmm4
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %xmm5
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = zero,xmm4[6],zero,xmm4[8,u],zero,xmm4[7],zero,xmm4[9],zero,xmm4[11,u],zero,xmm4[10],zero,xmm4[12]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm5[6],zero,xmm5[8],zero,xmm5[u,7],zero,xmm5[9],zero,xmm5[11],zero,xmm5[u,10],zero,xmm5[12],zero
+; AVX512DQ-FCP-NEXT: vpor %xmm7, %xmm8, %xmm7
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%r8), %ymm8
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm7, %ymm10
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm11 = [1,1,2,2,2,2,2,2]
-; AVX512DQ-FCP-NEXT: vpermd %ymm7, %ymm11, %ymm11
-; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm12 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
-; AVX512DQ-FCP-NEXT: vpandn %ymm11, %ymm12, %ymm11
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm11, %zmm10
-; AVX512DQ-FCP-NEXT: vmovdqa (%r8), %ymm11
-; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm11, %ymm9
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm13 = [4,0,5,5,5,5,0,6,6,6,6,0,7,7,7,7]
-; AVX512DQ-FCP-NEXT: vpermd %ymm11, %ymm13, %ymm11
-; AVX512DQ-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm11, %ymm11
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm9, %zmm9
-; AVX512DQ-FCP-NEXT: vpermd %zmm7, %zmm13, %zmm7
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm30, %xmm11
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm31, %xmm13
-; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm11[0],xmm13[0],xmm11[1],xmm13[1],xmm11[2],xmm13[2],xmm11[3],xmm13[3],xmm11[4],xmm13[4],xmm11[5],xmm13[5],xmm11[6],xmm13[6],xmm11[7],xmm13[7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm13 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm11, %xmm11
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm14 = [2,2,3,3,8,8,9,9]
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm11, %zmm14, %zmm0
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm28, %xmm11
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm29, %xmm15
-; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm11[0],xmm15[0],xmm11[1],xmm15[1],xmm11[2],xmm15[2],xmm11[3],xmm15[3],xmm11[4],xmm15[4],xmm11[5],xmm15[5],xmm11[6],xmm15[6],xmm11[7],xmm15[7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm15, %xmm11, %xmm11
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm11, %zmm14, %zmm1
-; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm15, %xmm2, %xmm2
-; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm3, %xmm3
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm10 = [1,1,2,2,2,2,2,2]
+; AVX512DQ-FCP-NEXT: vpermd %ymm8, %ymm10, %ymm10
+; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm8, %ymm8
+; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm11 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
+; AVX512DQ-FCP-NEXT: vpandn %ymm10, %ymm11, %ymm10
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm10, %zmm8
+; AVX512DQ-FCP-NEXT: vmovdqa (%r8), %ymm10
+; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm10, %ymm9
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm12 = [4,0,5,5,5,5,0,6]
+; AVX512DQ-FCP-NEXT: vpermd %ymm10, %ymm12, %ymm10
+; AVX512DQ-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm10, %ymm10
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm30, %xmm10
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm31, %xmm12
+; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm10[0],xmm12[0],xmm10[1],xmm12[1],xmm10[2],xmm12[2],xmm10[3],xmm12[3],xmm10[4],xmm12[4],xmm10[5],xmm12[5],xmm10[6],xmm12[6],xmm10[7],xmm12[7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm10, %xmm10
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm13 = [2,2,3,3,8,8,9,9]
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm10, %zmm13, %zmm0
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm28, %xmm10
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm29, %xmm14
+; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm10[0],xmm14[0],xmm10[1],xmm14[1],xmm10[2],xmm14[2],xmm10[3],xmm14[3],xmm10[4],xmm14[4],xmm10[5],xmm14[5],xmm10[6],xmm14[6],xmm10[7],xmm14[7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm14, %xmm10, %xmm10
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm10, %zmm13, %zmm1
+; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm14, %xmm2, %xmm2
+; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm3, %xmm3
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm4 = [0,0,1,1,8,8,9,9]
; AVX512DQ-FCP-NEXT: vpermt2q %zmm6, %zmm4, %zmm2
-; AVX512DQ-FCP-NEXT: vpermt2q %zmm8, %zmm4, %zmm3
+; AVX512DQ-FCP-NEXT: vpermt2q %zmm7, %zmm4, %zmm3
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm3 = zmm3 ^ (mem & (zmm3 ^ zmm2))
; AVX512DQ-FCP-NEXT: vmovdqa64 (%r8), %zmm2
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,0,0,0,0,1,1,1,1,0,2,2,2,2,0]
@@ -5309,7 +5309,7 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm22, %zmm5, %zmm5
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm6 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm5 = zmm3 ^ (zmm6 & (zmm5 ^ zmm3))
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm10 = zmm10 | (zmm5 & zmm12)
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm8 = zmm8 | (zmm5 & zmm11)
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm25[2,2,3,3]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm24, %zmm3
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm27[2,2,3,3]
@@ -5322,15 +5322,17 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm18 = zmm17 ^ (zmm3 & (zmm18 ^ zmm17))
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm1 = zmm0 ^ (zmm3 & (zmm1 ^ zmm0))
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm9 = zmm9 | (zmm5 & mem)
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm7 = zmm7 ^ (mem & (zmm7 ^ zmm18))
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm0 = [6,6,6,0,7,7,7,7,0,8,8,8,8,0,9,9]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm0 = [12,0,13,13,13,13,0,14,14,14,14,0,15,15,15,15]
; AVX512DQ-FCP-NEXT: vpermd %zmm2, %zmm0, %zmm0
-; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm1))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm18))
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm3 = [6,6,6,0,7,7,7,7,0,8,8,8,8,0,9,9]
+; AVX512DQ-FCP-NEXT: vpermd %zmm2, %zmm3, %zmm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (mem & (zmm2 ^ zmm1))
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, (%r9)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, 128(%r9)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm7, 256(%r9)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm2, 128(%r9)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, 256(%r9)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm9, 64(%r9)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm10, 192(%r9)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm8, 192(%r9)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
@@ -5503,149 +5505,151 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
;
; AVX512BW-FCP-LABEL: store_i8_stride5_vf64:
; AVX512BW-FCP: # %bb.0:
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm7
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rsi), %zmm9
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rdx), %zmm3
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rcx), %zmm5
; AVX512BW-FCP-NEXT: vmovdqa64 (%r8), %zmm0
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rdx), %ymm8
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [128,128,12,13,128,128,128,128,14,128,128,128,14,15,128,128,128,128,16,128,128,128,16,17,128,128,128,128,18,128,128,128]
-; AVX512BW-FCP-NEXT: vpshufb %ymm6, %ymm8, %ymm1
-; AVX512BW-FCP-NEXT: vmovdqa64 32(%rcx), %ymm21
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
-; AVX512BW-FCP-NEXT: vpshufb %ymm7, %ymm21, %ymm2
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rdx), %ymm1
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm13 = [128,128,12,13,128,128,128,128,14,128,128,128,14,15,128,128,128,128,16,128,128,128,16,17,128,128,128,128,18,128,128,128]
+; AVX512BW-FCP-NEXT: vpshufb %ymm13, %ymm1, %ymm1
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rcx), %ymm2
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm14 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
+; AVX512BW-FCP-NEXT: vpshufb %ymm14, %ymm2, %ymm2
; AVX512BW-FCP-NEXT: vpor %ymm1, %ymm2, %ymm1
-; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %xmm11
+; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %xmm10
; AVX512BW-FCP-NEXT: vmovdqa 32(%rcx), %xmm2
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
-; AVX512BW-FCP-NEXT: vpshufb %xmm14, %xmm2, %xmm3
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
+; AVX512BW-FCP-NEXT: vpshufb %xmm11, %xmm2, %xmm4
; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %xmm12
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rdx), %xmm4
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm16 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
-; AVX512BW-FCP-NEXT: vpshufb %xmm16, %xmm4, %xmm5
-; AVX512BW-FCP-NEXT: vpor %xmm3, %xmm5, %xmm3
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,1,1]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm1
-; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %xmm13
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm3
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rdx), %xmm6
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm17 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
+; AVX512BW-FCP-NEXT: vpshufb %xmm17, %xmm6, %xmm8
+; AVX512BW-FCP-NEXT: vpor %xmm4, %xmm8, %xmm4
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm4, %zmm1
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %xmm18
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm4
; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm19 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11]
-; AVX512BW-FCP-NEXT: vpshufb %xmm19, %xmm3, %xmm9
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rsi), %xmm17
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rsi), %xmm5
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm20 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
-; AVX512BW-FCP-NEXT: vpshufb %xmm20, %xmm5, %xmm10
-; AVX512BW-FCP-NEXT: vpor %xmm9, %xmm10, %xmm9
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm9[0,0,1,1]
-; AVX512BW-FCP-NEXT: vmovdqa64 32(%rdi), %ymm18
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [3,3,3,0,4,4,4,4]
-; AVX512BW-FCP-NEXT: vpermd %ymm18, %ymm9, %ymm22
-; AVX512BW-FCP-NEXT: vmovdqa64 32(%rsi), %ymm23
-; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm10 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14]
+; AVX512BW-FCP-NEXT: vpshufb %xmm19, %xmm4, %xmm15
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rsi), %xmm20
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rsi), %xmm8
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm21 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
+; AVX512BW-FCP-NEXT: vpshufb %xmm21, %xmm8, %xmm16
+; AVX512BW-FCP-NEXT: vporq %xmm15, %xmm16, %xmm15
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm22 = ymm15[0,0,1,1]
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm15 = [3,3,3,0,4,4,4,4]
+; AVX512BW-FCP-NEXT: vpermd 32(%rdi), %ymm15, %ymm23
+; AVX512BW-FCP-NEXT: vmovdqa64 32(%rsi), %ymm24
+; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm16 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14]
; AVX512BW-FCP-NEXT: movl $138547332, %eax # imm = 0x8421084
; AVX512BW-FCP-NEXT: kmovd %eax, %k1
-; AVX512BW-FCP-NEXT: vpshufb %ymm10, %ymm23, %ymm22 {%k1}
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm22, %zmm15, %zmm15
+; AVX512BW-FCP-NEXT: vpshufb %ymm16, %ymm24, %ymm23 {%k1}
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm23, %zmm22, %zmm22
; AVX512BW-FCP-NEXT: movabsq $-8330787646191410408, %rax # imm = 0x8C6318C6318C6318
; AVX512BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm15, %zmm1 {%k2}
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm15 = [9,9,10,10,10,10,10,10,11,11,11,11,0,12,12,12]
-; AVX512BW-FCP-NEXT: vpermd %zmm0, %zmm15, %zmm15
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm22, %zmm1 {%k2}
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm22 = [9,9,10,10,10,10,10,10,11,11,11,11,0,12,12,12]
+; AVX512BW-FCP-NEXT: vpermd %zmm0, %zmm22, %zmm22
; AVX512BW-FCP-NEXT: movabsq $4760450083537948804, %rax # imm = 0x4210842108421084
; AVX512BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm15, %zmm1 {%k3}
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm22 = zmm23[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm15 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30]
-; AVX512BW-FCP-NEXT: vpshufb %zmm15, %zmm22, %zmm22
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm23 = zmm18[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm18 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128]
-; AVX512BW-FCP-NEXT: vpshufb %zmm18, %zmm23, %zmm23
-; AVX512BW-FCP-NEXT: vporq %zmm22, %zmm23, %zmm22
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm22 = zmm22[2,2,3,3,6,6,7,7]
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm8 = zmm8[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm23 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128]
-; AVX512BW-FCP-NEXT: vpshufb %zmm23, %zmm8, %zmm8
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm21 = zmm21[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm24 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128]
-; AVX512BW-FCP-NEXT: vpshufb %zmm24, %zmm21, %zmm21
-; AVX512BW-FCP-NEXT: vporq %zmm8, %zmm21, %zmm8
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm8 = zmm8[2,2,3,3,6,6,7,7]
-; AVX512BW-FCP-NEXT: movabsq $1785168781326730801, %rax # imm = 0x18C6318C6318C631
-; AVX512BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm22, %zmm8 {%k3}
-; AVX512BW-FCP-NEXT: vmovdqa64 32(%r8), %ymm21
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm22 = [4,6,5,5,5,5,4,6,6,6,6,6,7,7,7,7]
-; AVX512BW-FCP-NEXT: vpermd %zmm21, %zmm22, %zmm21
-; AVX512BW-FCP-NEXT: movabsq $-8925843906633654008, %rax # imm = 0x8421084210842108
-; AVX512BW-FCP-NEXT: kmovq %rax, %k4
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm21, %zmm8 {%k4}
-; AVX512BW-FCP-NEXT: vpshufb %xmm14, %xmm11, %xmm14
-; AVX512BW-FCP-NEXT: vpshufb %xmm16, %xmm12, %xmm16
-; AVX512BW-FCP-NEXT: vporq %xmm14, %xmm16, %xmm14
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm11[0],xmm12[0],xmm11[1],xmm12[1],xmm11[2],xmm12[2],xmm11[3],xmm12[3],xmm11[4],xmm12[4],xmm11[5],xmm12[5],xmm11[6],xmm12[6],xmm11[7],xmm12[7]
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm22, %zmm1 {%k3}
+; AVX512BW-FCP-NEXT: vpshufb %xmm11, %xmm10, %xmm11
+; AVX512BW-FCP-NEXT: vpshufb %xmm17, %xmm12, %xmm17
+; AVX512BW-FCP-NEXT: vporq %xmm11, %xmm17, %xmm11
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm10[0],xmm12[0],xmm10[1],xmm12[1],xmm10[2],xmm12[2],xmm10[3],xmm12[3],xmm10[4],xmm12[4],xmm10[5],xmm12[5],xmm10[6],xmm12[6],xmm10[7],xmm12[7]
; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
-; AVX512BW-FCP-NEXT: vpshufb %xmm12, %xmm11, %xmm11
-; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm14, %zmm11, %zmm11
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm14 = zmm11[0,0,1,1,4,4,5,5]
-; AVX512BW-FCP-NEXT: vpshufb %xmm19, %xmm13, %xmm11
-; AVX512BW-FCP-NEXT: vpshufb %xmm20, %xmm17, %xmm16
-; AVX512BW-FCP-NEXT: vporq %xmm11, %xmm16, %xmm11
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm13[0],xmm17[0],xmm13[1],xmm17[1],xmm13[2],xmm17[2],xmm13[3],xmm17[3],xmm13[4],xmm17[4],xmm13[5],xmm17[5],xmm13[6],xmm17[6],xmm13[7],xmm17[7]
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm16 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
-; AVX512BW-FCP-NEXT: vpshufb %xmm16, %xmm13, %xmm13
-; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm11, %zmm13, %zmm11
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm11 = zmm11[0,0,1,1,4,4,5,5]
+; AVX512BW-FCP-NEXT: vpshufb %xmm12, %xmm10, %xmm10
+; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm11, %zmm10, %zmm10
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm17 = zmm10[0,0,1,1,4,4,5,5]
+; AVX512BW-FCP-NEXT: vpshufb %xmm19, %xmm18, %xmm10
+; AVX512BW-FCP-NEXT: vpshufb %xmm21, %xmm20, %xmm11
+; AVX512BW-FCP-NEXT: vpor %xmm10, %xmm11, %xmm10
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm18 = xmm18[0],xmm20[0],xmm18[1],xmm20[1],xmm18[2],xmm20[2],xmm18[3],xmm20[3],xmm18[4],xmm20[4],xmm18[5],xmm20[5],xmm18[6],xmm20[6],xmm18[7],xmm20[7]
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
+; AVX512BW-FCP-NEXT: vpshufb %xmm11, %xmm18, %xmm18
+; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm10, %zmm18, %zmm10
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm10 = zmm10[0,0,1,1,4,4,5,5]
; AVX512BW-FCP-NEXT: movabsq $-4165393823095705204, %rax # imm = 0xC6318C6318C6318C
-; AVX512BW-FCP-NEXT: kmovq %rax, %k4
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm14, %zmm11 {%k4}
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm13 = [0,0,0,0,0,0,1,1,1,1,2,2,2,2,2,2]
-; AVX512BW-FCP-NEXT: vpermd %zmm0, %zmm13, %zmm13
+; AVX512BW-FCP-NEXT: kmovq %rax, %k3
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm17, %zmm10 {%k3}
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm17 = [0,0,0,0,0,0,1,1,1,1,2,2,2,2,2,2]
+; AVX512BW-FCP-NEXT: vpermd %zmm0, %zmm17, %zmm17
; AVX512BW-FCP-NEXT: movabsq $595056260442243600, %rax # imm = 0x842108421084210
-; AVX512BW-FCP-NEXT: kmovq %rax, %k4
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm13, %zmm11 {%k4}
-; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %ymm13
-; AVX512BW-FCP-NEXT: vpshufb %ymm6, %ymm13, %ymm6
-; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %ymm14
-; AVX512BW-FCP-NEXT: vpshufb %ymm7, %ymm14, %ymm7
-; AVX512BW-FCP-NEXT: vpor %ymm6, %ymm7, %ymm6
-; AVX512BW-FCP-NEXT: vpshufb %ymm23, %ymm13, %ymm7
-; AVX512BW-FCP-NEXT: vpshufb %ymm24, %ymm14, %ymm17
-; AVX512BW-FCP-NEXT: vporq %ymm7, %ymm17, %ymm7
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm6
-; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %ymm7
-; AVX512BW-FCP-NEXT: vpshufb %ymm15, %ymm7, %ymm15
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %ymm17
-; AVX512BW-FCP-NEXT: vpshufb %ymm18, %ymm17, %ymm18
-; AVX512BW-FCP-NEXT: vporq %ymm15, %ymm18, %ymm15
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,2,3,3]
-; AVX512BW-FCP-NEXT: vpermd %ymm17, %ymm9, %ymm9
-; AVX512BW-FCP-NEXT: vpshufb %ymm10, %ymm7, %ymm9 {%k1}
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm15, %zmm9, %zmm9
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm9 {%k2}
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm6 = [3,3,3,3,0,4,4,4,4,6,5,5,5,5,4,6]
-; AVX512BW-FCP-NEXT: vpermd %zmm0, %zmm6, %zmm6
+; AVX512BW-FCP-NEXT: kmovq %rax, %k3
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm17, %zmm10 {%k3}
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rdx), %ymm17
+; AVX512BW-FCP-NEXT: vpshufb %ymm13, %ymm17, %ymm13
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rcx), %ymm18
+; AVX512BW-FCP-NEXT: vpshufb %ymm14, %ymm18, %ymm14
+; AVX512BW-FCP-NEXT: vpor %ymm13, %ymm14, %ymm13
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm14 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128]
+; AVX512BW-FCP-NEXT: vpshufb %ymm14, %ymm17, %ymm19
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm20 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128]
+; AVX512BW-FCP-NEXT: vpshufb %ymm20, %ymm18, %ymm21
+; AVX512BW-FCP-NEXT: vporq %ymm19, %ymm21, %ymm19
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm19 = ymm19[2,2,3,3]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm19, %zmm13, %zmm13
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm19 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30]
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rsi), %ymm21
+; AVX512BW-FCP-NEXT: vpshufb %ymm19, %ymm21, %ymm22
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm23 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128]
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %ymm24
+; AVX512BW-FCP-NEXT: vpshufb %ymm23, %ymm24, %ymm25
+; AVX512BW-FCP-NEXT: vporq %ymm22, %ymm25, %ymm22
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm22 = ymm22[2,2,3,3]
+; AVX512BW-FCP-NEXT: vpermd %ymm24, %ymm15, %ymm15
+; AVX512BW-FCP-NEXT: vpshufb %ymm16, %ymm21, %ymm15 {%k1}
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm22, %zmm15, %zmm15
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm13, %zmm15 {%k2}
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm13 = [3,3,3,3,0,4,4,4,4,6,5,5,5,5,4,6]
+; AVX512BW-FCP-NEXT: vpermd %zmm0, %zmm13, %zmm13
; AVX512BW-FCP-NEXT: movabsq $1190112520884487201, %rax # imm = 0x1084210842108421
; AVX512BW-FCP-NEXT: kmovq %rax, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm9 {%k1}
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm13, %zmm15 {%k1}
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm9[4,5,6,7,4,5,6,7]
+; AVX512BW-FCP-NEXT: vpshufb %zmm19, %zmm9, %zmm9
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm7[4,5,6,7,4,5,6,7]
+; AVX512BW-FCP-NEXT: vpshufb %zmm23, %zmm7, %zmm7
+; AVX512BW-FCP-NEXT: vporq %zmm9, %zmm7, %zmm7
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm7 = zmm7[2,2,3,3,6,6,7,7]
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm3[4,5,6,7],zmm5[4,5,6,7]
+; AVX512BW-FCP-NEXT: vpshufb %zmm14, %zmm9, %zmm9
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm5[4,5,6,7],zmm3[4,5,6,7]
+; AVX512BW-FCP-NEXT: vpshufb %zmm20, %zmm3, %zmm3
+; AVX512BW-FCP-NEXT: vporq %zmm9, %zmm3, %zmm3
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[2,2,3,3,6,6,7,7]
+; AVX512BW-FCP-NEXT: movabsq $1785168781326730801, %rax # imm = 0x18C6318C6318C631
+; AVX512BW-FCP-NEXT: kmovq %rax, %k1
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm7, %zmm3 {%k1}
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm5 = [12,14,13,13,13,13,12,14,14,14,14,14,15,15,15,15]
+; AVX512BW-FCP-NEXT: vpermd %zmm0, %zmm5, %zmm5
+; AVX512BW-FCP-NEXT: movabsq $-8925843906633654008, %rax # imm = 0x8421084210842108
+; AVX512BW-FCP-NEXT: kmovq %rax, %k2
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm5, %zmm3 {%k2}
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3],xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
; AVX512BW-FCP-NEXT: vpshufb %xmm12, %xmm2, %xmm2
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm14[27],zero,zero,ymm14[26],zero,ymm14[28],zero,ymm14[30],zero,zero,ymm14[29],zero,ymm14[31],zero
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm13[27],zero,zero,ymm13[26],zero,ymm13[28],zero,ymm13[30],zero,zero,ymm13[29],zero,ymm13[31],zero,zero
-; AVX512BW-FCP-NEXT: vpor %ymm4, %ymm6, %ymm4
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm18[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm18[27],zero,zero,ymm18[26],zero,ymm18[28],zero,ymm18[30],zero,zero,ymm18[29],zero,ymm18[31],zero
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm17[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm17[27],zero,zero,ymm17[26],zero,ymm17[28],zero,ymm17[30],zero,zero,ymm17[29],zero,ymm17[31],zero,zero
+; AVX512BW-FCP-NEXT: vpor %ymm5, %ymm6, %ymm5
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm6 = [2,2,3,3,8,8,9,9]
+; AVX512BW-FCP-NEXT: vpermt2q %zmm2, %zmm6, %zmm5
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3],xmm4[4],xmm8[4],xmm4[5],xmm8[5],xmm4[6],xmm8[6],xmm4[7],xmm8[7]
+; AVX512BW-FCP-NEXT: vpshufb %xmm11, %xmm2, %xmm2
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm21[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm21[26],zero,ymm21[28],zero,zero,zero,zero,ymm21[29],zero,ymm21[31],zero,zero,ymm21[30]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm24[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm24[26],zero,ymm24[28],zero,zero,ymm24[27],zero,ymm24[29],zero,ymm24[31],zero,zero,ymm24[30],zero
+; AVX512BW-FCP-NEXT: vpor %ymm4, %ymm7, %ymm4
; AVX512BW-FCP-NEXT: vpermt2q %zmm2, %zmm6, %zmm4
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
-; AVX512BW-FCP-NEXT: vpshufb %xmm16, %xmm2, %xmm2
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm7[26],zero,ymm7[28],zero,zero,zero,zero,ymm7[29],zero,ymm7[31],zero,zero,ymm7[30]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm17[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm17[26],zero,ymm17[28],zero,zero,ymm17[27],zero,ymm17[29],zero,ymm17[31],zero,zero,ymm17[30],zero
-; AVX512BW-FCP-NEXT: vpor %ymm3, %ymm5, %ymm3
-; AVX512BW-FCP-NEXT: vpermt2q %zmm2, %zmm6, %zmm3
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm4, %zmm3 {%k3}
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm5, %zmm4 {%k1}
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm2 = [6,6,6,6,7,7,7,7,8,8,8,8,8,8,9,9]
; AVX512BW-FCP-NEXT: vpermd %zmm0, %zmm2, %zmm0
; AVX512BW-FCP-NEXT: movabsq $2380225041768974402, %rax # imm = 0x2108421084210842
; AVX512BW-FCP-NEXT: kmovq %rax, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm0, %zmm3 {%k1}
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm3, 128(%r9)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm9, 64(%r9)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm11, (%r9)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm8, 256(%r9)
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm0, %zmm4 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm4, 128(%r9)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm3, 256(%r9)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm15, 64(%r9)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm10, (%r9)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm1, 192(%r9)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
@@ -5819,149 +5823,151 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
;
; AVX512DQ-BW-FCP-LABEL: store_i8_stride5_vf64:
; AVX512DQ-BW-FCP: # %bb.0:
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm7
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rsi), %zmm9
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdx), %zmm3
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rcx), %zmm5
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%r8), %zmm0
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdx), %ymm8
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [128,128,12,13,128,128,128,128,14,128,128,128,14,15,128,128,128,128,16,128,128,128,16,17,128,128,128,128,18,128,128,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm6, %ymm8, %ymm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rcx), %ymm21
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm7, %ymm21, %ymm2
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdx), %ymm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm13 = [128,128,12,13,128,128,128,128,14,128,128,128,14,15,128,128,128,128,16,128,128,128,16,17,128,128,128,128,18,128,128,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm13, %ymm1, %ymm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rcx), %ymm2
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm14 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm14, %ymm2, %ymm2
; AVX512DQ-BW-FCP-NEXT: vpor %ymm1, %ymm2, %ymm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %xmm11
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %xmm10
; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rcx), %xmm2
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm14, %xmm2, %xmm3
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm11, %xmm2, %xmm4
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %xmm12
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdx), %xmm4
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm16 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm16, %xmm4, %xmm5
-; AVX512DQ-BW-FCP-NEXT: vpor %xmm3, %xmm5, %xmm3
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,1,1]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %xmm13
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm3
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdx), %xmm6
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm17 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm17, %xmm6, %xmm8
+; AVX512DQ-BW-FCP-NEXT: vpor %xmm4, %xmm8, %xmm4
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm4, %zmm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %xmm18
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm4
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm19 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm19, %xmm3, %xmm9
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rsi), %xmm17
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rsi), %xmm5
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm20 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm20, %xmm5, %xmm10
-; AVX512DQ-BW-FCP-NEXT: vpor %xmm9, %xmm10, %xmm9
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm9[0,0,1,1]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rdi), %ymm18
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [3,3,3,0,4,4,4,4]
-; AVX512DQ-BW-FCP-NEXT: vpermd %ymm18, %ymm9, %ymm22
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rsi), %ymm23
-; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm10 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm19, %xmm4, %xmm15
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rsi), %xmm20
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rsi), %xmm8
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm21 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm21, %xmm8, %xmm16
+; AVX512DQ-BW-FCP-NEXT: vporq %xmm15, %xmm16, %xmm15
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm22 = ymm15[0,0,1,1]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm15 = [3,3,3,0,4,4,4,4]
+; AVX512DQ-BW-FCP-NEXT: vpermd 32(%rdi), %ymm15, %ymm23
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rsi), %ymm24
+; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm16 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14]
; AVX512DQ-BW-FCP-NEXT: movl $138547332, %eax # imm = 0x8421084
; AVX512DQ-BW-FCP-NEXT: kmovd %eax, %k1
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm10, %ymm23, %ymm22 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm22, %zmm15, %zmm15
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm16, %ymm24, %ymm23 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm23, %zmm22, %zmm22
; AVX512DQ-BW-FCP-NEXT: movabsq $-8330787646191410408, %rax # imm = 0x8C6318C6318C6318
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k2
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm15, %zmm1 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm15 = [9,9,10,10,10,10,10,10,11,11,11,11,0,12,12,12]
-; AVX512DQ-BW-FCP-NEXT: vpermd %zmm0, %zmm15, %zmm15
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm22, %zmm1 {%k2}
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm22 = [9,9,10,10,10,10,10,10,11,11,11,11,0,12,12,12]
+; AVX512DQ-BW-FCP-NEXT: vpermd %zmm0, %zmm22, %zmm22
; AVX512DQ-BW-FCP-NEXT: movabsq $4760450083537948804, %rax # imm = 0x4210842108421084
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm15, %zmm1 {%k3}
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm22 = zmm23[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm15 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm15, %zmm22, %zmm22
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm23 = zmm18[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm18 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm18, %zmm23, %zmm23
-; AVX512DQ-BW-FCP-NEXT: vporq %zmm22, %zmm23, %zmm22
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm22 = zmm22[2,2,3,3,6,6,7,7]
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm8 = zmm8[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm23 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm23, %zmm8, %zmm8
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm21 = zmm21[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm24 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm24, %zmm21, %zmm21
-; AVX512DQ-BW-FCP-NEXT: vporq %zmm8, %zmm21, %zmm8
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm8 = zmm8[2,2,3,3,6,6,7,7]
-; AVX512DQ-BW-FCP-NEXT: movabsq $1785168781326730801, %rax # imm = 0x18C6318C6318C631
-; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm22, %zmm8 {%k3}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%r8), %ymm21
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm22 = [4,6,5,5,5,5,4,6,6,6,6,6,7,7,7,7]
-; AVX512DQ-BW-FCP-NEXT: vpermd %zmm21, %zmm22, %zmm21
-; AVX512DQ-BW-FCP-NEXT: movabsq $-8925843906633654008, %rax # imm = 0x8421084210842108
-; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k4
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm21, %zmm8 {%k4}
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm14, %xmm11, %xmm14
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm16, %xmm12, %xmm16
-; AVX512DQ-BW-FCP-NEXT: vporq %xmm14, %xmm16, %xmm14
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm11[0],xmm12[0],xmm11[1],xmm12[1],xmm11[2],xmm12[2],xmm11[3],xmm12[3],xmm11[4],xmm12[4],xmm11[5],xmm12[5],xmm11[6],xmm12[6],xmm11[7],xmm12[7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm22, %zmm1 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm11, %xmm10, %xmm11
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm17, %xmm12, %xmm17
+; AVX512DQ-BW-FCP-NEXT: vporq %xmm11, %xmm17, %xmm11
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm10[0],xmm12[0],xmm10[1],xmm12[1],xmm10[2],xmm12[2],xmm10[3],xmm12[3],xmm10[4],xmm12[4],xmm10[5],xmm12[5],xmm10[6],xmm12[6],xmm10[7],xmm12[7]
; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm12, %xmm11, %xmm11
-; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm14, %zmm11, %zmm11
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm14 = zmm11[0,0,1,1,4,4,5,5]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm19, %xmm13, %xmm11
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm20, %xmm17, %xmm16
-; AVX512DQ-BW-FCP-NEXT: vporq %xmm11, %xmm16, %xmm11
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm13[0],xmm17[0],xmm13[1],xmm17[1],xmm13[2],xmm17[2],xmm13[3],xmm17[3],xmm13[4],xmm17[4],xmm13[5],xmm17[5],xmm13[6],xmm17[6],xmm13[7],xmm17[7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm16 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm16, %xmm13, %xmm13
-; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm11, %zmm13, %zmm11
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm11 = zmm11[0,0,1,1,4,4,5,5]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm12, %xmm10, %xmm10
+; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm11, %zmm10, %zmm10
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm17 = zmm10[0,0,1,1,4,4,5,5]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm19, %xmm18, %xmm10
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm21, %xmm20, %xmm11
+; AVX512DQ-BW-FCP-NEXT: vpor %xmm10, %xmm11, %xmm10
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm18 = xmm18[0],xmm20[0],xmm18[1],xmm20[1],xmm18[2],xmm20[2],xmm18[3],xmm20[3],xmm18[4],xmm20[4],xmm18[5],xmm20[5],xmm18[6],xmm20[6],xmm18[7],xmm20[7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm11, %xmm18, %xmm18
+; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm10, %zmm18, %zmm10
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm10 = zmm10[0,0,1,1,4,4,5,5]
; AVX512DQ-BW-FCP-NEXT: movabsq $-4165393823095705204, %rax # imm = 0xC6318C6318C6318C
-; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k4
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm14, %zmm11 {%k4}
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm13 = [0,0,0,0,0,0,1,1,1,1,2,2,2,2,2,2]
-; AVX512DQ-BW-FCP-NEXT: vpermd %zmm0, %zmm13, %zmm13
+; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm17, %zmm10 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm17 = [0,0,0,0,0,0,1,1,1,1,2,2,2,2,2,2]
+; AVX512DQ-BW-FCP-NEXT: vpermd %zmm0, %zmm17, %zmm17
; AVX512DQ-BW-FCP-NEXT: movabsq $595056260442243600, %rax # imm = 0x842108421084210
-; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k4
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm13, %zmm11 {%k4}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %ymm13
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm6, %ymm13, %ymm6
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %ymm14
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm7, %ymm14, %ymm7
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm6, %ymm7, %ymm6
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm23, %ymm13, %ymm7
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm24, %ymm14, %ymm17
-; AVX512DQ-BW-FCP-NEXT: vporq %ymm7, %ymm17, %ymm7
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm6
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %ymm7
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm15, %ymm7, %ymm15
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %ymm17
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm18, %ymm17, %ymm18
-; AVX512DQ-BW-FCP-NEXT: vporq %ymm15, %ymm18, %ymm15
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,2,3,3]
-; AVX512DQ-BW-FCP-NEXT: vpermd %ymm17, %ymm9, %ymm9
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm10, %ymm7, %ymm9 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm15, %zmm9, %zmm9
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm9 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm6 = [3,3,3,3,0,4,4,4,4,6,5,5,5,5,4,6]
-; AVX512DQ-BW-FCP-NEXT: vpermd %zmm0, %zmm6, %zmm6
+; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm17, %zmm10 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdx), %ymm17
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm13, %ymm17, %ymm13
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rcx), %ymm18
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm14, %ymm18, %ymm14
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm13, %ymm14, %ymm13
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm14 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm14, %ymm17, %ymm19
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm20 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm20, %ymm18, %ymm21
+; AVX512DQ-BW-FCP-NEXT: vporq %ymm19, %ymm21, %ymm19
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm19 = ymm19[2,2,3,3]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm19, %zmm13, %zmm13
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm19 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rsi), %ymm21
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm19, %ymm21, %ymm22
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm23 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %ymm24
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm23, %ymm24, %ymm25
+; AVX512DQ-BW-FCP-NEXT: vporq %ymm22, %ymm25, %ymm22
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm22 = ymm22[2,2,3,3]
+; AVX512DQ-BW-FCP-NEXT: vpermd %ymm24, %ymm15, %ymm15
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm16, %ymm21, %ymm15 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm22, %zmm15, %zmm15
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm13, %zmm15 {%k2}
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm13 = [3,3,3,3,0,4,4,4,4,6,5,5,5,5,4,6]
+; AVX512DQ-BW-FCP-NEXT: vpermd %zmm0, %zmm13, %zmm13
; AVX512DQ-BW-FCP-NEXT: movabsq $1190112520884487201, %rax # imm = 0x1084210842108421
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm9 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm13, %zmm15 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm9[4,5,6,7,4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm19, %zmm9, %zmm9
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm7[4,5,6,7,4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm23, %zmm7, %zmm7
+; AVX512DQ-BW-FCP-NEXT: vporq %zmm9, %zmm7, %zmm7
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm7 = zmm7[2,2,3,3,6,6,7,7]
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm3[4,5,6,7],zmm5[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm14, %zmm9, %zmm9
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm5[4,5,6,7],zmm3[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm20, %zmm3, %zmm3
+; AVX512DQ-BW-FCP-NEXT: vporq %zmm9, %zmm3, %zmm3
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[2,2,3,3,6,6,7,7]
+; AVX512DQ-BW-FCP-NEXT: movabsq $1785168781326730801, %rax # imm = 0x18C6318C6318C631
+; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k1
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm7, %zmm3 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm5 = [12,14,13,13,13,13,12,14,14,14,14,14,15,15,15,15]
+; AVX512DQ-BW-FCP-NEXT: vpermd %zmm0, %zmm5, %zmm5
+; AVX512DQ-BW-FCP-NEXT: movabsq $-8925843906633654008, %rax # imm = 0x8421084210842108
+; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k2
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm5, %zmm3 {%k2}
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3],xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm12, %xmm2, %xmm2
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm14[27],zero,zero,ymm14[26],zero,ymm14[28],zero,ymm14[30],zero,zero,ymm14[29],zero,ymm14[31],zero
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm13[27],zero,zero,ymm13[26],zero,ymm13[28],zero,ymm13[30],zero,zero,ymm13[29],zero,ymm13[31],zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm4, %ymm6, %ymm4
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm18[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm18[27],zero,zero,ymm18[26],zero,ymm18[28],zero,ymm18[30],zero,zero,ymm18[29],zero,ymm18[31],zero
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm17[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm17[27],zero,zero,ymm17[26],zero,ymm17[28],zero,ymm17[30],zero,zero,ymm17[29],zero,ymm17[31],zero,zero
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm5, %ymm6, %ymm5
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm6 = [2,2,3,3,8,8,9,9]
+; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm2, %zmm6, %zmm5
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3],xmm4[4],xmm8[4],xmm4[5],xmm8[5],xmm4[6],xmm8[6],xmm4[7],xmm8[7]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm11, %xmm2, %xmm2
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm21[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm21[26],zero,ymm21[28],zero,zero,zero,zero,ymm21[29],zero,ymm21[31],zero,zero,ymm21[30]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm24[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm24[26],zero,ymm24[28],zero,zero,ymm24[27],zero,ymm24[29],zero,ymm24[31],zero,zero,ymm24[30],zero
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm4, %ymm7, %ymm4
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm2, %zmm6, %zmm4
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm16, %xmm2, %xmm2
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm7[26],zero,ymm7[28],zero,zero,zero,zero,ymm7[29],zero,ymm7[31],zero,zero,ymm7[30]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm17[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm17[26],zero,ymm17[28],zero,zero,ymm17[27],zero,ymm17[29],zero,ymm17[31],zero,zero,ymm17[30],zero
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm3, %ymm5, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm2, %zmm6, %zmm3
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm4, %zmm3 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm5, %zmm4 {%k1}
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm2 = [6,6,6,6,7,7,7,7,8,8,8,8,8,8,9,9]
; AVX512DQ-BW-FCP-NEXT: vpermd %zmm0, %zmm2, %zmm0
; AVX512DQ-BW-FCP-NEXT: movabsq $2380225041768974402, %rax # imm = 0x2108421084210842
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm0, %zmm3 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm3, 128(%r9)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm9, 64(%r9)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm11, (%r9)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm8, 256(%r9)
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm0, %zmm4 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm4, 128(%r9)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm3, 256(%r9)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm15, 64(%r9)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm10, (%r9)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm1, 192(%r9)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
index 6205be83f5123..5c30c4cdf94d3 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
@@ -6359,210 +6359,200 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
;
; AVX512BW-LABEL: store_i8_stride6_vf64:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vmovdqa64 (%r8), %zmm4
-; AVX512BW-NEXT: vmovdqa 32(%rsi), %ymm2
-; AVX512BW-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm6 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512BW-NEXT: vpshufb %ymm6, %ymm2, %ymm0
-; AVX512BW-NEXT: vmovdqa 32(%rdi), %ymm3
-; AVX512BW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-NEXT: vpshufb %ymm6, %ymm3, %ymm1
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
+; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-NEXT: vmovdqa64 (%r8), %zmm7
+; AVX512BW-NEXT: vmovdqa64 (%r9), %zmm6
+; AVX512BW-NEXT: vmovdqa 32(%rsi), %ymm1
+; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm5 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512BW-NEXT: vpshufb %ymm5, %ymm1, %ymm0
+; AVX512BW-NEXT: vmovdqa 32(%rdi), %ymm2
+; AVX512BW-NEXT: vpshufb %ymm5, %ymm2, %ymm3
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[16],ymm0[16],ymm3[17],ymm0[17],ymm3[18],ymm0[18],ymm3[19],ymm0[19],ymm3[20],ymm0[20],ymm3[21],ymm0[21],ymm3[22],ymm0[22],ymm3[23],ymm0[23]
; AVX512BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} ymm1 = ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15],ymm3[24],ymm2[24],ymm3[25],ymm2[25],ymm3[26],ymm2[26],ymm3[27],ymm2[27],ymm3[28],ymm2[28],ymm3[29],ymm2[29],ymm3[30],ymm2[30],ymm3[31],ymm2[31]
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm2 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
-; AVX512BW-NEXT: vpermw %ymm1, %ymm2, %ymm1
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm5
-; AVX512BW-NEXT: vmovdqa 32(%rcx), %ymm12
-; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm1 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512BW-NEXT: vpshufb %ymm1, %ymm12, %ymm0
-; AVX512BW-NEXT: vmovdqa 32(%rdx), %ymm13
-; AVX512BW-NEXT: vpshufb %ymm1, %ymm13, %ymm8
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm8[0],ymm0[0],ymm8[1],ymm0[1],ymm8[2],ymm0[2],ymm8[3],ymm0[3],ymm8[4],ymm0[4],ymm8[5],ymm0[5],ymm8[6],ymm0[6],ymm8[7],ymm0[7],ymm8[16],ymm0[16],ymm8[17],ymm0[17],ymm8[18],ymm0[18],ymm8[19],ymm0[19],ymm8[20],ymm0[20],ymm8[21],ymm0[21],ymm8[22],ymm0[22],ymm8[23],ymm0[23]
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15],ymm2[24],ymm1[24],ymm2[25],ymm1[25],ymm2[26],ymm1[26],ymm2[27],ymm1[27],ymm2[28],ymm1[28],ymm2[29],ymm1[29],ymm2[30],ymm1[30],ymm2[31],ymm1[31]
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm8 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
+; AVX512BW-NEXT: vpermw %ymm3, %ymm8, %ymm3
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm9
+; AVX512BW-NEXT: vmovdqa 32(%rcx), %ymm3
+; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm11 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
+; AVX512BW-NEXT: vpshufb %ymm11, %ymm3, %ymm0
+; AVX512BW-NEXT: vmovdqa 32(%rdx), %ymm4
+; AVX512BW-NEXT: vpshufb %ymm11, %ymm4, %ymm10
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm10[0],ymm0[0],ymm10[1],ymm0[1],ymm10[2],ymm0[2],ymm10[3],ymm0[3],ymm10[4],ymm0[4],ymm10[5],ymm0[5],ymm10[6],ymm0[6],ymm10[7],ymm0[7],ymm10[16],ymm0[16],ymm10[17],ymm0[17],ymm10[18],ymm0[18],ymm10[19],ymm0[19],ymm10[20],ymm0[20],ymm10[21],ymm0[21],ymm10[22],ymm0[22],ymm10[23],ymm0[23]
; AVX512BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} ymm9 = ymm13[8],ymm12[8],ymm13[9],ymm12[9],ymm13[10],ymm12[10],ymm13[11],ymm12[11],ymm13[12],ymm12[12],ymm13[13],ymm12[13],ymm13[14],ymm12[14],ymm13[15],ymm12[15],ymm13[24],ymm12[24],ymm13[25],ymm12[25],ymm13[26],ymm12[26],ymm13[27],ymm12[27],ymm13[28],ymm12[28],ymm13[29],ymm12[29],ymm13[30],ymm12[30],ymm13[31],ymm12[31]
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm1 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
-; AVX512BW-NEXT: vpermw %ymm9, %ymm1, %ymm9
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm10
-; AVX512BW-NEXT: movl $613566756, %eax # imm = 0x24924924
-; AVX512BW-NEXT: kmovd %eax, %k1
-; AVX512BW-NEXT: vmovdqu16 %zmm5, %zmm10 {%k1}
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} ymm10 = ymm4[8],ymm3[8],ymm4[9],ymm3[9],ymm4[10],ymm3[10],ymm4[11],ymm3[11],ymm4[12],ymm3[12],ymm4[13],ymm3[13],ymm4[14],ymm3[14],ymm4[15],ymm3[15],ymm4[24],ymm3[24],ymm4[25],ymm3[25],ymm4[26],ymm3[26],ymm4[27],ymm3[27],ymm4[28],ymm3[28],ymm4[29],ymm3[29],ymm4[30],ymm3[30],ymm4[31],ymm3[31]
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm12 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
+; AVX512BW-NEXT: vpermw %ymm10, %ymm12, %ymm10
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm10, %zmm0, %zmm0
+; AVX512BW-NEXT: movl $613566756, %r10d # imm = 0x24924924
+; AVX512BW-NEXT: kmovd %r10d, %k1
+; AVX512BW-NEXT: vmovdqu16 %zmm9, %zmm0 {%k1}
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm7[4,5,6,7,4,5,6,7]
+; AVX512BW-NEXT: vpshufb {{.*#+}} zmm9 = zmm9[6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,42,u,45,u,44,u,43,u,46,u,u,u,u,u,47,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63,u]
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,2,2,3,6,6,6,7]
+; AVX512BW-NEXT: movl $-1840700270, %r10d # imm = 0x92492492
+; AVX512BW-NEXT: kmovd %r10d, %k2
+; AVX512BW-NEXT: vmovdqu16 %zmm9, %zmm0 {%k2}
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm6[4,5,6,7,4,5,6,7]
+; AVX512BW-NEXT: vpshufb {{.*#+}} zmm9 = zmm9[u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,42,u,45,u,44,u,43,u,46,u,u,u,u,u,47,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63]
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,2,2,3,6,6,6,7]
+; AVX512BW-NEXT: movabsq $-9076969306111049208, %r10 # imm = 0x8208208208208208
+; AVX512BW-NEXT: kmovq %r10, %k3
+; AVX512BW-NEXT: vmovdqu8 %zmm9, %zmm0 {%k3}
+; AVX512BW-NEXT: vmovdqa (%rsi), %ymm9
+; AVX512BW-NEXT: vpshufb %ymm5, %ymm9, %ymm13
+; AVX512BW-NEXT: vmovdqa (%rdi), %ymm10
+; AVX512BW-NEXT: vpshufb %ymm5, %ymm10, %ymm5
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm5[0],ymm13[0],ymm5[1],ymm13[1],ymm5[2],ymm13[2],ymm5[3],ymm13[3],ymm5[4],ymm13[4],ymm5[5],ymm13[5],ymm5[6],ymm13[6],ymm5[7],ymm13[7],ymm5[16],ymm13[16],ymm5[17],ymm13[17],ymm5[18],ymm13[18],ymm5[19],ymm13[19],ymm5[20],ymm13[20],ymm5[21],ymm13[21],ymm5[22],ymm13[22],ymm5[23],ymm13[23]
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} ymm13 = ymm10[8],ymm9[8],ymm10[9],ymm9[9],ymm10[10],ymm9[10],ymm10[11],ymm9[11],ymm10[12],ymm9[12],ymm10[13],ymm9[13],ymm10[14],ymm9[14],ymm10[15],ymm9[15],ymm10[24],ymm9[24],ymm10[25],ymm9[25],ymm10[26],ymm9[26],ymm10[27],ymm9[27],ymm10[28],ymm9[28],ymm10[29],ymm9[29],ymm10[30],ymm9[30],ymm10[31],ymm9[31]
+; AVX512BW-NEXT: vpermw %ymm13, %ymm8, %ymm8
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm8, %zmm5, %zmm8
; AVX512BW-NEXT: vmovdqa64 (%rcx), %ymm16
+; AVX512BW-NEXT: vpshufb %ymm11, %ymm16, %ymm5
; AVX512BW-NEXT: vmovdqa64 (%rdx), %ymm17
-; AVX512BW-NEXT: vmovdqa64 (%rsi), %xmm20
-; AVX512BW-NEXT: vmovdqa64 32(%rsi), %xmm18
-; AVX512BW-NEXT: vpbroadcastq {{.*#+}} xmm14 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512BW-NEXT: vpshufb %xmm14, %xmm18, %xmm5
+; AVX512BW-NEXT: vpshufb %ymm11, %ymm17, %ymm13
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm13[0],ymm5[0],ymm13[1],ymm5[1],ymm13[2],ymm5[2],ymm13[3],ymm5[3],ymm13[4],ymm5[4],ymm13[5],ymm5[5],ymm13[6],ymm5[6],ymm13[7],ymm5[7],ymm13[16],ymm5[16],ymm13[17],ymm5[17],ymm13[18],ymm5[18],ymm13[19],ymm5[19],ymm13[20],ymm5[20],ymm13[21],ymm5[21],ymm13[22],ymm5[22],ymm13[23],ymm5[23]
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} ymm13 = ymm17[8],ymm16[8],ymm17[9],ymm16[9],ymm17[10],ymm16[10],ymm17[11],ymm16[11],ymm17[12],ymm16[12],ymm17[13],ymm16[13],ymm17[14],ymm16[14],ymm17[15],ymm16[15],ymm17[24],ymm16[24],ymm17[25],ymm16[25],ymm17[26],ymm16[26],ymm17[27],ymm16[27],ymm17[28],ymm16[28],ymm17[29],ymm16[29],ymm17[30],ymm16[30],ymm17[31],ymm16[31]
+; AVX512BW-NEXT: vpermw %ymm13, %ymm12, %ymm12
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm12, %zmm5, %zmm5
+; AVX512BW-NEXT: vmovdqu16 %zmm8, %zmm5 {%k1}
+; AVX512BW-NEXT: vmovdqa (%r8), %ymm13
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm13, %zmm7, %zmm7
+; AVX512BW-NEXT: vpshufb {{.*#+}} zmm7 = zmm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63,u]
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm7 = zmm7[2,2,2,3,6,6,6,7]
+; AVX512BW-NEXT: vmovdqu16 %zmm7, %zmm5 {%k2}
+; AVX512BW-NEXT: vmovdqa (%r9), %ymm12
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm12, %zmm6, %zmm6
+; AVX512BW-NEXT: vpshufb {{.*#+}} zmm6 = zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63]
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm6 = zmm6[2,2,2,3,6,6,6,7]
+; AVX512BW-NEXT: vmovdqu8 %zmm6, %zmm5 {%k3}
+; AVX512BW-NEXT: vmovdqa64 (%rsi), %xmm21
+; AVX512BW-NEXT: vmovdqa 32(%rsi), %xmm7
+; AVX512BW-NEXT: vpbroadcastq {{.*#+}} xmm20 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512BW-NEXT: vpshufb %xmm20, %xmm7, %xmm6
; AVX512BW-NEXT: vmovdqa64 (%rdi), %xmm22
-; AVX512BW-NEXT: vmovdqa64 32(%rdi), %xmm21
-; AVX512BW-NEXT: vpshufb %xmm14, %xmm21, %xmm9
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm9 = xmm9[8],xmm5[8],xmm9[9],xmm5[9],xmm9[10],xmm5[10],xmm9[11],xmm5[11],xmm9[12],xmm5[12],xmm9[13],xmm5[13],xmm9[14],xmm5[14],xmm9[15],xmm5[15]
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm21[0],xmm18[0],xmm21[1],xmm18[1],xmm21[2],xmm18[2],xmm21[3],xmm18[3],xmm21[4],xmm18[4],xmm21[5],xmm18[5],xmm21[6],xmm18[6],xmm21[7],xmm18[7]
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm26 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5,32,33,34,35,32,33,34,35,32,33,34,35,36,37,38,39]
-; AVX512BW-NEXT: vpermt2w %zmm9, %zmm26, %zmm5
-; AVX512BW-NEXT: vmovdqa (%rcx), %xmm0
-; AVX512BW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512BW-NEXT: vmovdqa 32(%rcx), %xmm11
-; AVX512BW-NEXT: vmovdqa (%rdx), %xmm9
-; AVX512BW-NEXT: vmovdqa64 (%r8), %xmm25
-; AVX512BW-NEXT: vmovdqa64 32(%r8), %xmm23
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} xmm15 = xmm23[0],zero,xmm23[1],zero,xmm23[2],zero,xmm23[3],zero,xmm23[4],zero,xmm23[5],zero,xmm23[6],zero,xmm23[7],zero
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm19 = xmm23[2,1,2,3]
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} xmm19 = xmm19[0],zero,xmm19[1],zero,xmm19[2],zero,xmm19[3],zero,xmm19[4],zero,xmm19[5],zero,xmm19[6],zero,xmm19[7],zero
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm29 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4,38,37,32,39,38,37,32,39,38,37,32,39,33,33,33,33]
-; AVX512BW-NEXT: vpermt2w %zmm19, %zmm29, %zmm15
-; AVX512BW-NEXT: vmovdqa64 (%r9), %xmm27
-; AVX512BW-NEXT: vmovdqa64 32(%r9), %xmm24
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm19 = xmm24[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm28 = xmm24[2,1,2,3]
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm28 = xmm28[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512BW-NEXT: vpermt2w %zmm28, %zmm29, %zmm19
-; AVX512BW-NEXT: vpshufb %xmm14, %xmm20, %xmm28
-; AVX512BW-NEXT: vpshufb %xmm14, %xmm22, %xmm14
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm28 = xmm14[8],xmm28[8],xmm14[9],xmm28[9],xmm14[10],xmm28[10],xmm14[11],xmm28[11],xmm14[12],xmm28[12],xmm14[13],xmm28[13],xmm14[14],xmm28[14],xmm14[15],xmm28[15]
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm22[0],xmm20[0],xmm22[1],xmm20[1],xmm22[2],xmm20[2],xmm22[3],xmm20[3],xmm22[4],xmm20[4],xmm22[5],xmm20[5],xmm22[6],xmm20[6],xmm22[7],xmm20[7]
-; AVX512BW-NEXT: vpermt2w %zmm28, %zmm26, %zmm14
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} xmm26 = xmm25[0],zero,xmm25[1],zero,xmm25[2],zero,xmm25[3],zero,xmm25[4],zero,xmm25[5],zero,xmm25[6],zero,xmm25[7],zero
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm28 = xmm25[2,1,2,3]
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} xmm28 = xmm28[0],zero,xmm28[1],zero,xmm28[2],zero,xmm28[3],zero,xmm28[4],zero,xmm28[5],zero,xmm28[6],zero,xmm28[7],zero
-; AVX512BW-NEXT: vpermt2w %zmm28, %zmm29, %zmm26
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm28 = xmm27[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm30 = xmm27[2,1,2,3]
+; AVX512BW-NEXT: vmovdqa 32(%rdi), %xmm8
+; AVX512BW-NEXT: vpshufb %xmm20, %xmm8, %xmm14
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm14 = xmm14[8],xmm6[8],xmm14[9],xmm6[9],xmm14[10],xmm6[10],xmm14[11],xmm6[11],xmm14[12],xmm6[12],xmm14[13],xmm6[13],xmm14[14],xmm6[14],xmm14[15],xmm6[15]
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm27 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5,32,33,34,35,32,33,34,35,32,33,34,35,36,37,38,39]
+; AVX512BW-NEXT: vpermt2w %zmm14, %zmm27, %zmm6
+; AVX512BW-NEXT: vmovdqa64 (%rcx), %xmm25
+; AVX512BW-NEXT: vmovdqa 32(%rcx), %xmm14
+; AVX512BW-NEXT: vpshufb %xmm11, %xmm14, %xmm15
+; AVX512BW-NEXT: vmovdqa64 (%rdx), %xmm26
+; AVX512BW-NEXT: vmovdqa64 32(%rdx), %xmm18
+; AVX512BW-NEXT: vpshufb %xmm11, %xmm18, %xmm19
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm15 = xmm19[0],xmm15[0],xmm19[1],xmm15[1],xmm19[2],xmm15[2],xmm19[3],xmm15[3],xmm19[4],xmm15[4],xmm19[5],xmm15[5],xmm19[6],xmm15[6],xmm19[7],xmm15[7]
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm19 = xmm18[0],xmm14[0],xmm18[1],xmm14[1],xmm18[2],xmm14[2],xmm18[3],xmm14[3],xmm18[4],xmm14[4],xmm18[5],xmm14[5],xmm18[6],xmm14[6],xmm18[7],xmm14[7]
+; AVX512BW-NEXT: vprold $16, %xmm19, %xmm19
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm15, %zmm19, %zmm15
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm15 = zmm15[0,0,0,1,4,4,4,5]
+; AVX512BW-NEXT: vmovdqu16 %zmm15, %zmm6 {%k2}
+; AVX512BW-NEXT: vmovdqa64 (%r8), %xmm23
+; AVX512BW-NEXT: vmovdqa 32(%r8), %xmm15
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} xmm19 = xmm15[0],zero,xmm15[1],zero,xmm15[2],zero,xmm15[3],zero,xmm15[4],zero,xmm15[5],zero,xmm15[6],zero,xmm15[7],zero
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm24 = xmm15[2,1,2,3]
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} xmm24 = xmm24[0],zero,xmm24[1],zero,xmm24[2],zero,xmm24[3],zero,xmm24[4],zero,xmm24[5],zero,xmm24[6],zero,xmm24[7],zero
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm28 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4,38,37,32,39,38,37,32,39,38,37,32,39,33,33,33,33]
+; AVX512BW-NEXT: vpermt2w %zmm24, %zmm28, %zmm19
+; AVX512BW-NEXT: vmovdqu16 %zmm19, %zmm6 {%k1}
+; AVX512BW-NEXT: vmovdqa64 (%r9), %xmm24
+; AVX512BW-NEXT: vmovdqa64 32(%r9), %xmm19
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm29 = xmm19[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm30 = xmm19[2,1,2,3]
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm30 = xmm30[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512BW-NEXT: vpermt2w %zmm30, %zmm29, %zmm28
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm30 = ymm17[0],ymm16[0],ymm17[1],ymm16[1],ymm17[2],ymm16[2],ymm17[3],ymm16[3],ymm17[4],ymm16[4],ymm17[5],ymm16[5],ymm17[6],ymm16[6],ymm17[7],ymm16[7],ymm17[16],ymm16[16],ymm17[17],ymm16[17],ymm17[18],ymm16[18],ymm17[19],ymm16[19],ymm17[20],ymm16[20],ymm17[21],ymm16[21],ymm17[22],ymm16[22],ymm17[23],ymm16[23]
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm29 = xmm9[8],xmm0[8],xmm9[9],xmm0[9],xmm9[10],xmm0[10],xmm9[11],xmm0[11],xmm9[12],xmm0[12],xmm9[13],xmm0[13],xmm9[14],xmm0[14],xmm9[15],xmm0[15]
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm31 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7,41,40,43,42,41,40,43,42,41,40,43,42,45,44,47,46]
-; AVX512BW-NEXT: vpermt2w %zmm30, %zmm31, %zmm29
-; AVX512BW-NEXT: vmovdqa64 32(%rdx), %xmm30
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm13 = ymm13[0],ymm12[0],ymm13[1],ymm12[1],ymm13[2],ymm12[2],ymm13[3],ymm12[3],ymm13[4],ymm12[4],ymm13[5],ymm12[5],ymm13[6],ymm12[6],ymm13[7],ymm12[7],ymm13[16],ymm12[16],ymm13[17],ymm12[17],ymm13[18],ymm12[18],ymm13[19],ymm12[19],ymm13[20],ymm12[20],ymm13[21],ymm12[21],ymm13[22],ymm12[22],ymm13[23],ymm12[23]
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm12 = xmm30[8],xmm11[8],xmm30[9],xmm11[9],xmm30[10],xmm11[10],xmm30[11],xmm11[11],xmm30[12],xmm11[12],xmm30[13],xmm11[13],xmm30[14],xmm11[14],xmm30[15],xmm11[15]
-; AVX512BW-NEXT: vpermt2w %zmm13, %zmm31, %zmm12
-; AVX512BW-NEXT: vmovdqa 32(%r8), %ymm13
-; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm3
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm31 = zmm13[0,1,2,3],zmm4[4,5,6,7]
-; AVX512BW-NEXT: vpshufb {{.*#+}} zmm31 = zmm31[6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,42,u,45,u,44,u,43,u,46,u,u,u,u,u,47,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63,u]
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm31 = zmm31[2,2,2,3,6,6,6,7]
-; AVX512BW-NEXT: movl $-1840700270, %eax # imm = 0x92492492
-; AVX512BW-NEXT: kmovd %eax, %k2
-; AVX512BW-NEXT: vmovdqu16 %zmm31, %zmm10 {%k2}
-; AVX512BW-NEXT: vmovdqa64 (%r9), %zmm31
-; AVX512BW-NEXT: vmovdqa 32(%r9), %ymm7
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm8 = zmm7[0,1,2,3],zmm31[4,5,6,7]
-; AVX512BW-NEXT: vpshufb {{.*#+}} zmm8 = zmm8[u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,42,u,45,u,44,u,43,u,46,u,u,u,u,u,47,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63]
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm8 = zmm8[2,2,2,3,6,6,6,7]
-; AVX512BW-NEXT: movabsq $-9076969306111049208, %rax # imm = 0x8208208208208208
-; AVX512BW-NEXT: kmovq %rax, %k3
-; AVX512BW-NEXT: vmovdqu8 %zmm8, %zmm10 {%k3}
-; AVX512BW-NEXT: vmovdqa (%rsi), %ymm8
-; AVX512BW-NEXT: vpshufb %ymm6, %ymm8, %ymm1
-; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512BW-NEXT: vpshufb %ymm6, %ymm0, %ymm6
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm6[0],ymm1[0],ymm6[1],ymm1[1],ymm6[2],ymm1[2],ymm6[3],ymm1[3],ymm6[4],ymm1[4],ymm6[5],ymm1[5],ymm6[6],ymm1[6],ymm6[7],ymm1[7],ymm6[16],ymm1[16],ymm6[17],ymm1[17],ymm6[18],ymm1[18],ymm6[19],ymm1[19],ymm6[20],ymm1[20],ymm6[21],ymm1[21],ymm6[22],ymm1[22],ymm6[23],ymm1[23]
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm0[8],ymm8[8],ymm0[9],ymm8[9],ymm0[10],ymm8[10],ymm0[11],ymm8[11],ymm0[12],ymm8[12],ymm0[13],ymm8[13],ymm0[14],ymm8[14],ymm0[15],ymm8[15],ymm0[24],ymm8[24],ymm0[25],ymm8[25],ymm0[26],ymm8[26],ymm0[27],ymm8[27],ymm0[28],ymm8[28],ymm0[29],ymm8[29],ymm0[30],ymm8[30],ymm0[31],ymm8[31]
-; AVX512BW-NEXT: vpermw %ymm6, %ymm2, %ymm6
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm6, %zmm1, %zmm2
-; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm1 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512BW-NEXT: vpshufb %ymm1, %ymm16, %ymm6
-; AVX512BW-NEXT: vpshufb %ymm1, %ymm17, %ymm4
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm6[0],ymm4[1],ymm6[1],ymm4[2],ymm6[2],ymm4[3],ymm6[3],ymm4[4],ymm6[4],ymm4[5],ymm6[5],ymm4[6],ymm6[6],ymm4[7],ymm6[7],ymm4[16],ymm6[16],ymm4[17],ymm6[17],ymm4[18],ymm6[18],ymm4[19],ymm6[19],ymm4[20],ymm6[20],ymm4[21],ymm6[21],ymm4[22],ymm6[22],ymm4[23],ymm6[23]
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm17[8],ymm16[8],ymm17[9],ymm16[9],ymm17[10],ymm16[10],ymm17[11],ymm16[11],ymm17[12],ymm16[12],ymm17[13],ymm16[13],ymm17[14],ymm16[14],ymm17[15],ymm16[15],ymm17[24],ymm16[24],ymm17[25],ymm16[25],ymm17[26],ymm16[26],ymm17[27],ymm16[27],ymm17[28],ymm16[28],ymm17[29],ymm16[29],ymm17[30],ymm16[30],ymm17[31],ymm16[31]
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm1 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
-; AVX512BW-NEXT: vpermw %ymm6, %ymm1, %ymm6
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm6, %zmm4, %zmm6
-; AVX512BW-NEXT: vmovdqu16 %zmm2, %zmm6 {%k1}
-; AVX512BW-NEXT: vmovdqa (%r8), %ymm1
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm4
-; AVX512BW-NEXT: vpshufb {{.*#+}} zmm4 = zmm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63,u]
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm4 = zmm4[2,2,2,3,6,6,6,7]
-; AVX512BW-NEXT: vmovdqu16 %zmm4, %zmm6 {%k2}
-; AVX512BW-NEXT: vmovdqa64 (%r9), %ymm16
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm16, %zmm31, %zmm4
-; AVX512BW-NEXT: vpshufb {{.*#+}} zmm4 = zmm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63]
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm4 = zmm4[2,2,2,3,6,6,6,7]
-; AVX512BW-NEXT: vmovdqu8 %zmm4, %zmm6 {%k3}
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm8[0],ymm0[1],ymm8[1],ymm0[2],ymm8[2],ymm0[3],ymm8[3],ymm0[4],ymm8[4],ymm0[5],ymm8[5],ymm0[6],ymm8[6],ymm0[7],ymm8[7],ymm0[16],ymm8[16],ymm0[17],ymm8[17],ymm0[18],ymm8[18],ymm0[19],ymm8[19],ymm0[20],ymm8[20],ymm0[21],ymm8[21],ymm0[22],ymm8[22],ymm0[23],ymm8[23]
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm22[8],xmm20[8],xmm22[9],xmm20[9],xmm22[10],xmm20[10],xmm22[11],xmm20[11],xmm22[12],xmm20[12],xmm22[13],xmm20[13],xmm22[14],xmm20[14],xmm22[15],xmm20[15]
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm8 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7,40,43,42,41,40,43,42,41,40,43,42,41,44,45,46,45]
-; AVX512BW-NEXT: vpermt2w %zmm0, %zmm8, %zmm4
-; AVX512BW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512BW-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512BW-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[16],mem[16],ymm0[17],mem[17],ymm0[18],mem[18],ymm0[19],mem[19],ymm0[20],mem[20],ymm0[21],mem[21],ymm0[22],mem[22],ymm0[23],mem[23]
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm17 = xmm21[8],xmm18[8],xmm21[9],xmm18[9],xmm21[10],xmm18[10],xmm21[11],xmm18[11],xmm21[12],xmm18[12],xmm21[13],xmm18[13],xmm21[14],xmm18[14],xmm21[15],xmm18[15]
-; AVX512BW-NEXT: vpermt2w %zmm0, %zmm8, %zmm17
-; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0]
-; AVX512BW-NEXT: # ymm0 = mem[0,1,0,1]
-; AVX512BW-NEXT: vpshufb %ymm0, %ymm1, %ymm1
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm8 = xmm25[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm18 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,40,41,42,43,40,41,42,43,40,41,42,43,44,45,46,47]
-; AVX512BW-NEXT: vpermt2w %zmm1, %zmm18, %zmm8
-; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0]
-; AVX512BW-NEXT: # ymm1 = mem[0,1,0,1]
-; AVX512BW-NEXT: vpshufb %ymm1, %ymm16, %ymm16
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm20 = xmm27[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512BW-NEXT: vpermt2w %zmm16, %zmm18, %zmm20
-; AVX512BW-NEXT: vpshufb %ymm0, %ymm13, %ymm0
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm13 = xmm23[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512BW-NEXT: vpermt2w %zmm0, %zmm18, %zmm13
-; AVX512BW-NEXT: vpshufb %ymm1, %ymm7, %ymm0
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm24[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512BW-NEXT: vpermt2w %zmm0, %zmm18, %zmm1
-; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm3 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512BW-NEXT: vpshufb %xmm3, %xmm11, %xmm0
-; AVX512BW-NEXT: vpshufb %xmm3, %xmm30, %xmm2
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm30[0],xmm11[0],xmm30[1],xmm11[1],xmm30[2],xmm11[2],xmm30[3],xmm11[3],xmm30[4],xmm11[4],xmm30[5],xmm11[5],xmm30[6],xmm11[6],xmm30[7],xmm11[7]
-; AVX512BW-NEXT: vprold $16, %xmm2, %xmm2
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,0,0,1,4,4,4,5]
-; AVX512BW-NEXT: vmovdqu16 %zmm0, %zmm5 {%k2}
-; AVX512BW-NEXT: vmovdqu16 %zmm15, %zmm5 {%k1}
-; AVX512BW-NEXT: movabsq $585610922974906400, %rax # imm = 0x820820820820820
-; AVX512BW-NEXT: kmovq %rax, %k3
-; AVX512BW-NEXT: vmovdqu8 %zmm19, %zmm5 {%k3}
-; AVX512BW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX512BW-NEXT: vpshufb %xmm3, %xmm7, %xmm0
-; AVX512BW-NEXT: vpshufb %xmm3, %xmm9, %xmm2
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm9[0],xmm7[0],xmm9[1],xmm7[1],xmm9[2],xmm7[2],xmm9[3],xmm7[3],xmm9[4],xmm7[4],xmm9[5],xmm7[5],xmm9[6],xmm7[6],xmm9[7],xmm7[7]
-; AVX512BW-NEXT: vprold $16, %xmm2, %xmm2
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0
-; AVX512BW-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,0,0,1,4,4,4,5]
-; AVX512BW-NEXT: vmovdqu16 %zmm0, %zmm14 {%k2}
-; AVX512BW-NEXT: vmovdqu16 %zmm26, %zmm14 {%k1}
-; AVX512BW-NEXT: vmovdqu8 %zmm28, %zmm14 {%k3}
-; AVX512BW-NEXT: vmovdqu16 %zmm29, %zmm4 {%k1}
-; AVX512BW-NEXT: movl $1227133513, %eax # imm = 0x49249249
-; AVX512BW-NEXT: kmovd %eax, %k2
-; AVX512BW-NEXT: vmovdqu16 %zmm8, %zmm4 {%k2}
-; AVX512BW-NEXT: movabsq $2342443691899625602, %rax # imm = 0x2082082082082082
-; AVX512BW-NEXT: kmovq %rax, %k3
-; AVX512BW-NEXT: vmovdqu8 %zmm20, %zmm4 {%k3}
-; AVX512BW-NEXT: vmovdqu16 %zmm12, %zmm17 {%k1}
-; AVX512BW-NEXT: vmovdqu16 %zmm13, %zmm17 {%k2}
-; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm17 {%k3}
-; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT: vmovdqa64 %zmm17, 256(%rax)
-; AVX512BW-NEXT: vmovdqa64 %zmm4, 64(%rax)
-; AVX512BW-NEXT: vmovdqa64 %zmm14, (%rax)
-; AVX512BW-NEXT: vmovdqa64 %zmm5, 192(%rax)
-; AVX512BW-NEXT: vmovdqa64 %zmm6, 128(%rax)
-; AVX512BW-NEXT: vmovdqa64 %zmm10, 320(%rax)
+; AVX512BW-NEXT: vpermt2w %zmm30, %zmm28, %zmm29
+; AVX512BW-NEXT: movabsq $585610922974906400, %rcx # imm = 0x820820820820820
+; AVX512BW-NEXT: kmovq %rcx, %k3
+; AVX512BW-NEXT: vmovdqu8 %zmm29, %zmm6 {%k3}
+; AVX512BW-NEXT: vpshufb %xmm20, %xmm21, %xmm29
+; AVX512BW-NEXT: vpshufb %xmm20, %xmm22, %xmm20
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm29 = xmm20[8],xmm29[8],xmm20[9],xmm29[9],xmm20[10],xmm29[10],xmm20[11],xmm29[11],xmm20[12],xmm29[12],xmm20[13],xmm29[13],xmm20[14],xmm29[14],xmm20[15],xmm29[15]
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm20 = xmm22[0],xmm21[0],xmm22[1],xmm21[1],xmm22[2],xmm21[2],xmm22[3],xmm21[3],xmm22[4],xmm21[4],xmm22[5],xmm21[5],xmm22[6],xmm21[6],xmm22[7],xmm21[7]
+; AVX512BW-NEXT: vpermt2w %zmm29, %zmm27, %zmm20
+; AVX512BW-NEXT: vpshufb %xmm11, %xmm25, %xmm27
+; AVX512BW-NEXT: vpshufb %xmm11, %xmm26, %xmm11
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm11[0],xmm27[0],xmm11[1],xmm27[1],xmm11[2],xmm27[2],xmm11[3],xmm27[3],xmm11[4],xmm27[4],xmm11[5],xmm27[5],xmm11[6],xmm27[6],xmm11[7],xmm27[7]
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm27 = xmm26[0],xmm25[0],xmm26[1],xmm25[1],xmm26[2],xmm25[2],xmm26[3],xmm25[3],xmm26[4],xmm25[4],xmm26[5],xmm25[5],xmm26[6],xmm25[6],xmm26[7],xmm25[7]
+; AVX512BW-NEXT: vprold $16, %xmm27, %xmm27
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm11, %zmm27, %zmm11
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm11 = zmm11[0,0,0,1,4,4,4,5]
+; AVX512BW-NEXT: vmovdqu16 %zmm11, %zmm20 {%k2}
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} xmm11 = xmm23[0],zero,xmm23[1],zero,xmm23[2],zero,xmm23[3],zero,xmm23[4],zero,xmm23[5],zero,xmm23[6],zero,xmm23[7],zero
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm27 = xmm23[2,1,2,3]
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} xmm27 = xmm27[0],zero,xmm27[1],zero,xmm27[2],zero,xmm27[3],zero,xmm27[4],zero,xmm27[5],zero,xmm27[6],zero,xmm27[7],zero
+; AVX512BW-NEXT: vpermt2w %zmm27, %zmm28, %zmm11
+; AVX512BW-NEXT: vmovdqu16 %zmm11, %zmm20 {%k1}
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm24[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm27 = xmm24[2,1,2,3]
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm27 = xmm27[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512BW-NEXT: vpermt2w %zmm27, %zmm28, %zmm11
+; AVX512BW-NEXT: vmovdqu8 %zmm11, %zmm20 {%k3}
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm11 = ymm17[0],ymm16[0],ymm17[1],ymm16[1],ymm17[2],ymm16[2],ymm17[3],ymm16[3],ymm17[4],ymm16[4],ymm17[5],ymm16[5],ymm17[6],ymm16[6],ymm17[7],ymm16[7],ymm17[16],ymm16[16],ymm17[17],ymm16[17],ymm17[18],ymm16[18],ymm17[19],ymm16[19],ymm17[20],ymm16[20],ymm17[21],ymm16[21],ymm17[22],ymm16[22],ymm17[23],ymm16[23]
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm16 = xmm26[8],xmm25[8],xmm26[9],xmm25[9],xmm26[10],xmm25[10],xmm26[11],xmm25[11],xmm26[12],xmm25[12],xmm26[13],xmm25[13],xmm26[14],xmm25[14],xmm26[15],xmm25[15]
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm17 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7,41,40,43,42,41,40,43,42,41,40,43,42,45,44,47,46]
+; AVX512BW-NEXT: vpermt2w %zmm11, %zmm17, %zmm16
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm9 = ymm10[0],ymm9[0],ymm10[1],ymm9[1],ymm10[2],ymm9[2],ymm10[3],ymm9[3],ymm10[4],ymm9[4],ymm10[5],ymm9[5],ymm10[6],ymm9[6],ymm10[7],ymm9[7],ymm10[16],ymm9[16],ymm10[17],ymm9[17],ymm10[18],ymm9[18],ymm10[19],ymm9[19],ymm10[20],ymm9[20],ymm10[21],ymm9[21],ymm10[22],ymm9[22],ymm10[23],ymm9[23]
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm10 = xmm22[8],xmm21[8],xmm22[9],xmm21[9],xmm22[10],xmm21[10],xmm22[11],xmm21[11],xmm22[12],xmm21[12],xmm22[13],xmm21[13],xmm22[14],xmm21[14],xmm22[15],xmm21[15]
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm11 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7,40,43,42,41,40,43,42,41,40,43,42,41,44,45,46,45]
+; AVX512BW-NEXT: vpermt2w %zmm9, %zmm11, %zmm10
+; AVX512BW-NEXT: vmovdqu16 %zmm16, %zmm10 {%k1}
+; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0]
+; AVX512BW-NEXT: # ymm9 = mem[0,1,0,1]
+; AVX512BW-NEXT: vpshufb %ymm9, %ymm13, %ymm13
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm16 = xmm23[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm21 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,40,41,42,43,40,41,42,43,40,41,42,43,44,45,46,47]
+; AVX512BW-NEXT: vpermt2w %zmm13, %zmm21, %zmm16
+; AVX512BW-NEXT: movl $1227133513, %ecx # imm = 0x49249249
+; AVX512BW-NEXT: kmovd %ecx, %k2
+; AVX512BW-NEXT: vmovdqu16 %zmm16, %zmm10 {%k2}
+; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm13 = [0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0]
+; AVX512BW-NEXT: # ymm13 = mem[0,1,0,1]
+; AVX512BW-NEXT: vpshufb %ymm13, %ymm12, %ymm12
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm16 = xmm24[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512BW-NEXT: vpermt2w %zmm12, %zmm21, %zmm16
+; AVX512BW-NEXT: movabsq $2342443691899625602, %rcx # imm = 0x2082082082082082
+; AVX512BW-NEXT: kmovq %rcx, %k3
+; AVX512BW-NEXT: vmovdqu8 %zmm16, %zmm10 {%k3}
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[16],ymm3[16],ymm4[17],ymm3[17],ymm4[18],ymm3[18],ymm4[19],ymm3[19],ymm4[20],ymm3[20],ymm4[21],ymm3[21],ymm4[22],ymm3[22],ymm4[23],ymm3[23]
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm18[8],xmm14[8],xmm18[9],xmm14[9],xmm18[10],xmm14[10],xmm18[11],xmm14[11],xmm18[12],xmm14[12],xmm18[13],xmm14[13],xmm18[14],xmm14[14],xmm18[15],xmm14[15]
+; AVX512BW-NEXT: vpermt2w %zmm3, %zmm17, %zmm4
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[16],ymm1[16],ymm2[17],ymm1[17],ymm2[18],ymm1[18],ymm2[19],ymm1[19],ymm2[20],ymm1[20],ymm2[21],ymm1[21],ymm2[22],ymm1[22],ymm2[23],ymm1[23]
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm8[8],xmm7[8],xmm8[9],xmm7[9],xmm8[10],xmm7[10],xmm8[11],xmm7[11],xmm8[12],xmm7[12],xmm8[13],xmm7[13],xmm8[14],xmm7[14],xmm8[15],xmm7[15]
+; AVX512BW-NEXT: vpermt2w %zmm1, %zmm11, %zmm2
+; AVX512BW-NEXT: vmovdqu16 %zmm4, %zmm2 {%k1}
+; AVX512BW-NEXT: vmovdqa 32(%r8), %ymm1
+; AVX512BW-NEXT: vpshufb %ymm9, %ymm1, %ymm1
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm15[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512BW-NEXT: vpermt2w %zmm1, %zmm21, %zmm3
+; AVX512BW-NEXT: vmovdqu16 %zmm3, %zmm2 {%k2}
+; AVX512BW-NEXT: vmovdqa 32(%r9), %ymm1
+; AVX512BW-NEXT: vpshufb %ymm13, %ymm1, %ymm1
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm19[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512BW-NEXT: vpermt2w %zmm1, %zmm21, %zmm3
+; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k3}
+; AVX512BW-NEXT: vmovdqa64 %zmm2, 256(%rax)
+; AVX512BW-NEXT: vmovdqa64 %zmm10, 64(%rax)
+; AVX512BW-NEXT: vmovdqa64 %zmm20, (%rax)
+; AVX512BW-NEXT: vmovdqa64 %zmm6, 192(%rax)
+; AVX512BW-NEXT: vmovdqa64 %zmm5, 128(%rax)
+; AVX512BW-NEXT: vmovdqa64 %zmm0, 320(%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BW-FCP-LABEL: store_i8_stride6_vf64:
; AVX512BW-FCP: # %bb.0:
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rsi), %ymm8
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rsi), %ymm9
; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm0 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512BW-FCP-NEXT: vpshufb %ymm0, %ymm8, %ymm1
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm9
-; AVX512BW-FCP-NEXT: vpshufb %ymm0, %ymm9, %ymm2
+; AVX512BW-FCP-NEXT: vpshufb %ymm0, %ymm9, %ymm1
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm10
+; AVX512BW-FCP-NEXT: vpshufb %ymm0, %ymm10, %ymm2
; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[16],ymm1[16],ymm2[17],ymm1[17],ymm2[18],ymm1[18],ymm2[19],ymm1[19],ymm2[20],ymm1[20],ymm2[21],ymm1[21],ymm2[22],ymm1[22],ymm2[23],ymm1[23]
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,2,2,3]
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm9[8],ymm8[8],ymm9[9],ymm8[9],ymm9[10],ymm8[10],ymm9[11],ymm8[11],ymm9[12],ymm8[12],ymm9[13],ymm8[13],ymm9[14],ymm8[14],ymm9[15],ymm8[15],ymm9[24],ymm8[24],ymm9[25],ymm8[25],ymm9[26],ymm8[26],ymm9[27],ymm8[27],ymm9[28],ymm8[28],ymm9[29],ymm8[29],ymm9[30],ymm8[30],ymm9[31],ymm8[31]
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm10[8],ymm9[8],ymm10[9],ymm9[9],ymm10[10],ymm9[10],ymm10[11],ymm9[11],ymm10[12],ymm9[12],ymm10[13],ymm9[13],ymm10[14],ymm9[14],ymm10[15],ymm9[15],ymm10[24],ymm9[24],ymm10[25],ymm9[25],ymm10[26],ymm9[26],ymm10[27],ymm9[27],ymm10[28],ymm9[28],ymm10[29],ymm9[29],ymm10[30],ymm9[30],ymm10[31],ymm9[31]
; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm0 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
; AVX512BW-FCP-NEXT: vpermw %ymm3, %ymm0, %ymm3
; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm3
@@ -6573,25 +6563,25 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %ymm7
; AVX512BW-FCP-NEXT: vmovdqa64 (%rcx), %xmm18
; AVX512BW-FCP-NEXT: vmovdqa64 32(%rcx), %xmm21
-; AVX512BW-FCP-NEXT: vpshufb %xmm2, %xmm21, %xmm10
+; AVX512BW-FCP-NEXT: vpshufb %xmm2, %xmm21, %xmm8
; AVX512BW-FCP-NEXT: vmovdqa64 (%rdx), %xmm30
; AVX512BW-FCP-NEXT: vmovdqa64 32(%rdx), %xmm24
; AVX512BW-FCP-NEXT: vpshufb %xmm2, %xmm24, %xmm11
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3],xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm11[0],xmm8[0],xmm11[1],xmm8[1],xmm11[2],xmm8[2],xmm11[3],xmm8[3],xmm11[4],xmm8[4],xmm11[5],xmm8[5],xmm11[6],xmm8[6],xmm11[7],xmm8[7]
; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm24[0],xmm21[0],xmm24[1],xmm21[1],xmm24[2],xmm21[2],xmm24[3],xmm21[3],xmm24[4],xmm21[4],xmm24[5],xmm21[5],xmm24[6],xmm21[6],xmm24[7],xmm21[7]
; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm13 = [1,0,3,2,1,0,3,2,1,0,3,2,5,4,7,6,32,33,34,35,32,33,34,35,32,33,34,35,36,37,38,39]
-; AVX512BW-FCP-NEXT: vpermt2w %zmm10, %zmm13, %zmm11
+; AVX512BW-FCP-NEXT: vpermt2w %zmm8, %zmm13, %zmm11
; AVX512BW-FCP-NEXT: vmovdqa64 (%rsi), %xmm25
; AVX512BW-FCP-NEXT: vmovdqa64 32(%rsi), %xmm19
; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm15 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512BW-FCP-NEXT: vpshufb %xmm15, %xmm19, %xmm10
+; AVX512BW-FCP-NEXT: vpshufb %xmm15, %xmm19, %xmm8
; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %xmm26
; AVX512BW-FCP-NEXT: vmovdqa64 32(%rdi), %xmm22
; AVX512BW-FCP-NEXT: vpshufb %xmm15, %xmm22, %xmm12
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm12 = xmm12[8],xmm10[8],xmm12[9],xmm10[9],xmm12[10],xmm10[10],xmm12[11],xmm10[11],xmm12[12],xmm10[12],xmm12[13],xmm10[13],xmm12[14],xmm10[14],xmm12[15],xmm10[15]
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm22[0],xmm19[0],xmm22[1],xmm19[1],xmm22[2],xmm19[2],xmm22[3],xmm19[3],xmm22[4],xmm19[4],xmm22[5],xmm19[5],xmm22[6],xmm19[6],xmm22[7],xmm19[7]
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm12 = xmm12[8],xmm8[8],xmm12[9],xmm8[9],xmm12[10],xmm8[10],xmm12[11],xmm8[11],xmm12[12],xmm8[12],xmm12[13],xmm8[13],xmm12[14],xmm8[14],xmm12[15],xmm8[15]
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm22[0],xmm19[0],xmm22[1],xmm19[1],xmm22[2],xmm19[2],xmm22[3],xmm19[3],xmm22[4],xmm19[4],xmm22[5],xmm19[5],xmm22[6],xmm19[6],xmm22[7],xmm19[7]
; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm16 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5,32,33,34,35,32,33,34,35,32,33,34,35,36,37,38,39]
-; AVX512BW-FCP-NEXT: vpermt2w %zmm12, %zmm16, %zmm10
+; AVX512BW-FCP-NEXT: vpermt2w %zmm12, %zmm16, %zmm8
; AVX512BW-FCP-NEXT: vmovdqa64 (%r8), %xmm29
; AVX512BW-FCP-NEXT: vmovdqa64 32(%r8), %xmm20
; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} xmm17 = [8,9,0,0,0,5,6,7]
@@ -6641,57 +6631,57 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP-NEXT: vpshufb %ymm1, %ymm31, %ymm29
; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm0 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,40,41,42,43,40,41,42,43,40,41,42,43,44,45,46,47]
; AVX512BW-FCP-NEXT: vpermt2w %zmm29, %zmm0, %zmm25
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm9 = ymm9[0],ymm8[0],ymm9[1],ymm8[1],ymm9[2],ymm8[2],ymm9[3],ymm8[3],ymm9[4],ymm8[4],ymm9[5],ymm8[5],ymm9[6],ymm8[6],ymm9[7],ymm8[7],ymm9[16],ymm8[16],ymm9[17],ymm8[17],ymm9[18],ymm8[18],ymm9[19],ymm8[19],ymm9[20],ymm8[20],ymm9[21],ymm8[21],ymm9[22],ymm8[22],ymm9[23],ymm8[23]
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm8 = xmm22[8],xmm19[8],xmm22[9],xmm19[9],xmm22[10],xmm19[10],xmm22[11],xmm19[11],xmm22[12],xmm19[12],xmm22[13],xmm19[13],xmm22[14],xmm19[14],xmm22[15],xmm19[15]
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm10 = ymm10[0],ymm9[0],ymm10[1],ymm9[1],ymm10[2],ymm9[2],ymm10[3],ymm9[3],ymm10[4],ymm9[4],ymm10[5],ymm9[5],ymm10[6],ymm9[6],ymm10[7],ymm9[7],ymm10[16],ymm9[16],ymm10[17],ymm9[17],ymm10[18],ymm9[18],ymm10[19],ymm9[19],ymm10[20],ymm9[20],ymm10[21],ymm9[21],ymm10[22],ymm9[22],ymm10[23],ymm9[23]
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm9 = xmm22[8],xmm19[8],xmm22[9],xmm19[9],xmm22[10],xmm19[10],xmm22[11],xmm19[11],xmm22[12],xmm19[12],xmm22[13],xmm19[13],xmm22[14],xmm19[14],xmm22[15],xmm19[15]
; AVX512BW-FCP-NEXT: vmovdqa64 (%r9), %ymm22
-; AVX512BW-FCP-NEXT: vpermt2w %zmm9, %zmm26, %zmm8
+; AVX512BW-FCP-NEXT: vpermt2w %zmm10, %zmm26, %zmm9
; AVX512BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} ymm26 = [0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0]
; AVX512BW-FCP-NEXT: # ymm26 = mem[0,1,2,3,0,1,2,3]
; AVX512BW-FCP-NEXT: vpshufb %ymm26, %ymm22, %ymm19
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm9 = xmm28[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512BW-FCP-NEXT: vpermt2w %zmm19, %zmm0, %zmm9
-; AVX512BW-FCP-NEXT: vmovdqa64 32(%r8), %ymm28
-; AVX512BW-FCP-NEXT: vpshufb %ymm1, %ymm28, %ymm1
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm10 = xmm28[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512BW-FCP-NEXT: vpermt2w %zmm19, %zmm0, %zmm10
+; AVX512BW-FCP-NEXT: vmovdqa64 32(%r8), %ymm19
+; AVX512BW-FCP-NEXT: vpshufb %ymm1, %ymm19, %ymm1
; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm19 = xmm20[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX512BW-FCP-NEXT: vpermt2w %zmm1, %zmm0, %zmm19
; AVX512BW-FCP-NEXT: vmovdqa 32(%r9), %ymm1
-; AVX512BW-FCP-NEXT: vpshufb %ymm26, %ymm1, %ymm26
+; AVX512BW-FCP-NEXT: vpshufb %ymm26, %ymm1, %ymm1
; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm20 = xmm23[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512BW-FCP-NEXT: vpermt2w %zmm26, %zmm0, %zmm20
+; AVX512BW-FCP-NEXT: vpermt2w %zmm1, %zmm0, %zmm20
; AVX512BW-FCP-NEXT: vpshufb %ymm2, %ymm4, %ymm0
-; AVX512BW-FCP-NEXT: vpshufb %ymm2, %ymm5, %ymm23
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm23[0],ymm0[0],ymm23[1],ymm0[1],ymm23[2],ymm0[2],ymm23[3],ymm0[3],ymm23[4],ymm0[4],ymm23[5],ymm0[5],ymm23[6],ymm0[6],ymm23[7],ymm0[7],ymm23[16],ymm0[16],ymm23[17],ymm0[17],ymm23[18],ymm0[18],ymm23[19],ymm0[19],ymm23[20],ymm0[20],ymm23[21],ymm0[21],ymm23[22],ymm0[22],ymm23[23],ymm0[23]
+; AVX512BW-FCP-NEXT: vpshufb %ymm2, %ymm5, %ymm1
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
+; AVX512BW-FCP-NEXT: vmovdqa64 (%r8), %zmm1
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm5[8],ymm4[8],ymm5[9],ymm4[9],ymm5[10],ymm4[10],ymm5[11],ymm4[11],ymm5[12],ymm4[12],ymm5[13],ymm4[13],ymm5[14],ymm4[14],ymm5[15],ymm4[15],ymm5[24],ymm4[24],ymm5[25],ymm4[25],ymm5[26],ymm4[26],ymm5[27],ymm4[27],ymm5[28],ymm4[28],ymm5[29],ymm4[29],ymm5[30],ymm4[30],ymm5[31],ymm4[31]
; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm5 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
; AVX512BW-FCP-NEXT: vpermw %ymm4, %ymm5, %ymm4
; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm4
+; AVX512BW-FCP-NEXT: vmovdqa64 (%r9), %zmm0
; AVX512BW-FCP-NEXT: movl $613566756, %eax # imm = 0x24924924
; AVX512BW-FCP-NEXT: kmovd %eax, %k1
; AVX512BW-FCP-NEXT: vmovdqu16 %zmm3, %zmm4 {%k1}
-; AVX512BW-FCP-NEXT: vmovdqa64 (%r8), %zmm0
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm28[0,1,2,3],zmm0[4,5,6,7]
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm1[4,5,6,7,4,5,6,7]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm3 = zmm3[6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,42,u,45,u,44,u,43,u,46,u,u,u,u,u,47,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63,u]
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[2,2,2,3,6,6,6,7]
; AVX512BW-FCP-NEXT: movl $-1840700270, %eax # imm = 0x92492492
; AVX512BW-FCP-NEXT: kmovd %eax, %k2
; AVX512BW-FCP-NEXT: vmovdqu16 %zmm3, %zmm4 {%k2}
-; AVX512BW-FCP-NEXT: vmovdqa64 (%r9), %zmm3
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm3[4,5,6,7]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm1 = zmm1[u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,42,u,45,u,44,u,43,u,46,u,u,u,u,u,47,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,2,2,3,6,6,6,7]
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm0[4,5,6,7,4,5,6,7]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm3 = zmm3[u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,42,u,45,u,44,u,43,u,46,u,u,u,u,u,47,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[2,2,2,3,6,6,6,7]
; AVX512BW-FCP-NEXT: movabsq $-9076969306111049208, %rax # imm = 0x8208208208208208
; AVX512BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm4 {%k3}
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm3, %zmm4 {%k3}
; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm23 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512BW-FCP-NEXT: vpshufb %ymm23, %ymm6, %ymm1
+; AVX512BW-FCP-NEXT: vpshufb %ymm23, %ymm6, %ymm3
; AVX512BW-FCP-NEXT: vpshufb %ymm23, %ymm30, %ymm23
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm23[0],ymm1[0],ymm23[1],ymm1[1],ymm23[2],ymm1[2],ymm23[3],ymm1[3],ymm23[4],ymm1[4],ymm23[5],ymm1[5],ymm23[6],ymm1[6],ymm23[7],ymm1[7],ymm23[16],ymm1[16],ymm23[17],ymm1[17],ymm23[18],ymm1[18],ymm23[19],ymm1[19],ymm23[20],ymm1[20],ymm23[21],ymm1[21],ymm23[22],ymm1[22],ymm23[23],ymm1[23]
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm23[0],ymm3[0],ymm23[1],ymm3[1],ymm23[2],ymm3[2],ymm23[3],ymm3[3],ymm23[4],ymm3[4],ymm23[5],ymm3[5],ymm23[6],ymm3[6],ymm23[7],ymm3[7],ymm23[16],ymm3[16],ymm23[17],ymm3[17],ymm23[18],ymm3[18],ymm23[19],ymm3[19],ymm23[20],ymm3[20],ymm23[21],ymm3[21],ymm23[22],ymm3[22],ymm23[23],ymm3[23]
; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm30[8],ymm6[8],ymm30[9],ymm6[9],ymm30[10],ymm6[10],ymm30[11],ymm6[11],ymm30[12],ymm6[12],ymm30[13],ymm6[13],ymm30[14],ymm6[14],ymm30[15],ymm6[15],ymm30[24],ymm6[24],ymm30[25],ymm6[25],ymm30[26],ymm6[26],ymm30[27],ymm6[27],ymm30[28],ymm6[28],ymm30[29],ymm6[29],ymm30[30],ymm6[30],ymm30[31],ymm6[31]
; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm23 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
; AVX512BW-FCP-NEXT: vpermw %ymm6, %ymm23, %ymm6
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm1, %zmm1
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm3, %zmm3
; AVX512BW-FCP-NEXT: vpshufb %ymm2, %ymm7, %ymm6
; AVX512BW-FCP-NEXT: vpshufb %ymm2, %ymm27, %ymm2
; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm6[0],ymm2[1],ymm6[1],ymm2[2],ymm6[2],ymm2[3],ymm6[3],ymm2[4],ymm6[4],ymm2[5],ymm6[5],ymm2[6],ymm6[6],ymm2[7],ymm6[7],ymm2[16],ymm6[16],ymm2[17],ymm6[17],ymm2[18],ymm6[18],ymm2[19],ymm6[19],ymm2[20],ymm6[20],ymm2[21],ymm6[21],ymm2[22],ymm6[22],ymm2[23],ymm6[23]
@@ -6699,20 +6689,20 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP-NEXT: vpermw %ymm6, %ymm5, %ymm5
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm2, %zmm2
-; AVX512BW-FCP-NEXT: vmovdqu16 %zmm1, %zmm2 {%k1}
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm31, %zmm0, %zmm0
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63,u]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,2,2,3,6,6,6,7]
-; AVX512BW-FCP-NEXT: vmovdqu16 %zmm0, %zmm2 {%k2}
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm22, %zmm3, %zmm0
+; AVX512BW-FCP-NEXT: vmovdqu16 %zmm3, %zmm2 {%k1}
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm31, %zmm1, %zmm1
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm1 = zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63,u]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,2,2,3,6,6,6,7]
+; AVX512BW-FCP-NEXT: vmovdqu16 %zmm1, %zmm2 {%k2}
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm22, %zmm0, %zmm0
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63]
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,2,2,3,6,6,6,7]
; AVX512BW-FCP-NEXT: vmovdqu8 %zmm0, %zmm2 {%k3}
-; AVX512BW-FCP-NEXT: vmovdqu16 %zmm11, %zmm10 {%k2}
-; AVX512BW-FCP-NEXT: vmovdqu16 %zmm12, %zmm10 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqu16 %zmm11, %zmm8 {%k2}
+; AVX512BW-FCP-NEXT: vmovdqu16 %zmm12, %zmm8 {%k1}
; AVX512BW-FCP-NEXT: movabsq $585610922974906400, %rax # imm = 0x820820820820820
; AVX512BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm15, %zmm10 {%k3}
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm15, %zmm8 {%k3}
; AVX512BW-FCP-NEXT: vmovdqu16 %zmm14, %zmm13 {%k2}
; AVX512BW-FCP-NEXT: vmovdqu16 %zmm16, %zmm13 {%k1}
; AVX512BW-FCP-NEXT: vmovdqu8 %zmm17, %zmm13 {%k3}
@@ -6722,15 +6712,15 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP-NEXT: vmovdqu16 %zmm25, %zmm24 {%k2}
; AVX512BW-FCP-NEXT: movabsq $2342443691899625602, %rax # imm = 0x2082082082082082
; AVX512BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm9, %zmm24 {%k3}
-; AVX512BW-FCP-NEXT: vmovdqu16 %zmm21, %zmm8 {%k1}
-; AVX512BW-FCP-NEXT: vmovdqu16 %zmm19, %zmm8 {%k2}
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm20, %zmm8 {%k3}
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm10, %zmm24 {%k3}
+; AVX512BW-FCP-NEXT: vmovdqu16 %zmm21, %zmm9 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqu16 %zmm19, %zmm9 {%k2}
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm20, %zmm9 {%k3}
; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm8, 256(%rax)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm9, 256(%rax)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm24, 64(%rax)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm13, (%rax)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm10, 192(%rax)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm8, 192(%rax)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm2, 128(%rax)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm4, 320(%rax)
; AVX512BW-FCP-NEXT: vzeroupper
@@ -6738,210 +6728,200 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
;
; AVX512DQ-BW-LABEL: store_i8_stride6_vf64:
; AVX512DQ-BW: # %bb.0:
-; AVX512DQ-BW-NEXT: vmovdqa64 (%r8), %zmm4
-; AVX512DQ-BW-NEXT: vmovdqa 32(%rsi), %ymm2
-; AVX512DQ-BW-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm6 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512DQ-BW-NEXT: vpshufb %ymm6, %ymm2, %ymm0
-; AVX512DQ-BW-NEXT: vmovdqa 32(%rdi), %ymm3
-; AVX512DQ-BW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-BW-NEXT: vpshufb %ymm6, %ymm3, %ymm1
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
+; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512DQ-BW-NEXT: vmovdqa64 (%r8), %zmm7
+; AVX512DQ-BW-NEXT: vmovdqa64 (%r9), %zmm6
+; AVX512DQ-BW-NEXT: vmovdqa 32(%rsi), %ymm1
+; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm5 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512DQ-BW-NEXT: vpshufb %ymm5, %ymm1, %ymm0
+; AVX512DQ-BW-NEXT: vmovdqa 32(%rdi), %ymm2
+; AVX512DQ-BW-NEXT: vpshufb %ymm5, %ymm2, %ymm3
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[16],ymm0[16],ymm3[17],ymm0[17],ymm3[18],ymm0[18],ymm3[19],ymm0[19],ymm3[20],ymm0[20],ymm3[21],ymm0[21],ymm3[22],ymm0[22],ymm3[23],ymm0[23]
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} ymm1 = ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15],ymm3[24],ymm2[24],ymm3[25],ymm2[25],ymm3[26],ymm2[26],ymm3[27],ymm2[27],ymm3[28],ymm2[28],ymm3[29],ymm2[29],ymm3[30],ymm2[30],ymm3[31],ymm2[31]
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm2 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
-; AVX512DQ-BW-NEXT: vpermw %ymm1, %ymm2, %ymm1
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm5
-; AVX512DQ-BW-NEXT: vmovdqa 32(%rcx), %ymm12
-; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm1 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512DQ-BW-NEXT: vpshufb %ymm1, %ymm12, %ymm0
-; AVX512DQ-BW-NEXT: vmovdqa 32(%rdx), %ymm13
-; AVX512DQ-BW-NEXT: vpshufb %ymm1, %ymm13, %ymm8
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm8[0],ymm0[0],ymm8[1],ymm0[1],ymm8[2],ymm0[2],ymm8[3],ymm0[3],ymm8[4],ymm0[4],ymm8[5],ymm0[5],ymm8[6],ymm0[6],ymm8[7],ymm0[7],ymm8[16],ymm0[16],ymm8[17],ymm0[17],ymm8[18],ymm0[18],ymm8[19],ymm0[19],ymm8[20],ymm0[20],ymm8[21],ymm0[21],ymm8[22],ymm0[22],ymm8[23],ymm0[23]
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15],ymm2[24],ymm1[24],ymm2[25],ymm1[25],ymm2[26],ymm1[26],ymm2[27],ymm1[27],ymm2[28],ymm1[28],ymm2[29],ymm1[29],ymm2[30],ymm1[30],ymm2[31],ymm1[31]
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm8 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
+; AVX512DQ-BW-NEXT: vpermw %ymm3, %ymm8, %ymm3
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm9
+; AVX512DQ-BW-NEXT: vmovdqa 32(%rcx), %ymm3
+; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm11 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
+; AVX512DQ-BW-NEXT: vpshufb %ymm11, %ymm3, %ymm0
+; AVX512DQ-BW-NEXT: vmovdqa 32(%rdx), %ymm4
+; AVX512DQ-BW-NEXT: vpshufb %ymm11, %ymm4, %ymm10
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm10[0],ymm0[0],ymm10[1],ymm0[1],ymm10[2],ymm0[2],ymm10[3],ymm0[3],ymm10[4],ymm0[4],ymm10[5],ymm0[5],ymm10[6],ymm0[6],ymm10[7],ymm0[7],ymm10[16],ymm0[16],ymm10[17],ymm0[17],ymm10[18],ymm0[18],ymm10[19],ymm0[19],ymm10[20],ymm0[20],ymm10[21],ymm0[21],ymm10[22],ymm0[22],ymm10[23],ymm0[23]
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} ymm9 = ymm13[8],ymm12[8],ymm13[9],ymm12[9],ymm13[10],ymm12[10],ymm13[11],ymm12[11],ymm13[12],ymm12[12],ymm13[13],ymm12[13],ymm13[14],ymm12[14],ymm13[15],ymm12[15],ymm13[24],ymm12[24],ymm13[25],ymm12[25],ymm13[26],ymm12[26],ymm13[27],ymm12[27],ymm13[28],ymm12[28],ymm13[29],ymm12[29],ymm13[30],ymm12[30],ymm13[31],ymm12[31]
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm1 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
-; AVX512DQ-BW-NEXT: vpermw %ymm9, %ymm1, %ymm9
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm10
-; AVX512DQ-BW-NEXT: movl $613566756, %eax # imm = 0x24924924
-; AVX512DQ-BW-NEXT: kmovd %eax, %k1
-; AVX512DQ-BW-NEXT: vmovdqu16 %zmm5, %zmm10 {%k1}
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} ymm10 = ymm4[8],ymm3[8],ymm4[9],ymm3[9],ymm4[10],ymm3[10],ymm4[11],ymm3[11],ymm4[12],ymm3[12],ymm4[13],ymm3[13],ymm4[14],ymm3[14],ymm4[15],ymm3[15],ymm4[24],ymm3[24],ymm4[25],ymm3[25],ymm4[26],ymm3[26],ymm4[27],ymm3[27],ymm4[28],ymm3[28],ymm4[29],ymm3[29],ymm4[30],ymm3[30],ymm4[31],ymm3[31]
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm12 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
+; AVX512DQ-BW-NEXT: vpermw %ymm10, %ymm12, %ymm10
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm10, %zmm0, %zmm0
+; AVX512DQ-BW-NEXT: movl $613566756, %r10d # imm = 0x24924924
+; AVX512DQ-BW-NEXT: kmovd %r10d, %k1
+; AVX512DQ-BW-NEXT: vmovdqu16 %zmm9, %zmm0 {%k1}
+; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm7[4,5,6,7,4,5,6,7]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm9 = zmm9[6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,42,u,45,u,44,u,43,u,46,u,u,u,u,u,47,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63,u]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,2,2,3,6,6,6,7]
+; AVX512DQ-BW-NEXT: movl $-1840700270, %r10d # imm = 0x92492492
+; AVX512DQ-BW-NEXT: kmovd %r10d, %k2
+; AVX512DQ-BW-NEXT: vmovdqu16 %zmm9, %zmm0 {%k2}
+; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm6[4,5,6,7,4,5,6,7]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm9 = zmm9[u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,42,u,45,u,44,u,43,u,46,u,u,u,u,u,47,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,2,2,3,6,6,6,7]
+; AVX512DQ-BW-NEXT: movabsq $-9076969306111049208, %r10 # imm = 0x8208208208208208
+; AVX512DQ-BW-NEXT: kmovq %r10, %k3
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm9, %zmm0 {%k3}
+; AVX512DQ-BW-NEXT: vmovdqa (%rsi), %ymm9
+; AVX512DQ-BW-NEXT: vpshufb %ymm5, %ymm9, %ymm13
+; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm10
+; AVX512DQ-BW-NEXT: vpshufb %ymm5, %ymm10, %ymm5
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm5[0],ymm13[0],ymm5[1],ymm13[1],ymm5[2],ymm13[2],ymm5[3],ymm13[3],ymm5[4],ymm13[4],ymm5[5],ymm13[5],ymm5[6],ymm13[6],ymm5[7],ymm13[7],ymm5[16],ymm13[16],ymm5[17],ymm13[17],ymm5[18],ymm13[18],ymm5[19],ymm13[19],ymm5[20],ymm13[20],ymm5[21],ymm13[21],ymm5[22],ymm13[22],ymm5[23],ymm13[23]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} ymm13 = ymm10[8],ymm9[8],ymm10[9],ymm9[9],ymm10[10],ymm9[10],ymm10[11],ymm9[11],ymm10[12],ymm9[12],ymm10[13],ymm9[13],ymm10[14],ymm9[14],ymm10[15],ymm9[15],ymm10[24],ymm9[24],ymm10[25],ymm9[25],ymm10[26],ymm9[26],ymm10[27],ymm9[27],ymm10[28],ymm9[28],ymm10[29],ymm9[29],ymm10[30],ymm9[30],ymm10[31],ymm9[31]
+; AVX512DQ-BW-NEXT: vpermw %ymm13, %ymm8, %ymm8
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm8, %zmm5, %zmm8
; AVX512DQ-BW-NEXT: vmovdqa64 (%rcx), %ymm16
+; AVX512DQ-BW-NEXT: vpshufb %ymm11, %ymm16, %ymm5
; AVX512DQ-BW-NEXT: vmovdqa64 (%rdx), %ymm17
-; AVX512DQ-BW-NEXT: vmovdqa64 (%rsi), %xmm20
-; AVX512DQ-BW-NEXT: vmovdqa64 32(%rsi), %xmm18
-; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} xmm14 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512DQ-BW-NEXT: vpshufb %xmm14, %xmm18, %xmm5
+; AVX512DQ-BW-NEXT: vpshufb %ymm11, %ymm17, %ymm13
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm13[0],ymm5[0],ymm13[1],ymm5[1],ymm13[2],ymm5[2],ymm13[3],ymm5[3],ymm13[4],ymm5[4],ymm13[5],ymm5[5],ymm13[6],ymm5[6],ymm13[7],ymm5[7],ymm13[16],ymm5[16],ymm13[17],ymm5[17],ymm13[18],ymm5[18],ymm13[19],ymm5[19],ymm13[20],ymm5[20],ymm13[21],ymm5[21],ymm13[22],ymm5[22],ymm13[23],ymm5[23]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} ymm13 = ymm17[8],ymm16[8],ymm17[9],ymm16[9],ymm17[10],ymm16[10],ymm17[11],ymm16[11],ymm17[12],ymm16[12],ymm17[13],ymm16[13],ymm17[14],ymm16[14],ymm17[15],ymm16[15],ymm17[24],ymm16[24],ymm17[25],ymm16[25],ymm17[26],ymm16[26],ymm17[27],ymm16[27],ymm17[28],ymm16[28],ymm17[29],ymm16[29],ymm17[30],ymm16[30],ymm17[31],ymm16[31]
+; AVX512DQ-BW-NEXT: vpermw %ymm13, %ymm12, %ymm12
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm12, %zmm5, %zmm5
+; AVX512DQ-BW-NEXT: vmovdqu16 %zmm8, %zmm5 {%k1}
+; AVX512DQ-BW-NEXT: vmovdqa (%r8), %ymm13
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm13, %zmm7, %zmm7
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm7 = zmm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63,u]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm7 = zmm7[2,2,2,3,6,6,6,7]
+; AVX512DQ-BW-NEXT: vmovdqu16 %zmm7, %zmm5 {%k2}
+; AVX512DQ-BW-NEXT: vmovdqa (%r9), %ymm12
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm12, %zmm6, %zmm6
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm6 = zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm6 = zmm6[2,2,2,3,6,6,6,7]
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm6, %zmm5 {%k3}
+; AVX512DQ-BW-NEXT: vmovdqa64 (%rsi), %xmm21
+; AVX512DQ-BW-NEXT: vmovdqa 32(%rsi), %xmm7
+; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} xmm20 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512DQ-BW-NEXT: vpshufb %xmm20, %xmm7, %xmm6
; AVX512DQ-BW-NEXT: vmovdqa64 (%rdi), %xmm22
-; AVX512DQ-BW-NEXT: vmovdqa64 32(%rdi), %xmm21
-; AVX512DQ-BW-NEXT: vpshufb %xmm14, %xmm21, %xmm9
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm9 = xmm9[8],xmm5[8],xmm9[9],xmm5[9],xmm9[10],xmm5[10],xmm9[11],xmm5[11],xmm9[12],xmm5[12],xmm9[13],xmm5[13],xmm9[14],xmm5[14],xmm9[15],xmm5[15]
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm21[0],xmm18[0],xmm21[1],xmm18[1],xmm21[2],xmm18[2],xmm21[3],xmm18[3],xmm21[4],xmm18[4],xmm21[5],xmm18[5],xmm21[6],xmm18[6],xmm21[7],xmm18[7]
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm26 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5,32,33,34,35,32,33,34,35,32,33,34,35,36,37,38,39]
-; AVX512DQ-BW-NEXT: vpermt2w %zmm9, %zmm26, %zmm5
-; AVX512DQ-BW-NEXT: vmovdqa (%rcx), %xmm0
-; AVX512DQ-BW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-BW-NEXT: vmovdqa 32(%rcx), %xmm11
-; AVX512DQ-BW-NEXT: vmovdqa (%rdx), %xmm9
-; AVX512DQ-BW-NEXT: vmovdqa64 (%r8), %xmm25
-; AVX512DQ-BW-NEXT: vmovdqa64 32(%r8), %xmm23
-; AVX512DQ-BW-NEXT: vpmovzxbw {{.*#+}} xmm15 = xmm23[0],zero,xmm23[1],zero,xmm23[2],zero,xmm23[3],zero,xmm23[4],zero,xmm23[5],zero,xmm23[6],zero,xmm23[7],zero
-; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} xmm19 = xmm23[2,1,2,3]
-; AVX512DQ-BW-NEXT: vpmovzxbw {{.*#+}} xmm19 = xmm19[0],zero,xmm19[1],zero,xmm19[2],zero,xmm19[3],zero,xmm19[4],zero,xmm19[5],zero,xmm19[6],zero,xmm19[7],zero
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm29 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4,38,37,32,39,38,37,32,39,38,37,32,39,33,33,33,33]
-; AVX512DQ-BW-NEXT: vpermt2w %zmm19, %zmm29, %zmm15
-; AVX512DQ-BW-NEXT: vmovdqa64 (%r9), %xmm27
-; AVX512DQ-BW-NEXT: vmovdqa64 32(%r9), %xmm24
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm19 = xmm24[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} xmm28 = xmm24[2,1,2,3]
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm28 = xmm28[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512DQ-BW-NEXT: vpermt2w %zmm28, %zmm29, %zmm19
-; AVX512DQ-BW-NEXT: vpshufb %xmm14, %xmm20, %xmm28
-; AVX512DQ-BW-NEXT: vpshufb %xmm14, %xmm22, %xmm14
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm28 = xmm14[8],xmm28[8],xmm14[9],xmm28[9],xmm14[10],xmm28[10],xmm14[11],xmm28[11],xmm14[12],xmm28[12],xmm14[13],xmm28[13],xmm14[14],xmm28[14],xmm14[15],xmm28[15]
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm22[0],xmm20[0],xmm22[1],xmm20[1],xmm22[2],xmm20[2],xmm22[3],xmm20[3],xmm22[4],xmm20[4],xmm22[5],xmm20[5],xmm22[6],xmm20[6],xmm22[7],xmm20[7]
-; AVX512DQ-BW-NEXT: vpermt2w %zmm28, %zmm26, %zmm14
-; AVX512DQ-BW-NEXT: vpmovzxbw {{.*#+}} xmm26 = xmm25[0],zero,xmm25[1],zero,xmm25[2],zero,xmm25[3],zero,xmm25[4],zero,xmm25[5],zero,xmm25[6],zero,xmm25[7],zero
-; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} xmm28 = xmm25[2,1,2,3]
-; AVX512DQ-BW-NEXT: vpmovzxbw {{.*#+}} xmm28 = xmm28[0],zero,xmm28[1],zero,xmm28[2],zero,xmm28[3],zero,xmm28[4],zero,xmm28[5],zero,xmm28[6],zero,xmm28[7],zero
-; AVX512DQ-BW-NEXT: vpermt2w %zmm28, %zmm29, %zmm26
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm28 = xmm27[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} xmm30 = xmm27[2,1,2,3]
+; AVX512DQ-BW-NEXT: vmovdqa 32(%rdi), %xmm8
+; AVX512DQ-BW-NEXT: vpshufb %xmm20, %xmm8, %xmm14
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm14 = xmm14[8],xmm6[8],xmm14[9],xmm6[9],xmm14[10],xmm6[10],xmm14[11],xmm6[11],xmm14[12],xmm6[12],xmm14[13],xmm6[13],xmm14[14],xmm6[14],xmm14[15],xmm6[15]
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm27 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5,32,33,34,35,32,33,34,35,32,33,34,35,36,37,38,39]
+; AVX512DQ-BW-NEXT: vpermt2w %zmm14, %zmm27, %zmm6
+; AVX512DQ-BW-NEXT: vmovdqa64 (%rcx), %xmm25
+; AVX512DQ-BW-NEXT: vmovdqa 32(%rcx), %xmm14
+; AVX512DQ-BW-NEXT: vpshufb %xmm11, %xmm14, %xmm15
+; AVX512DQ-BW-NEXT: vmovdqa64 (%rdx), %xmm26
+; AVX512DQ-BW-NEXT: vmovdqa64 32(%rdx), %xmm18
+; AVX512DQ-BW-NEXT: vpshufb %xmm11, %xmm18, %xmm19
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm15 = xmm19[0],xmm15[0],xmm19[1],xmm15[1],xmm19[2],xmm15[2],xmm19[3],xmm15[3],xmm19[4],xmm15[4],xmm19[5],xmm15[5],xmm19[6],xmm15[6],xmm19[7],xmm15[7]
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm19 = xmm18[0],xmm14[0],xmm18[1],xmm14[1],xmm18[2],xmm14[2],xmm18[3],xmm14[3],xmm18[4],xmm14[4],xmm18[5],xmm14[5],xmm18[6],xmm14[6],xmm18[7],xmm14[7]
+; AVX512DQ-BW-NEXT: vprold $16, %xmm19, %xmm19
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm15, %zmm19, %zmm15
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm15 = zmm15[0,0,0,1,4,4,4,5]
+; AVX512DQ-BW-NEXT: vmovdqu16 %zmm15, %zmm6 {%k2}
+; AVX512DQ-BW-NEXT: vmovdqa64 (%r8), %xmm23
+; AVX512DQ-BW-NEXT: vmovdqa 32(%r8), %xmm15
+; AVX512DQ-BW-NEXT: vpmovzxbw {{.*#+}} xmm19 = xmm15[0],zero,xmm15[1],zero,xmm15[2],zero,xmm15[3],zero,xmm15[4],zero,xmm15[5],zero,xmm15[6],zero,xmm15[7],zero
+; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} xmm24 = xmm15[2,1,2,3]
+; AVX512DQ-BW-NEXT: vpmovzxbw {{.*#+}} xmm24 = xmm24[0],zero,xmm24[1],zero,xmm24[2],zero,xmm24[3],zero,xmm24[4],zero,xmm24[5],zero,xmm24[6],zero,xmm24[7],zero
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm28 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4,38,37,32,39,38,37,32,39,38,37,32,39,33,33,33,33]
+; AVX512DQ-BW-NEXT: vpermt2w %zmm24, %zmm28, %zmm19
+; AVX512DQ-BW-NEXT: vmovdqu16 %zmm19, %zmm6 {%k1}
+; AVX512DQ-BW-NEXT: vmovdqa64 (%r9), %xmm24
+; AVX512DQ-BW-NEXT: vmovdqa64 32(%r9), %xmm19
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm29 = xmm19[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} xmm30 = xmm19[2,1,2,3]
; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm30 = xmm30[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512DQ-BW-NEXT: vpermt2w %zmm30, %zmm29, %zmm28
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm30 = ymm17[0],ymm16[0],ymm17[1],ymm16[1],ymm17[2],ymm16[2],ymm17[3],ymm16[3],ymm17[4],ymm16[4],ymm17[5],ymm16[5],ymm17[6],ymm16[6],ymm17[7],ymm16[7],ymm17[16],ymm16[16],ymm17[17],ymm16[17],ymm17[18],ymm16[18],ymm17[19],ymm16[19],ymm17[20],ymm16[20],ymm17[21],ymm16[21],ymm17[22],ymm16[22],ymm17[23],ymm16[23]
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm29 = xmm9[8],xmm0[8],xmm9[9],xmm0[9],xmm9[10],xmm0[10],xmm9[11],xmm0[11],xmm9[12],xmm0[12],xmm9[13],xmm0[13],xmm9[14],xmm0[14],xmm9[15],xmm0[15]
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm31 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7,41,40,43,42,41,40,43,42,41,40,43,42,45,44,47,46]
-; AVX512DQ-BW-NEXT: vpermt2w %zmm30, %zmm31, %zmm29
-; AVX512DQ-BW-NEXT: vmovdqa64 32(%rdx), %xmm30
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm13 = ymm13[0],ymm12[0],ymm13[1],ymm12[1],ymm13[2],ymm12[2],ymm13[3],ymm12[3],ymm13[4],ymm12[4],ymm13[5],ymm12[5],ymm13[6],ymm12[6],ymm13[7],ymm12[7],ymm13[16],ymm12[16],ymm13[17],ymm12[17],ymm13[18],ymm12[18],ymm13[19],ymm12[19],ymm13[20],ymm12[20],ymm13[21],ymm12[21],ymm13[22],ymm12[22],ymm13[23],ymm12[23]
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm12 = xmm30[8],xmm11[8],xmm30[9],xmm11[9],xmm30[10],xmm11[10],xmm30[11],xmm11[11],xmm30[12],xmm11[12],xmm30[13],xmm11[13],xmm30[14],xmm11[14],xmm30[15],xmm11[15]
-; AVX512DQ-BW-NEXT: vpermt2w %zmm13, %zmm31, %zmm12
-; AVX512DQ-BW-NEXT: vmovdqa 32(%r8), %ymm13
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm4, %zmm3
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm31 = zmm13[0,1,2,3],zmm4[4,5,6,7]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm31 = zmm31[6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,42,u,45,u,44,u,43,u,46,u,u,u,u,u,47,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63,u]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm31 = zmm31[2,2,2,3,6,6,6,7]
-; AVX512DQ-BW-NEXT: movl $-1840700270, %eax # imm = 0x92492492
-; AVX512DQ-BW-NEXT: kmovd %eax, %k2
-; AVX512DQ-BW-NEXT: vmovdqu16 %zmm31, %zmm10 {%k2}
-; AVX512DQ-BW-NEXT: vmovdqa64 (%r9), %zmm31
-; AVX512DQ-BW-NEXT: vmovdqa 32(%r9), %ymm7
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm8 = zmm7[0,1,2,3],zmm31[4,5,6,7]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm8 = zmm8[u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,42,u,45,u,44,u,43,u,46,u,u,u,u,u,47,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm8 = zmm8[2,2,2,3,6,6,6,7]
-; AVX512DQ-BW-NEXT: movabsq $-9076969306111049208, %rax # imm = 0x8208208208208208
-; AVX512DQ-BW-NEXT: kmovq %rax, %k3
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm8, %zmm10 {%k3}
-; AVX512DQ-BW-NEXT: vmovdqa (%rsi), %ymm8
-; AVX512DQ-BW-NEXT: vpshufb %ymm6, %ymm8, %ymm1
-; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512DQ-BW-NEXT: vpshufb %ymm6, %ymm0, %ymm6
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm6[0],ymm1[0],ymm6[1],ymm1[1],ymm6[2],ymm1[2],ymm6[3],ymm1[3],ymm6[4],ymm1[4],ymm6[5],ymm1[5],ymm6[6],ymm1[6],ymm6[7],ymm1[7],ymm6[16],ymm1[16],ymm6[17],ymm1[17],ymm6[18],ymm1[18],ymm6[19],ymm1[19],ymm6[20],ymm1[20],ymm6[21],ymm1[21],ymm6[22],ymm1[22],ymm6[23],ymm1[23]
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm0[8],ymm8[8],ymm0[9],ymm8[9],ymm0[10],ymm8[10],ymm0[11],ymm8[11],ymm0[12],ymm8[12],ymm0[13],ymm8[13],ymm0[14],ymm8[14],ymm0[15],ymm8[15],ymm0[24],ymm8[24],ymm0[25],ymm8[25],ymm0[26],ymm8[26],ymm0[27],ymm8[27],ymm0[28],ymm8[28],ymm0[29],ymm8[29],ymm0[30],ymm8[30],ymm0[31],ymm8[31]
-; AVX512DQ-BW-NEXT: vpermw %ymm6, %ymm2, %ymm6
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm6, %zmm1, %zmm2
-; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm1 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512DQ-BW-NEXT: vpshufb %ymm1, %ymm16, %ymm6
-; AVX512DQ-BW-NEXT: vpshufb %ymm1, %ymm17, %ymm4
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm6[0],ymm4[1],ymm6[1],ymm4[2],ymm6[2],ymm4[3],ymm6[3],ymm4[4],ymm6[4],ymm4[5],ymm6[5],ymm4[6],ymm6[6],ymm4[7],ymm6[7],ymm4[16],ymm6[16],ymm4[17],ymm6[17],ymm4[18],ymm6[18],ymm4[19],ymm6[19],ymm4[20],ymm6[20],ymm4[21],ymm6[21],ymm4[22],ymm6[22],ymm4[23],ymm6[23]
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm17[8],ymm16[8],ymm17[9],ymm16[9],ymm17[10],ymm16[10],ymm17[11],ymm16[11],ymm17[12],ymm16[12],ymm17[13],ymm16[13],ymm17[14],ymm16[14],ymm17[15],ymm16[15],ymm17[24],ymm16[24],ymm17[25],ymm16[25],ymm17[26],ymm16[26],ymm17[27],ymm16[27],ymm17[28],ymm16[28],ymm17[29],ymm16[29],ymm17[30],ymm16[30],ymm17[31],ymm16[31]
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm1 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
-; AVX512DQ-BW-NEXT: vpermw %ymm6, %ymm1, %ymm6
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm6, %zmm4, %zmm6
-; AVX512DQ-BW-NEXT: vmovdqu16 %zmm2, %zmm6 {%k1}
-; AVX512DQ-BW-NEXT: vmovdqa (%r8), %ymm1
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm4
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm4 = zmm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63,u]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm4 = zmm4[2,2,2,3,6,6,6,7]
-; AVX512DQ-BW-NEXT: vmovdqu16 %zmm4, %zmm6 {%k2}
-; AVX512DQ-BW-NEXT: vmovdqa64 (%r9), %ymm16
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm16, %zmm31, %zmm4
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm4 = zmm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm4 = zmm4[2,2,2,3,6,6,6,7]
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm4, %zmm6 {%k3}
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm8[0],ymm0[1],ymm8[1],ymm0[2],ymm8[2],ymm0[3],ymm8[3],ymm0[4],ymm8[4],ymm0[5],ymm8[5],ymm0[6],ymm8[6],ymm0[7],ymm8[7],ymm0[16],ymm8[16],ymm0[17],ymm8[17],ymm0[18],ymm8[18],ymm0[19],ymm8[19],ymm0[20],ymm8[20],ymm0[21],ymm8[21],ymm0[22],ymm8[22],ymm0[23],ymm8[23]
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm22[8],xmm20[8],xmm22[9],xmm20[9],xmm22[10],xmm20[10],xmm22[11],xmm20[11],xmm22[12],xmm20[12],xmm22[13],xmm20[13],xmm22[14],xmm20[14],xmm22[15],xmm20[15]
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm8 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7,40,43,42,41,40,43,42,41,40,43,42,41,44,45,46,45]
-; AVX512DQ-BW-NEXT: vpermt2w %zmm0, %zmm8, %zmm4
-; AVX512DQ-BW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-BW-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512DQ-BW-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[16],mem[16],ymm0[17],mem[17],ymm0[18],mem[18],ymm0[19],mem[19],ymm0[20],mem[20],ymm0[21],mem[21],ymm0[22],mem[22],ymm0[23],mem[23]
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm17 = xmm21[8],xmm18[8],xmm21[9],xmm18[9],xmm21[10],xmm18[10],xmm21[11],xmm18[11],xmm21[12],xmm18[12],xmm21[13],xmm18[13],xmm21[14],xmm18[14],xmm21[15],xmm18[15]
-; AVX512DQ-BW-NEXT: vpermt2w %zmm0, %zmm8, %zmm17
-; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0]
-; AVX512DQ-BW-NEXT: # ymm0 = mem[0,1,0,1]
-; AVX512DQ-BW-NEXT: vpshufb %ymm0, %ymm1, %ymm1
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm8 = xmm25[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm18 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,40,41,42,43,40,41,42,43,40,41,42,43,44,45,46,47]
-; AVX512DQ-BW-NEXT: vpermt2w %zmm1, %zmm18, %zmm8
-; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0]
-; AVX512DQ-BW-NEXT: # ymm1 = mem[0,1,0,1]
-; AVX512DQ-BW-NEXT: vpshufb %ymm1, %ymm16, %ymm16
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm20 = xmm27[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512DQ-BW-NEXT: vpermt2w %zmm16, %zmm18, %zmm20
-; AVX512DQ-BW-NEXT: vpshufb %ymm0, %ymm13, %ymm0
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm13 = xmm23[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512DQ-BW-NEXT: vpermt2w %zmm0, %zmm18, %zmm13
-; AVX512DQ-BW-NEXT: vpshufb %ymm1, %ymm7, %ymm0
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm24[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512DQ-BW-NEXT: vpermt2w %zmm0, %zmm18, %zmm1
-; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm3 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512DQ-BW-NEXT: vpshufb %xmm3, %xmm11, %xmm0
-; AVX512DQ-BW-NEXT: vpshufb %xmm3, %xmm30, %xmm2
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm30[0],xmm11[0],xmm30[1],xmm11[1],xmm30[2],xmm11[2],xmm30[3],xmm11[3],xmm30[4],xmm11[4],xmm30[5],xmm11[5],xmm30[6],xmm11[6],xmm30[7],xmm11[7]
-; AVX512DQ-BW-NEXT: vprold $16, %xmm2, %xmm2
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,0,0,1,4,4,4,5]
-; AVX512DQ-BW-NEXT: vmovdqu16 %zmm0, %zmm5 {%k2}
-; AVX512DQ-BW-NEXT: vmovdqu16 %zmm15, %zmm5 {%k1}
-; AVX512DQ-BW-NEXT: movabsq $585610922974906400, %rax # imm = 0x820820820820820
-; AVX512DQ-BW-NEXT: kmovq %rax, %k3
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm19, %zmm5 {%k3}
-; AVX512DQ-BW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX512DQ-BW-NEXT: vpshufb %xmm3, %xmm7, %xmm0
-; AVX512DQ-BW-NEXT: vpshufb %xmm3, %xmm9, %xmm2
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm9[0],xmm7[0],xmm9[1],xmm7[1],xmm9[2],xmm7[2],xmm9[3],xmm7[3],xmm9[4],xmm7[4],xmm9[5],xmm7[5],xmm9[6],xmm7[6],xmm9[7],xmm7[7]
-; AVX512DQ-BW-NEXT: vprold $16, %xmm2, %xmm2
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,0,0,1,4,4,4,5]
-; AVX512DQ-BW-NEXT: vmovdqu16 %zmm0, %zmm14 {%k2}
-; AVX512DQ-BW-NEXT: vmovdqu16 %zmm26, %zmm14 {%k1}
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm28, %zmm14 {%k3}
-; AVX512DQ-BW-NEXT: vmovdqu16 %zmm29, %zmm4 {%k1}
-; AVX512DQ-BW-NEXT: movl $1227133513, %eax # imm = 0x49249249
-; AVX512DQ-BW-NEXT: kmovd %eax, %k2
-; AVX512DQ-BW-NEXT: vmovdqu16 %zmm8, %zmm4 {%k2}
-; AVX512DQ-BW-NEXT: movabsq $2342443691899625602, %rax # imm = 0x2082082082082082
-; AVX512DQ-BW-NEXT: kmovq %rax, %k3
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm20, %zmm4 {%k3}
-; AVX512DQ-BW-NEXT: vmovdqu16 %zmm12, %zmm17 {%k1}
-; AVX512DQ-BW-NEXT: vmovdqu16 %zmm13, %zmm17 {%k2}
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm1, %zmm17 {%k3}
-; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm17, 256(%rax)
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm4, 64(%rax)
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm14, (%rax)
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm5, 192(%rax)
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm6, 128(%rax)
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm10, 320(%rax)
+; AVX512DQ-BW-NEXT: vpermt2w %zmm30, %zmm28, %zmm29
+; AVX512DQ-BW-NEXT: movabsq $585610922974906400, %rcx # imm = 0x820820820820820
+; AVX512DQ-BW-NEXT: kmovq %rcx, %k3
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm29, %zmm6 {%k3}
+; AVX512DQ-BW-NEXT: vpshufb %xmm20, %xmm21, %xmm29
+; AVX512DQ-BW-NEXT: vpshufb %xmm20, %xmm22, %xmm20
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm29 = xmm20[8],xmm29[8],xmm20[9],xmm29[9],xmm20[10],xmm29[10],xmm20[11],xmm29[11],xmm20[12],xmm29[12],xmm20[13],xmm29[13],xmm20[14],xmm29[14],xmm20[15],xmm29[15]
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm20 = xmm22[0],xmm21[0],xmm22[1],xmm21[1],xmm22[2],xmm21[2],xmm22[3],xmm21[3],xmm22[4],xmm21[4],xmm22[5],xmm21[5],xmm22[6],xmm21[6],xmm22[7],xmm21[7]
+; AVX512DQ-BW-NEXT: vpermt2w %zmm29, %zmm27, %zmm20
+; AVX512DQ-BW-NEXT: vpshufb %xmm11, %xmm25, %xmm27
+; AVX512DQ-BW-NEXT: vpshufb %xmm11, %xmm26, %xmm11
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm11[0],xmm27[0],xmm11[1],xmm27[1],xmm11[2],xmm27[2],xmm11[3],xmm27[3],xmm11[4],xmm27[4],xmm11[5],xmm27[5],xmm11[6],xmm27[6],xmm11[7],xmm27[7]
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm27 = xmm26[0],xmm25[0],xmm26[1],xmm25[1],xmm26[2],xmm25[2],xmm26[3],xmm25[3],xmm26[4],xmm25[4],xmm26[5],xmm25[5],xmm26[6],xmm25[6],xmm26[7],xmm25[7]
+; AVX512DQ-BW-NEXT: vprold $16, %xmm27, %xmm27
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm11, %zmm27, %zmm11
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm11 = zmm11[0,0,0,1,4,4,4,5]
+; AVX512DQ-BW-NEXT: vmovdqu16 %zmm11, %zmm20 {%k2}
+; AVX512DQ-BW-NEXT: vpmovzxbw {{.*#+}} xmm11 = xmm23[0],zero,xmm23[1],zero,xmm23[2],zero,xmm23[3],zero,xmm23[4],zero,xmm23[5],zero,xmm23[6],zero,xmm23[7],zero
+; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} xmm27 = xmm23[2,1,2,3]
+; AVX512DQ-BW-NEXT: vpmovzxbw {{.*#+}} xmm27 = xmm27[0],zero,xmm27[1],zero,xmm27[2],zero,xmm27[3],zero,xmm27[4],zero,xmm27[5],zero,xmm27[6],zero,xmm27[7],zero
+; AVX512DQ-BW-NEXT: vpermt2w %zmm27, %zmm28, %zmm11
+; AVX512DQ-BW-NEXT: vmovdqu16 %zmm11, %zmm20 {%k1}
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm24[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} xmm27 = xmm24[2,1,2,3]
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm27 = xmm27[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512DQ-BW-NEXT: vpermt2w %zmm27, %zmm28, %zmm11
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm11, %zmm20 {%k3}
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm11 = ymm17[0],ymm16[0],ymm17[1],ymm16[1],ymm17[2],ymm16[2],ymm17[3],ymm16[3],ymm17[4],ymm16[4],ymm17[5],ymm16[5],ymm17[6],ymm16[6],ymm17[7],ymm16[7],ymm17[16],ymm16[16],ymm17[17],ymm16[17],ymm17[18],ymm16[18],ymm17[19],ymm16[19],ymm17[20],ymm16[20],ymm17[21],ymm16[21],ymm17[22],ymm16[22],ymm17[23],ymm16[23]
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm16 = xmm26[8],xmm25[8],xmm26[9],xmm25[9],xmm26[10],xmm25[10],xmm26[11],xmm25[11],xmm26[12],xmm25[12],xmm26[13],xmm25[13],xmm26[14],xmm25[14],xmm26[15],xmm25[15]
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm17 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7,41,40,43,42,41,40,43,42,41,40,43,42,45,44,47,46]
+; AVX512DQ-BW-NEXT: vpermt2w %zmm11, %zmm17, %zmm16
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm9 = ymm10[0],ymm9[0],ymm10[1],ymm9[1],ymm10[2],ymm9[2],ymm10[3],ymm9[3],ymm10[4],ymm9[4],ymm10[5],ymm9[5],ymm10[6],ymm9[6],ymm10[7],ymm9[7],ymm10[16],ymm9[16],ymm10[17],ymm9[17],ymm10[18],ymm9[18],ymm10[19],ymm9[19],ymm10[20],ymm9[20],ymm10[21],ymm9[21],ymm10[22],ymm9[22],ymm10[23],ymm9[23]
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm10 = xmm22[8],xmm21[8],xmm22[9],xmm21[9],xmm22[10],xmm21[10],xmm22[11],xmm21[11],xmm22[12],xmm21[12],xmm22[13],xmm21[13],xmm22[14],xmm21[14],xmm22[15],xmm21[15]
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm11 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7,40,43,42,41,40,43,42,41,40,43,42,41,44,45,46,45]
+; AVX512DQ-BW-NEXT: vpermt2w %zmm9, %zmm11, %zmm10
+; AVX512DQ-BW-NEXT: vmovdqu16 %zmm16, %zmm10 {%k1}
+; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0]
+; AVX512DQ-BW-NEXT: # ymm9 = mem[0,1,0,1]
+; AVX512DQ-BW-NEXT: vpshufb %ymm9, %ymm13, %ymm13
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm16 = xmm23[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm21 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,40,41,42,43,40,41,42,43,40,41,42,43,44,45,46,47]
+; AVX512DQ-BW-NEXT: vpermt2w %zmm13, %zmm21, %zmm16
+; AVX512DQ-BW-NEXT: movl $1227133513, %ecx # imm = 0x49249249
+; AVX512DQ-BW-NEXT: kmovd %ecx, %k2
+; AVX512DQ-BW-NEXT: vmovdqu16 %zmm16, %zmm10 {%k2}
+; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm13 = [0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0]
+; AVX512DQ-BW-NEXT: # ymm13 = mem[0,1,0,1]
+; AVX512DQ-BW-NEXT: vpshufb %ymm13, %ymm12, %ymm12
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm16 = xmm24[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512DQ-BW-NEXT: vpermt2w %zmm12, %zmm21, %zmm16
+; AVX512DQ-BW-NEXT: movabsq $2342443691899625602, %rcx # imm = 0x2082082082082082
+; AVX512DQ-BW-NEXT: kmovq %rcx, %k3
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm16, %zmm10 {%k3}
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[16],ymm3[16],ymm4[17],ymm3[17],ymm4[18],ymm3[18],ymm4[19],ymm3[19],ymm4[20],ymm3[20],ymm4[21],ymm3[21],ymm4[22],ymm3[22],ymm4[23],ymm3[23]
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm18[8],xmm14[8],xmm18[9],xmm14[9],xmm18[10],xmm14[10],xmm18[11],xmm14[11],xmm18[12],xmm14[12],xmm18[13],xmm14[13],xmm18[14],xmm14[14],xmm18[15],xmm14[15]
+; AVX512DQ-BW-NEXT: vpermt2w %zmm3, %zmm17, %zmm4
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[16],ymm1[16],ymm2[17],ymm1[17],ymm2[18],ymm1[18],ymm2[19],ymm1[19],ymm2[20],ymm1[20],ymm2[21],ymm1[21],ymm2[22],ymm1[22],ymm2[23],ymm1[23]
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm8[8],xmm7[8],xmm8[9],xmm7[9],xmm8[10],xmm7[10],xmm8[11],xmm7[11],xmm8[12],xmm7[12],xmm8[13],xmm7[13],xmm8[14],xmm7[14],xmm8[15],xmm7[15]
+; AVX512DQ-BW-NEXT: vpermt2w %zmm1, %zmm11, %zmm2
+; AVX512DQ-BW-NEXT: vmovdqu16 %zmm4, %zmm2 {%k1}
+; AVX512DQ-BW-NEXT: vmovdqa 32(%r8), %ymm1
+; AVX512DQ-BW-NEXT: vpshufb %ymm9, %ymm1, %ymm1
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm15[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512DQ-BW-NEXT: vpermt2w %zmm1, %zmm21, %zmm3
+; AVX512DQ-BW-NEXT: vmovdqu16 %zmm3, %zmm2 {%k2}
+; AVX512DQ-BW-NEXT: vmovdqa 32(%r9), %ymm1
+; AVX512DQ-BW-NEXT: vpshufb %ymm13, %ymm1, %ymm1
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm19[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512DQ-BW-NEXT: vpermt2w %zmm1, %zmm21, %zmm3
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k3}
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm2, 256(%rax)
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm10, 64(%rax)
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm20, (%rax)
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm6, 192(%rax)
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm5, 128(%rax)
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, 320(%rax)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
; AVX512DQ-BW-FCP-LABEL: store_i8_stride6_vf64:
; AVX512DQ-BW-FCP: # %bb.0:
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rsi), %ymm8
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rsi), %ymm9
; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm0 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm0, %ymm8, %ymm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm9
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm0, %ymm9, %ymm2
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm0, %ymm9, %ymm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm10
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm0, %ymm10, %ymm2
; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[16],ymm1[16],ymm2[17],ymm1[17],ymm2[18],ymm1[18],ymm2[19],ymm1[19],ymm2[20],ymm1[20],ymm2[21],ymm1[21],ymm2[22],ymm1[22],ymm2[23],ymm1[23]
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,2,2,3]
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm9[8],ymm8[8],ymm9[9],ymm8[9],ymm9[10],ymm8[10],ymm9[11],ymm8[11],ymm9[12],ymm8[12],ymm9[13],ymm8[13],ymm9[14],ymm8[14],ymm9[15],ymm8[15],ymm9[24],ymm8[24],ymm9[25],ymm8[25],ymm9[26],ymm8[26],ymm9[27],ymm8[27],ymm9[28],ymm8[28],ymm9[29],ymm8[29],ymm9[30],ymm8[30],ymm9[31],ymm8[31]
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm10[8],ymm9[8],ymm10[9],ymm9[9],ymm10[10],ymm9[10],ymm10[11],ymm9[11],ymm10[12],ymm9[12],ymm10[13],ymm9[13],ymm10[14],ymm9[14],ymm10[15],ymm9[15],ymm10[24],ymm9[24],ymm10[25],ymm9[25],ymm10[26],ymm9[26],ymm10[27],ymm9[27],ymm10[28],ymm9[28],ymm10[29],ymm9[29],ymm10[30],ymm9[30],ymm10[31],ymm9[31]
; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm0 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
; AVX512DQ-BW-FCP-NEXT: vpermw %ymm3, %ymm0, %ymm3
; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm3
@@ -6952,25 +6932,25 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %ymm7
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rcx), %xmm18
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rcx), %xmm21
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm2, %xmm21, %xmm10
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm2, %xmm21, %xmm8
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdx), %xmm30
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rdx), %xmm24
; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm2, %xmm24, %xmm11
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3],xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm11[0],xmm8[0],xmm11[1],xmm8[1],xmm11[2],xmm8[2],xmm11[3],xmm8[3],xmm11[4],xmm8[4],xmm11[5],xmm8[5],xmm11[6],xmm8[6],xmm11[7],xmm8[7]
; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm24[0],xmm21[0],xmm24[1],xmm21[1],xmm24[2],xmm21[2],xmm24[3],xmm21[3],xmm24[4],xmm21[4],xmm24[5],xmm21[5],xmm24[6],xmm21[6],xmm24[7],xmm21[7]
; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm13 = [1,0,3,2,1,0,3,2,1,0,3,2,5,4,7,6,32,33,34,35,32,33,34,35,32,33,34,35,36,37,38,39]
-; AVX512DQ-BW-FCP-NEXT: vpermt2w %zmm10, %zmm13, %zmm11
+; AVX512DQ-BW-FCP-NEXT: vpermt2w %zmm8, %zmm13, %zmm11
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rsi), %xmm25
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rsi), %xmm19
; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm15 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm15, %xmm19, %xmm10
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm15, %xmm19, %xmm8
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %xmm26
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rdi), %xmm22
; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm15, %xmm22, %xmm12
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm12 = xmm12[8],xmm10[8],xmm12[9],xmm10[9],xmm12[10],xmm10[10],xmm12[11],xmm10[11],xmm12[12],xmm10[12],xmm12[13],xmm10[13],xmm12[14],xmm10[14],xmm12[15],xmm10[15]
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm22[0],xmm19[0],xmm22[1],xmm19[1],xmm22[2],xmm19[2],xmm22[3],xmm19[3],xmm22[4],xmm19[4],xmm22[5],xmm19[5],xmm22[6],xmm19[6],xmm22[7],xmm19[7]
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm12 = xmm12[8],xmm8[8],xmm12[9],xmm8[9],xmm12[10],xmm8[10],xmm12[11],xmm8[11],xmm12[12],xmm8[12],xmm12[13],xmm8[13],xmm12[14],xmm8[14],xmm12[15],xmm8[15]
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm22[0],xmm19[0],xmm22[1],xmm19[1],xmm22[2],xmm19[2],xmm22[3],xmm19[3],xmm22[4],xmm19[4],xmm22[5],xmm19[5],xmm22[6],xmm19[6],xmm22[7],xmm19[7]
; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm16 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5,32,33,34,35,32,33,34,35,32,33,34,35,36,37,38,39]
-; AVX512DQ-BW-FCP-NEXT: vpermt2w %zmm12, %zmm16, %zmm10
+; AVX512DQ-BW-FCP-NEXT: vpermt2w %zmm12, %zmm16, %zmm8
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%r8), %xmm29
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%r8), %xmm20
; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} xmm17 = [8,9,0,0,0,5,6,7]
@@ -7020,57 +7000,57 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm1, %ymm31, %ymm29
; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm0 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,40,41,42,43,40,41,42,43,40,41,42,43,44,45,46,47]
; AVX512DQ-BW-FCP-NEXT: vpermt2w %zmm29, %zmm0, %zmm25
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm9 = ymm9[0],ymm8[0],ymm9[1],ymm8[1],ymm9[2],ymm8[2],ymm9[3],ymm8[3],ymm9[4],ymm8[4],ymm9[5],ymm8[5],ymm9[6],ymm8[6],ymm9[7],ymm8[7],ymm9[16],ymm8[16],ymm9[17],ymm8[17],ymm9[18],ymm8[18],ymm9[19],ymm8[19],ymm9[20],ymm8[20],ymm9[21],ymm8[21],ymm9[22],ymm8[22],ymm9[23],ymm8[23]
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm8 = xmm22[8],xmm19[8],xmm22[9],xmm19[9],xmm22[10],xmm19[10],xmm22[11],xmm19[11],xmm22[12],xmm19[12],xmm22[13],xmm19[13],xmm22[14],xmm19[14],xmm22[15],xmm19[15]
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm10 = ymm10[0],ymm9[0],ymm10[1],ymm9[1],ymm10[2],ymm9[2],ymm10[3],ymm9[3],ymm10[4],ymm9[4],ymm10[5],ymm9[5],ymm10[6],ymm9[6],ymm10[7],ymm9[7],ymm10[16],ymm9[16],ymm10[17],ymm9[17],ymm10[18],ymm9[18],ymm10[19],ymm9[19],ymm10[20],ymm9[20],ymm10[21],ymm9[21],ymm10[22],ymm9[22],ymm10[23],ymm9[23]
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm9 = xmm22[8],xmm19[8],xmm22[9],xmm19[9],xmm22[10],xmm19[10],xmm22[11],xmm19[11],xmm22[12],xmm19[12],xmm22[13],xmm19[13],xmm22[14],xmm19[14],xmm22[15],xmm19[15]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%r9), %ymm22
-; AVX512DQ-BW-FCP-NEXT: vpermt2w %zmm9, %zmm26, %zmm8
+; AVX512DQ-BW-FCP-NEXT: vpermt2w %zmm10, %zmm26, %zmm9
; AVX512DQ-BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} ymm26 = [0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0]
; AVX512DQ-BW-FCP-NEXT: # ymm26 = mem[0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm26, %ymm22, %ymm19
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm9 = xmm28[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512DQ-BW-FCP-NEXT: vpermt2w %zmm19, %zmm0, %zmm9
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%r8), %ymm28
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm1, %ymm28, %ymm1
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm10 = xmm28[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512DQ-BW-FCP-NEXT: vpermt2w %zmm19, %zmm0, %zmm10
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%r8), %ymm19
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm1, %ymm19, %ymm1
; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm19 = xmm20[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX512DQ-BW-FCP-NEXT: vpermt2w %zmm1, %zmm0, %zmm19
; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%r9), %ymm1
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm26, %ymm1, %ymm26
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm26, %ymm1, %ymm1
; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm20 = xmm23[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512DQ-BW-FCP-NEXT: vpermt2w %zmm26, %zmm0, %zmm20
+; AVX512DQ-BW-FCP-NEXT: vpermt2w %zmm1, %zmm0, %zmm20
; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm2, %ymm4, %ymm0
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm2, %ymm5, %ymm23
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm23[0],ymm0[0],ymm23[1],ymm0[1],ymm23[2],ymm0[2],ymm23[3],ymm0[3],ymm23[4],ymm0[4],ymm23[5],ymm0[5],ymm23[6],ymm0[6],ymm23[7],ymm0[7],ymm23[16],ymm0[16],ymm23[17],ymm0[17],ymm23[18],ymm0[18],ymm23[19],ymm0[19],ymm23[20],ymm0[20],ymm23[21],ymm0[21],ymm23[22],ymm0[22],ymm23[23],ymm0[23]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm2, %ymm5, %ymm1
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%r8), %zmm1
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm5[8],ymm4[8],ymm5[9],ymm4[9],ymm5[10],ymm4[10],ymm5[11],ymm4[11],ymm5[12],ymm4[12],ymm5[13],ymm4[13],ymm5[14],ymm4[14],ymm5[15],ymm4[15],ymm5[24],ymm4[24],ymm5[25],ymm4[25],ymm5[26],ymm4[26],ymm5[27],ymm4[27],ymm5[28],ymm4[28],ymm5[29],ymm4[29],ymm5[30],ymm4[30],ymm5[31],ymm4[31]
; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm5 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
; AVX512DQ-BW-FCP-NEXT: vpermw %ymm4, %ymm5, %ymm4
; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%r9), %zmm0
; AVX512DQ-BW-FCP-NEXT: movl $613566756, %eax # imm = 0x24924924
; AVX512DQ-BW-FCP-NEXT: kmovd %eax, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm3, %zmm4 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%r8), %zmm0
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm28[0,1,2,3],zmm0[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm1[4,5,6,7,4,5,6,7]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm3 = zmm3[6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,42,u,45,u,44,u,43,u,46,u,u,u,u,u,47,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63,u]
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[2,2,2,3,6,6,6,7]
; AVX512DQ-BW-FCP-NEXT: movl $-1840700270, %eax # imm = 0x92492492
; AVX512DQ-BW-FCP-NEXT: kmovd %eax, %k2
; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm3, %zmm4 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%r9), %zmm3
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm3[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm1 = zmm1[u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,42,u,45,u,44,u,43,u,46,u,u,u,u,u,47,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,2,2,3,6,6,6,7]
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm0[4,5,6,7,4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm3 = zmm3[u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,42,u,45,u,44,u,43,u,46,u,u,u,u,u,47,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[2,2,2,3,6,6,6,7]
; AVX512DQ-BW-FCP-NEXT: movabsq $-9076969306111049208, %rax # imm = 0x8208208208208208
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm1, %zmm4 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm3, %zmm4 {%k3}
; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm23 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm23, %ymm6, %ymm1
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm23, %ymm6, %ymm3
; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm23, %ymm30, %ymm23
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm23[0],ymm1[0],ymm23[1],ymm1[1],ymm23[2],ymm1[2],ymm23[3],ymm1[3],ymm23[4],ymm1[4],ymm23[5],ymm1[5],ymm23[6],ymm1[6],ymm23[7],ymm1[7],ymm23[16],ymm1[16],ymm23[17],ymm1[17],ymm23[18],ymm1[18],ymm23[19],ymm1[19],ymm23[20],ymm1[20],ymm23[21],ymm1[21],ymm23[22],ymm1[22],ymm23[23],ymm1[23]
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm23[0],ymm3[0],ymm23[1],ymm3[1],ymm23[2],ymm3[2],ymm23[3],ymm3[3],ymm23[4],ymm3[4],ymm23[5],ymm3[5],ymm23[6],ymm3[6],ymm23[7],ymm3[7],ymm23[16],ymm3[16],ymm23[17],ymm3[17],ymm23[18],ymm3[18],ymm23[19],ymm3[19],ymm23[20],ymm3[20],ymm23[21],ymm3[21],ymm23[22],ymm3[22],ymm23[23],ymm3[23]
; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm30[8],ymm6[8],ymm30[9],ymm6[9],ymm30[10],ymm6[10],ymm30[11],ymm6[11],ymm30[12],ymm6[12],ymm30[13],ymm6[13],ymm30[14],ymm6[14],ymm30[15],ymm6[15],ymm30[24],ymm6[24],ymm30[25],ymm6[25],ymm30[26],ymm6[26],ymm30[27],ymm6[27],ymm30[28],ymm6[28],ymm30[29],ymm6[29],ymm30[30],ymm6[30],ymm30[31],ymm6[31]
; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm23 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
; AVX512DQ-BW-FCP-NEXT: vpermw %ymm6, %ymm23, %ymm6
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm1, %zmm1
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm3, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm2, %ymm7, %ymm6
; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm2, %ymm27, %ymm2
; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm6[0],ymm2[1],ymm6[1],ymm2[2],ymm6[2],ymm2[3],ymm6[3],ymm2[4],ymm6[4],ymm2[5],ymm6[5],ymm2[6],ymm6[6],ymm2[7],ymm6[7],ymm2[16],ymm6[16],ymm2[17],ymm6[17],ymm2[18],ymm6[18],ymm2[19],ymm6[19],ymm2[20],ymm6[20],ymm2[21],ymm6[21],ymm2[22],ymm6[22],ymm2[23],ymm6[23]
@@ -7078,20 +7058,20 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP-NEXT: vpermw %ymm6, %ymm5, %ymm5
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm2, %zmm2
-; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm1, %zmm2 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm31, %zmm0, %zmm0
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63,u]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,2,2,3,6,6,6,7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm0, %zmm2 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm22, %zmm3, %zmm0
+; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm3, %zmm2 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm31, %zmm1, %zmm1
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm1 = zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63,u]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,2,2,3,6,6,6,7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm1, %zmm2 {%k2}
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm22, %zmm0, %zmm0
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63]
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,2,2,3,6,6,6,7]
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm0, %zmm2 {%k3}
-; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm11, %zmm10 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm12, %zmm10 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm11, %zmm8 {%k2}
+; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm12, %zmm8 {%k1}
; AVX512DQ-BW-FCP-NEXT: movabsq $585610922974906400, %rax # imm = 0x820820820820820
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm15, %zmm10 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm15, %zmm8 {%k3}
; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm14, %zmm13 {%k2}
; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm16, %zmm13 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm17, %zmm13 {%k3}
@@ -7101,15 +7081,15 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm25, %zmm24 {%k2}
; AVX512DQ-BW-FCP-NEXT: movabsq $2342443691899625602, %rax # imm = 0x2082082082082082
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm9, %zmm24 {%k3}
-; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm21, %zmm8 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm19, %zmm8 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm20, %zmm8 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm10, %zmm24 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm21, %zmm9 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm19, %zmm9 {%k2}
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm20, %zmm9 {%k3}
; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm8, 256(%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm9, 256(%rax)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm24, 64(%rax)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm13, (%rax)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm10, 192(%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm8, 192(%rax)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm2, 128(%rax)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm4, 320(%rax)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
index 02ec9fc66feab..aadb8b7635636 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
@@ -9934,297 +9934,297 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-LABEL: store_i8_stride7_vf64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT: vmovdqa64 (%rax), %zmm0
+; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %r10
+; AVX512BW-NEXT: vmovdqa64 (%r10), %zmm0
; AVX512BW-NEXT: vmovdqa 32(%rsi), %ymm2
-; AVX512BW-NEXT: vmovdqa 32(%rdi), %ymm14
-; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm1 = ymm14[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
+; AVX512BW-NEXT: vmovdqa 32(%rdi), %ymm13
+; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm1 = ymm13[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
; AVX512BW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,2,3,3,6,6,7,7]
-; AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm4 = [13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14]
-; AVX512BW-NEXT: movl $338170920, %r10d # imm = 0x14281428
-; AVX512BW-NEXT: kmovd %r10d, %k2
-; AVX512BW-NEXT: vpshufb %ymm4, %ymm2, %ymm1 {%k2}
-; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
-; AVX512BW-NEXT: # ymm9 = mem[0,1,0,1]
-; AVX512BW-NEXT: vpshufb %ymm9, %ymm2, %ymm3
-; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm10 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
-; AVX512BW-NEXT: vpshufb %ymm10, %ymm14, %ymm5
-; AVX512BW-NEXT: vpor %ymm3, %ymm5, %ymm3
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm3[2,3,2,3],zmm1[2,3,2,3]
-; AVX512BW-NEXT: vmovdqa 32(%rdx), %ymm15
-; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
-; AVX512BW-NEXT: vpshufb %ymm5, %ymm15, %ymm3
-; AVX512BW-NEXT: vmovdqa64 32(%rcx), %ymm17
-; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
-; AVX512BW-NEXT: # ymm11 = mem[0,1,0,1]
-; AVX512BW-NEXT: vpshufb %ymm11, %ymm17, %ymm6
-; AVX512BW-NEXT: vpor %ymm3, %ymm6, %ymm3
-; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm6 = [11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12]
-; AVX512BW-NEXT: vpshufb %ymm6, %ymm17, %ymm7
-; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm8 = ymm15[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512BW-NEXT: vpshufd {{.*#+}} ymm8 = ymm8[0,2,3,3,4,6,7,7]
-; AVX512BW-NEXT: movl $676341840, %r10d # imm = 0x28502850
-; AVX512BW-NEXT: kmovd %r10d, %k3
-; AVX512BW-NEXT: vmovdqu8 %ymm8, %ymm7 {%k3}
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm3[2,3,2,3],zmm7[2,3,2,3]
-; AVX512BW-NEXT: movabsq $1742999440035548184, %r10 # imm = 0x183060C183060C18
-; AVX512BW-NEXT: kmovq %r10, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm1 {%k1}
+; AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm3 = [13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14]
+; AVX512BW-NEXT: movl $338170920, %r11d # imm = 0x14281428
+; AVX512BW-NEXT: kmovd %r11d, %k2
+; AVX512BW-NEXT: vpshufb %ymm3, %ymm2, %ymm1 {%k2}
+; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
+; AVX512BW-NEXT: # ymm8 = mem[0,1,0,1]
+; AVX512BW-NEXT: vpshufb %ymm8, %ymm2, %ymm4
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm9 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
+; AVX512BW-NEXT: vpshufb %ymm9, %ymm13, %ymm5
+; AVX512BW-NEXT: vpor %ymm4, %ymm5, %ymm4
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm4[2,3,2,3],zmm1[2,3,2,3]
+; AVX512BW-NEXT: vmovdqa 32(%rdx), %ymm14
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
+; AVX512BW-NEXT: vpshufb %ymm4, %ymm14, %ymm5
+; AVX512BW-NEXT: vmovdqa 32(%rcx), %ymm15
+; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
+; AVX512BW-NEXT: # ymm10 = mem[0,1,0,1]
+; AVX512BW-NEXT: vpshufb %ymm10, %ymm15, %ymm6
+; AVX512BW-NEXT: vpor %ymm5, %ymm6, %ymm6
+; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm5 = [11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12]
+; AVX512BW-NEXT: vpshufb %ymm5, %ymm15, %ymm7
+; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm11 = ymm14[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512BW-NEXT: vpshufd {{.*#+}} ymm11 = ymm11[0,2,3,3,4,6,7,7]
+; AVX512BW-NEXT: movl $676341840, %r11d # imm = 0x28502850
+; AVX512BW-NEXT: kmovd %r11d, %k3
+; AVX512BW-NEXT: vmovdqu8 %ymm11, %ymm7 {%k3}
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm6 = zmm6[2,3,2,3],zmm7[2,3,2,3]
+; AVX512BW-NEXT: movabsq $1742999440035548184, %r11 # imm = 0x183060C183060C18
+; AVX512BW-NEXT: kmovq %r11, %k1
+; AVX512BW-NEXT: vmovdqu8 %zmm6, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa64 32(%r9), %ymm16
-; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [29,128,27,128,128,128,128,30,128,28,128,128,128,128,31,128,29,128,27,128,128,128,128,30,128,28,128,128,128,128,31,128]
-; AVX512BW-NEXT: # ymm7 = mem[0,1,0,1]
-; AVX512BW-NEXT: vpshufb %ymm7, %ymm16, %ymm3
-; AVX512BW-NEXT: vmovdqa64 32(%r8), %ymm18
-; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,27,128,128,128,128,30,128,28,128,128,128,128,31,128,29]
-; AVX512BW-NEXT: vpshufb %ymm8, %ymm18, %ymm12
-; AVX512BW-NEXT: vpor %ymm3, %ymm12, %ymm3
-; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
-; AVX512BW-NEXT: vpshufb %ymm12, %ymm18, %ymm19
-; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm13 = [128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
-; AVX512BW-NEXT: # ymm13 = mem[0,1,0,1]
-; AVX512BW-NEXT: vpshufb %ymm13, %ymm16, %ymm20
-; AVX512BW-NEXT: vporq %ymm19, %ymm20, %ymm19
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm19[2,3,2,3],zmm3[2,3,2,3]
-; AVX512BW-NEXT: movabsq $6971997760142192736, %r10 # imm = 0x60C183060C183060
-; AVX512BW-NEXT: kmovq %r10, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm1 {%k1}
-; AVX512BW-NEXT: vmovdqa 32(%rax), %ymm3
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm19 = [11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12,14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15]
-; AVX512BW-NEXT: vpermw %zmm3, %zmm19, %zmm19
-; AVX512BW-NEXT: movabsq $-9150747060186627967, %rax # imm = 0x8102040810204081
-; AVX512BW-NEXT: kmovq %rax, %k4
-; AVX512BW-NEXT: vmovdqu8 %zmm19, %zmm1 {%k4}
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm19 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
-; AVX512BW-NEXT: vpshufb %ymm19, %ymm15, %ymm21
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm20 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512BW-NEXT: vpshufb %ymm20, %ymm17, %ymm22
-; AVX512BW-NEXT: vporq %ymm21, %ymm22, %ymm21
+; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [29,128,27,128,128,128,128,30,128,28,128,128,128,128,31,128,29,128,27,128,128,128,128,30,128,28,128,128,128,128,31,128]
+; AVX512BW-NEXT: # ymm6 = mem[0,1,0,1]
+; AVX512BW-NEXT: vpshufb %ymm6, %ymm16, %ymm11
+; AVX512BW-NEXT: vmovdqa64 32(%r8), %ymm19
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm7 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,27,128,128,128,128,30,128,28,128,128,128,128,31,128,29]
+; AVX512BW-NEXT: vpshufb %ymm7, %ymm19, %ymm12
+; AVX512BW-NEXT: vpor %ymm11, %ymm12, %ymm11
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm17 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
+; AVX512BW-NEXT: vpshufb %ymm17, %ymm19, %ymm12
+; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm18 = [128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
+; AVX512BW-NEXT: # ymm18 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT: vpshufb %ymm18, %ymm16, %ymm20
+; AVX512BW-NEXT: vporq %ymm12, %ymm20, %ymm12
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm11 = zmm12[2,3,2,3],zmm11[2,3,2,3]
+; AVX512BW-NEXT: movabsq $6971997760142192736, %r11 # imm = 0x60C183060C183060
+; AVX512BW-NEXT: kmovq %r11, %k1
+; AVX512BW-NEXT: vmovdqu8 %zmm11, %zmm1 {%k1}
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm11 = [27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28,30,29,30,31,31,30,30,31,30,29,30,31,31,30,30,31]
+; AVX512BW-NEXT: vpermw %zmm0, %zmm11, %zmm11
+; AVX512BW-NEXT: movabsq $-9150747060186627967, %r11 # imm = 0x8102040810204081
+; AVX512BW-NEXT: kmovq %r11, %k4
+; AVX512BW-NEXT: vmovdqu8 %zmm11, %zmm1 {%k4}
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm11 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
+; AVX512BW-NEXT: vpshufb %ymm11, %ymm14, %ymm20
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
+; AVX512BW-NEXT: vpshufb %ymm12, %ymm15, %ymm21
+; AVX512BW-NEXT: vporq %ymm20, %ymm21, %ymm20
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm22 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128]
-; AVX512BW-NEXT: vpshufb %ymm22, %ymm15, %ymm15
-; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm25 = [128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20]
-; AVX512BW-NEXT: # ymm25 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT: vpshufb %ymm25, %ymm17, %ymm17
-; AVX512BW-NEXT: vporq %ymm15, %ymm17, %ymm15
+; AVX512BW-NEXT: vpshufb %ymm22, %ymm14, %ymm14
+; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm23 = [128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20]
+; AVX512BW-NEXT: # ymm23 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT: vpshufb %ymm23, %ymm15, %ymm15
+; AVX512BW-NEXT: vpor %ymm14, %ymm15, %ymm14
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm14, %zmm20, %zmm14
+; AVX512BW-NEXT: vpshuflw {{.*#+}} ymm15 = ymm13[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512BW-NEXT: vpshufd {{.*#+}} ymm15 = ymm15[0,0,1,1,4,4,5,5]
+; AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm25 = [5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6]
+; AVX512BW-NEXT: vpshufb %ymm25, %ymm2, %ymm15 {%k3}
; AVX512BW-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm15, %zmm21, %zmm15
-; AVX512BW-NEXT: vpshuflw {{.*#+}} ymm17 = ymm14[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512BW-NEXT: vpshufd {{.*#+}} ymm17 = ymm17[0,0,1,1,4,4,5,5]
-; AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm21 = [5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6]
-; AVX512BW-NEXT: vpshufb %ymm21, %ymm2, %ymm17 {%k3}
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm17 = ymm17[2,3,2,3]
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm23 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
-; AVX512BW-NEXT: vpshufb %ymm23, %ymm14, %ymm14
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm24 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
-; AVX512BW-NEXT: vpshufb %ymm24, %ymm2, %ymm2
-; AVX512BW-NEXT: vpor %ymm2, %ymm14, %ymm2
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm17, %zmm2, %zmm2
-; AVX512BW-NEXT: movabsq $3485998880071096368, %rax # imm = 0x3060C183060C1830
-; AVX512BW-NEXT: kmovq %rax, %k4
-; AVX512BW-NEXT: vmovdqu8 %zmm15, %zmm2 {%k4}
-; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm14 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
-; AVX512BW-NEXT: vpshufb %ymm14, %ymm16, %ymm17
-; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm15 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
-; AVX512BW-NEXT: vpshufb %ymm15, %ymm18, %ymm26
-; AVX512BW-NEXT: vporq %ymm17, %ymm26, %ymm17
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm20 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
+; AVX512BW-NEXT: vpshufb %ymm20, %ymm13, %ymm13
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm21 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
+; AVX512BW-NEXT: vpshufb %ymm21, %ymm2, %ymm2
+; AVX512BW-NEXT: vpor %ymm2, %ymm13, %ymm2
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm15, %zmm2, %zmm2
+; AVX512BW-NEXT: movabsq $3485998880071096368, %r11 # imm = 0x3060C183060C1830
+; AVX512BW-NEXT: kmovq %r11, %k4
+; AVX512BW-NEXT: vmovdqu8 %zmm14, %zmm2 {%k4}
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm13 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
+; AVX512BW-NEXT: vpshufb %ymm13, %ymm16, %ymm15
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm14 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
+; AVX512BW-NEXT: vpshufb %ymm14, %ymm19, %ymm24
+; AVX512BW-NEXT: vporq %ymm15, %ymm24, %ymm15
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm26 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128]
-; AVX512BW-NEXT: vpshufb %ymm26, %ymm18, %ymm18
+; AVX512BW-NEXT: vpshufb %ymm26, %ymm19, %ymm19
; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm28 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
; AVX512BW-NEXT: # ymm28 = mem[0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vpshufb %ymm28, %ymm16, %ymm16
-; AVX512BW-NEXT: vporq %ymm18, %ymm16, %ymm16
+; AVX512BW-NEXT: vporq %ymm19, %ymm16, %ymm16
; AVX512BW-NEXT: vpermq {{.*#+}} ymm16 = ymm16[2,3,2,3]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm16, %zmm17, %zmm16
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm16, %zmm15, %zmm15
+; AVX512BW-NEXT: vmovdqa64 32(%r10), %ymm16
; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm29 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12]
-; AVX512BW-NEXT: vpermw %ymm3, %ymm29, %ymm17
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm18 = ymm3[12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm17, %zmm18, %zmm17
-; AVX512BW-NEXT: movabsq $145249953336295682, %rax # imm = 0x204081020408102
-; AVX512BW-NEXT: kmovq %rax, %k5
-; AVX512BW-NEXT: vmovdqu8 %zmm17, %zmm16 {%k5}
-; AVX512BW-NEXT: movabsq $-4357498600088870461, %rax # imm = 0xC3870E1C3870E1C3
-; AVX512BW-NEXT: kmovq %rax, %k5
-; AVX512BW-NEXT: vmovdqu8 %zmm16, %zmm2 {%k5}
-; AVX512BW-NEXT: vmovdqa64 (%rdx), %ymm16
-; AVX512BW-NEXT: vpshufb %ymm5, %ymm16, %ymm5
-; AVX512BW-NEXT: vmovdqa64 (%rcx), %ymm17
-; AVX512BW-NEXT: vpshufb %ymm11, %ymm17, %ymm11
-; AVX512BW-NEXT: vpor %ymm5, %ymm11, %ymm5
-; AVX512BW-NEXT: vpshufb %ymm22, %ymm16, %ymm11
-; AVX512BW-NEXT: vpshufb %ymm25, %ymm17, %ymm18
-; AVX512BW-NEXT: vporq %ymm11, %ymm18, %ymm11
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm11[2,3,2,3],zmm5[2,3,2,3]
-; AVX512BW-NEXT: vmovdqa64 (%rsi), %ymm25
+; AVX512BW-NEXT: vpermw %ymm16, %ymm29, %ymm19
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm16 = ymm16[12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm19, %zmm16, %zmm16
+; AVX512BW-NEXT: movabsq $145249953336295682, %r10 # imm = 0x204081020408102
+; AVX512BW-NEXT: kmovq %r10, %k5
+; AVX512BW-NEXT: vmovdqu8 %zmm16, %zmm15 {%k5}
+; AVX512BW-NEXT: movabsq $-4357498600088870461, %r10 # imm = 0xC3870E1C3870E1C3
+; AVX512BW-NEXT: kmovq %r10, %k5
+; AVX512BW-NEXT: vmovdqu8 %zmm15, %zmm2 {%k5}
+; AVX512BW-NEXT: vmovdqa (%rdx), %ymm15
+; AVX512BW-NEXT: vpshufb %ymm4, %ymm15, %ymm4
+; AVX512BW-NEXT: vmovdqa64 (%rcx), %ymm16
+; AVX512BW-NEXT: vpshufb %ymm10, %ymm16, %ymm10
+; AVX512BW-NEXT: vpor %ymm4, %ymm10, %ymm4
+; AVX512BW-NEXT: vpshufb %ymm22, %ymm15, %ymm10
+; AVX512BW-NEXT: vpshufb %ymm23, %ymm16, %ymm19
+; AVX512BW-NEXT: vporq %ymm10, %ymm19, %ymm10
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm4 = zmm10[2,3,2,3],zmm4[2,3,2,3]
+; AVX512BW-NEXT: vmovdqa64 (%rsi), %ymm24
; AVX512BW-NEXT: vmovdqa64 (%rdi), %ymm27
-; AVX512BW-NEXT: vpshuflw {{.*#+}} ymm11 = ymm27[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512BW-NEXT: vpshufd {{.*#+}} ymm11 = ymm11[0,0,1,1,4,4,5,5]
-; AVX512BW-NEXT: vpshufb %ymm21, %ymm25, %ymm11 {%k3}
-; AVX512BW-NEXT: vpshufb %ymm9, %ymm25, %ymm9
-; AVX512BW-NEXT: vpshufb %ymm10, %ymm27, %ymm10
-; AVX512BW-NEXT: vpor %ymm9, %ymm10, %ymm9
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm11[2,3,2,3],zmm9[2,3,2,3]
-; AVX512BW-NEXT: vmovdqu8 %zmm9, %zmm5 {%k1}
-; AVX512BW-NEXT: vmovdqa64 (%r8), %ymm18
-; AVX512BW-NEXT: vpshufb %ymm12, %ymm18, %ymm9
-; AVX512BW-NEXT: vmovdqa64 (%r9), %ymm21
-; AVX512BW-NEXT: vpshufb %ymm13, %ymm21, %ymm10
+; AVX512BW-NEXT: vpshuflw {{.*#+}} ymm10 = ymm27[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512BW-NEXT: vpshufd {{.*#+}} ymm10 = ymm10[0,0,1,1,4,4,5,5]
+; AVX512BW-NEXT: vpshufb %ymm25, %ymm24, %ymm10 {%k3}
+; AVX512BW-NEXT: vpshufb %ymm8, %ymm24, %ymm8
+; AVX512BW-NEXT: vpshufb %ymm9, %ymm27, %ymm9
+; AVX512BW-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm8 = zmm10[2,3,2,3],zmm8[2,3,2,3]
+; AVX512BW-NEXT: vmovdqu8 %zmm8, %zmm4 {%k1}
+; AVX512BW-NEXT: vmovdqa64 (%r8), %ymm19
+; AVX512BW-NEXT: vpshufb %ymm17, %ymm19, %ymm8
+; AVX512BW-NEXT: vmovdqa64 (%r9), %ymm22
+; AVX512BW-NEXT: vpshufb %ymm18, %ymm22, %ymm9
+; AVX512BW-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX512BW-NEXT: vpshufb %ymm26, %ymm19, %ymm9
+; AVX512BW-NEXT: vpshufb %ymm28, %ymm22, %ymm10
; AVX512BW-NEXT: vpor %ymm9, %ymm10, %ymm9
-; AVX512BW-NEXT: vpshufb %ymm26, %ymm18, %ymm10
-; AVX512BW-NEXT: vpshufb %ymm28, %ymm21, %ymm11
-; AVX512BW-NEXT: vpor %ymm10, %ymm11, %ymm10
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm10[2,3,2,3],zmm9[2,3,2,3]
-; AVX512BW-NEXT: vpermw %zmm0, %zmm29, %zmm10
-; AVX512BW-NEXT: movabsq $1161999626690365456, %rax # imm = 0x1020408102040810
-; AVX512BW-NEXT: kmovq %rax, %k5
-; AVX512BW-NEXT: vmovdqu8 %zmm10, %zmm9 {%k5}
-; AVX512BW-NEXT: movabsq $2033499346708139548, %rax # imm = 0x1C3870E1C3870E1C
-; AVX512BW-NEXT: kmovq %rax, %k5
-; AVX512BW-NEXT: vmovdqu8 %zmm9, %zmm5 {%k5}
-; AVX512BW-NEXT: vpshufb %ymm19, %ymm16, %ymm9
-; AVX512BW-NEXT: vpshufb %ymm20, %ymm17, %ymm10
-; AVX512BW-NEXT: vpor %ymm9, %ymm10, %ymm9
-; AVX512BW-NEXT: vmovdqa (%rdx), %xmm10
-; AVX512BW-NEXT: vmovdqa64 32(%rdx), %xmm19
-; AVX512BW-NEXT: vmovdqa (%rcx), %xmm11
-; AVX512BW-NEXT: vmovdqa64 32(%rcx), %xmm20
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm12 = xmm11[8],xmm10[8],xmm11[9],xmm10[9],xmm11[10],xmm10[10],xmm11[11],xmm10[11],xmm11[12],xmm10[12],xmm11[13],xmm10[13],xmm11[14],xmm10[14],xmm11[15],xmm10[15]
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm22 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512BW-NEXT: vpshufb %xmm22, %xmm12, %xmm12
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,1,0,1]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm9, %zmm12, %zmm26
-; AVX512BW-NEXT: vpshufb %ymm23, %ymm27, %ymm9
-; AVX512BW-NEXT: vpshufb %ymm24, %ymm25, %ymm12
-; AVX512BW-NEXT: vpor %ymm9, %ymm12, %ymm9
-; AVX512BW-NEXT: vmovdqa (%rdi), %xmm12
-; AVX512BW-NEXT: vmovdqa (%rsi), %xmm13
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm24 = xmm13[8],xmm12[8],xmm13[9],xmm12[9],xmm13[10],xmm12[10],xmm13[11],xmm12[11],xmm13[12],xmm12[12],xmm13[13],xmm12[13],xmm13[14],xmm12[14],xmm13[15],xmm12[15]
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm23 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512BW-NEXT: vpshufb %xmm23, %xmm24, %xmm24
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm24 = ymm24[0,1,0,1]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm9, %zmm24, %zmm9
-; AVX512BW-NEXT: movabsq $435749860008887046, %rax # imm = 0x60C183060C18306
-; AVX512BW-NEXT: kmovq %rax, %k5
-; AVX512BW-NEXT: vmovdqu8 %zmm26, %zmm9 {%k5}
-; AVX512BW-NEXT: vpshufb %ymm14, %ymm21, %ymm14
-; AVX512BW-NEXT: vpshufb %ymm15, %ymm18, %ymm15
-; AVX512BW-NEXT: vporq %ymm14, %ymm15, %ymm24
-; AVX512BW-NEXT: vmovdqa (%r9), %xmm14
-; AVX512BW-NEXT: vmovdqa (%r8), %xmm15
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm28 = xmm15[8],xmm14[8],xmm15[9],xmm14[9],xmm15[10],xmm14[10],xmm15[11],xmm14[11],xmm15[12],xmm14[12],xmm15[13],xmm14[13],xmm15[14],xmm14[14],xmm15[15],xmm14[15]
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm26 = [u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512BW-NEXT: vpshufb %xmm26, %xmm28, %xmm28
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm8 = zmm9[2,3,2,3],zmm8[2,3,2,3]
+; AVX512BW-NEXT: vpermw %zmm0, %zmm29, %zmm9
+; AVX512BW-NEXT: movabsq $1161999626690365456, %r10 # imm = 0x1020408102040810
+; AVX512BW-NEXT: kmovq %r10, %k5
+; AVX512BW-NEXT: vmovdqu8 %zmm9, %zmm8 {%k5}
+; AVX512BW-NEXT: movabsq $2033499346708139548, %r10 # imm = 0x1C3870E1C3870E1C
+; AVX512BW-NEXT: kmovq %r10, %k5
+; AVX512BW-NEXT: vmovdqu8 %zmm8, %zmm4 {%k5}
+; AVX512BW-NEXT: vpshufb %ymm11, %ymm15, %ymm8
+; AVX512BW-NEXT: vpshufb %ymm12, %ymm16, %ymm9
+; AVX512BW-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX512BW-NEXT: vmovdqa (%rdx), %xmm9
+; AVX512BW-NEXT: vmovdqa64 32(%rdx), %xmm17
+; AVX512BW-NEXT: vmovdqa (%rcx), %xmm10
+; AVX512BW-NEXT: vmovdqa64 32(%rcx), %xmm18
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm23 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
+; AVX512BW-NEXT: vpshufb %xmm23, %xmm11, %xmm11
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,1,0,1]
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm8, %zmm11, %zmm26
+; AVX512BW-NEXT: vpshufb %ymm20, %ymm27, %ymm8
+; AVX512BW-NEXT: vpshufb %ymm21, %ymm24, %ymm11
+; AVX512BW-NEXT: vpor %ymm8, %ymm11, %ymm8
+; AVX512BW-NEXT: vmovdqa (%rdi), %xmm11
+; AVX512BW-NEXT: vmovdqa64 32(%rdi), %xmm20
+; AVX512BW-NEXT: vmovdqa (%rsi), %xmm12
+; AVX512BW-NEXT: vmovdqa64 32(%rsi), %xmm21
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm28 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm25 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512BW-NEXT: vpshufb %xmm25, %xmm28, %xmm28
; AVX512BW-NEXT: vpermq {{.*#+}} ymm28 = ymm28[0,1,0,1]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm24, %zmm28, %zmm24
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm28 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6,6,1,6,0,7,1,0,7,14,9,14,8,15,9,8,15]
-; AVX512BW-NEXT: vpermw %zmm0, %zmm28, %zmm28
-; AVX512BW-NEXT: movabsq $2323999253380730912, %rax # imm = 0x2040810204081020
-; AVX512BW-NEXT: kmovq %rax, %k5
-; AVX512BW-NEXT: vmovdqu8 %zmm28, %zmm24 {%k5}
-; AVX512BW-NEXT: vmovdqa64 32(%rdi), %xmm28
-; AVX512BW-NEXT: movabsq $4066998693416279096, %rax # imm = 0x3870E1C3870E1C38
-; AVX512BW-NEXT: kmovq %rax, %k5
-; AVX512BW-NEXT: vmovdqu8 %zmm24, %zmm9 {%k5}
-; AVX512BW-NEXT: vmovdqa64 32(%rsi), %xmm29
-; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm24 = ymm27[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
-; AVX512BW-NEXT: vpshufd {{.*#+}} ymm27 = ymm24[2,2,3,3,6,6,7,7]
-; AVX512BW-NEXT: vpshufb %ymm4, %ymm25, %ymm27 {%k2}
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm28[0],xmm29[0],xmm28[1],xmm29[1],xmm28[2],xmm29[2],xmm28[3],xmm29[3],xmm28[4],xmm29[4],xmm28[5],xmm29[5],xmm28[6],xmm29[6],xmm28[7],xmm29[7]
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm8, %zmm28, %zmm8
+; AVX512BW-NEXT: movabsq $435749860008887046, %rcx # imm = 0x60C183060C18306
+; AVX512BW-NEXT: kmovq %rcx, %k5
+; AVX512BW-NEXT: vmovdqu8 %zmm26, %zmm8 {%k5}
+; AVX512BW-NEXT: vpshufb %ymm13, %ymm22, %ymm13
+; AVX512BW-NEXT: vpshufb %ymm14, %ymm19, %ymm14
+; AVX512BW-NEXT: vporq %ymm13, %ymm14, %ymm30
+; AVX512BW-NEXT: vmovdqa (%r9), %xmm13
+; AVX512BW-NEXT: vmovdqa64 32(%r9), %xmm26
+; AVX512BW-NEXT: vmovdqa (%r8), %xmm14
+; AVX512BW-NEXT: vmovdqa64 32(%r8), %xmm28
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm31 = xmm14[8],xmm13[8],xmm14[9],xmm13[9],xmm14[10],xmm13[10],xmm14[11],xmm13[11],xmm14[12],xmm13[12],xmm14[13],xmm13[13],xmm14[14],xmm13[14],xmm14[15],xmm13[15]
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm29 = [u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512BW-NEXT: vpshufb %xmm29, %xmm31, %xmm31
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm31 = ymm31[0,1,0,1]
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm30, %zmm31, %zmm30
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm31 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6,6,1,6,0,7,1,0,7,14,9,14,8,15,9,8,15]
+; AVX512BW-NEXT: vpermw %zmm0, %zmm31, %zmm31
+; AVX512BW-NEXT: movabsq $2323999253380730912, %rcx # imm = 0x2040810204081020
+; AVX512BW-NEXT: kmovq %rcx, %k5
+; AVX512BW-NEXT: vmovdqu8 %zmm31, %zmm30 {%k5}
+; AVX512BW-NEXT: movabsq $4066998693416279096, %rcx # imm = 0x3870E1C3870E1C38
+; AVX512BW-NEXT: kmovq %rcx, %k5
+; AVX512BW-NEXT: vmovdqu8 %zmm30, %zmm8 {%k5}
+; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm27 = ymm27[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
+; AVX512BW-NEXT: vpshufd {{.*#+}} ymm27 = ymm27[2,2,3,3,6,6,7,7]
+; AVX512BW-NEXT: vpshufb %ymm3, %ymm24, %ymm27 {%k2}
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm20[0],xmm21[0],xmm20[1],xmm21[1],xmm20[2],xmm21[2],xmm20[3],xmm21[3],xmm20[4],xmm21[4],xmm20[5],xmm21[5],xmm20[6],xmm21[6],xmm20[7],xmm21[7]
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm24 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512BW-NEXT: vpshufb %xmm24, %xmm4, %xmm4
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm27 = zmm27[2,3,2,3],zmm4[0,1,0,1]
-; AVX512BW-NEXT: vmovdqa64 32(%r9), %xmm25
-; AVX512BW-NEXT: vpshufb %ymm6, %ymm17, %ymm4
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm19[0],xmm20[0],xmm19[1],xmm20[1],xmm19[2],xmm20[2],xmm19[3],xmm20[3],xmm19[4],xmm20[4],xmm19[5],xmm20[5],xmm19[6],xmm20[6],xmm19[7],xmm20[7]
-; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm16 = ymm16[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512BW-NEXT: vpshufd {{.*#+}} ymm16 = ymm16[0,2,3,3,4,6,7,7]
-; AVX512BW-NEXT: vmovdqu8 %ymm16, %ymm4 {%k3}
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm16 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512BW-NEXT: vpshufb %xmm16, %xmm6, %xmm6
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm4 = zmm4[2,3,2,3],zmm6[0,1,0,1]
-; AVX512BW-NEXT: vmovdqa64 32(%r8), %xmm17
-; AVX512BW-NEXT: vmovdqu8 %zmm27, %zmm4 {%k4}
-; AVX512BW-NEXT: vpshufb %ymm7, %ymm21, %ymm6
-; AVX512BW-NEXT: vpshufb %ymm8, %ymm18, %ymm7
-; AVX512BW-NEXT: vpor %ymm6, %ymm7, %ymm7
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm17[0],xmm25[0],xmm17[1],xmm25[1],xmm17[2],xmm25[2],xmm17[3],xmm25[3],xmm17[4],xmm25[4],xmm17[5],xmm25[5],xmm17[6],xmm25[6],xmm17[7],xmm25[7]
-; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm6 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512BW-NEXT: vpshufb %xmm6, %xmm8, %xmm8
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm7[2,3,2,3],zmm8[0,1,0,1]
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm8 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
-; AVX512BW-NEXT: vpermw %zmm0, %zmm8, %zmm8
-; AVX512BW-NEXT: movabsq $580999813345182728, %rax # imm = 0x810204081020408
-; AVX512BW-NEXT: kmovq %rax, %k2
-; AVX512BW-NEXT: vmovdqu8 %zmm8, %zmm7 {%k2}
-; AVX512BW-NEXT: movabsq $1016749673354069774, %rax # imm = 0xE1C3870E1C3870E
-; AVX512BW-NEXT: kmovq %rax, %k2
-; AVX512BW-NEXT: vmovdqu8 %zmm7, %zmm4 {%k2}
-; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm7 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
-; AVX512BW-NEXT: vpshufb %xmm7, %xmm20, %xmm8
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm18 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
-; AVX512BW-NEXT: vpshufb %xmm18, %xmm19, %xmm21
-; AVX512BW-NEXT: vporq %xmm8, %xmm21, %xmm8
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm19 = xmm20[8],xmm19[8],xmm20[9],xmm19[9],xmm20[10],xmm19[10],xmm20[11],xmm19[11],xmm20[12],xmm19[12],xmm20[13],xmm19[13],xmm20[14],xmm19[14],xmm20[15],xmm19[15]
-; AVX512BW-NEXT: vpshufb %xmm22, %xmm19, %xmm19
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm8 = zmm8[0,1,0,1],zmm19[0,1,0,1]
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm19 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
-; AVX512BW-NEXT: vpshufb %xmm19, %xmm29, %xmm20
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm21 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
-; AVX512BW-NEXT: vpshufb %xmm21, %xmm28, %xmm22
-; AVX512BW-NEXT: vporq %xmm20, %xmm22, %xmm20
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm22 = xmm29[8],xmm28[8],xmm29[9],xmm28[9],xmm29[10],xmm28[10],xmm29[11],xmm28[11],xmm29[12],xmm28[12],xmm29[13],xmm28[13],xmm29[14],xmm28[14],xmm29[15],xmm28[15]
-; AVX512BW-NEXT: vpshufb %xmm23, %xmm22, %xmm22
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm20 = zmm20[0,1,0,1],zmm22[0,1,0,1]
-; AVX512BW-NEXT: vmovdqu8 %zmm8, %zmm20 {%k1}
-; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm8 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
-; AVX512BW-NEXT: vpshufb %xmm8, %xmm25, %xmm22
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm23 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
-; AVX512BW-NEXT: vpshufb %xmm23, %xmm17, %xmm27
-; AVX512BW-NEXT: vporq %xmm22, %xmm27, %xmm22
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm17 = xmm17[8],xmm25[8],xmm17[9],xmm25[9],xmm17[10],xmm25[10],xmm17[11],xmm25[11],xmm17[12],xmm25[12],xmm17[13],xmm25[13],xmm17[14],xmm25[14],xmm17[15],xmm25[15]
-; AVX512BW-NEXT: vpshufb %xmm26, %xmm17, %xmm17
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm17 = zmm22[0,1,0,1],zmm17[0,1,0,1]
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm22 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4,4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
-; AVX512BW-NEXT: vpermw %zmm3, %zmm22, %zmm3
-; AVX512BW-NEXT: movabsq $290499906672591364, %rax # imm = 0x408102040810204
-; AVX512BW-NEXT: kmovq %rax, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm17 {%k1}
-; AVX512BW-NEXT: movabsq $-8714997200177740921, %rax # imm = 0x870E1C3870E1C387
-; AVX512BW-NEXT: kmovq %rax, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm17, %zmm20 {%k1}
-; AVX512BW-NEXT: vpshufb %xmm7, %xmm11, %xmm3
-; AVX512BW-NEXT: vpshufb %xmm18, %xmm10, %xmm7
-; AVX512BW-NEXT: vpor %xmm3, %xmm7, %xmm3
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3],xmm10[4],xmm11[4],xmm10[5],xmm11[5],xmm10[6],xmm11[6],xmm10[7],xmm11[7]
-; AVX512BW-NEXT: vpshufb %xmm16, %xmm7, %xmm7
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm7[0,1,0,1],zmm3[0,1,0,1]
-; AVX512BW-NEXT: vpshufb %xmm19, %xmm13, %xmm7
-; AVX512BW-NEXT: vpshufb %xmm21, %xmm12, %xmm10
-; AVX512BW-NEXT: vpor %xmm7, %xmm10, %xmm7
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm12[0],xmm13[0],xmm12[1],xmm13[1],xmm12[2],xmm13[2],xmm12[3],xmm13[3],xmm12[4],xmm13[4],xmm12[5],xmm13[5],xmm12[6],xmm13[6],xmm12[7],xmm13[7]
+; AVX512BW-NEXT: vpshufb %xmm24, %xmm3, %xmm3
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm30 = zmm27[2,3,2,3],zmm3[0,1,0,1]
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm17[0],xmm18[0],xmm17[1],xmm18[1],xmm17[2],xmm18[2],xmm17[3],xmm18[3],xmm17[4],xmm18[4],xmm17[5],xmm18[5],xmm17[6],xmm18[6],xmm17[7],xmm18[7]
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm27 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
+; AVX512BW-NEXT: vpshufb %xmm27, %xmm3, %xmm3
+; AVX512BW-NEXT: vpshufb %ymm5, %ymm16, %ymm5
+; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm15 = ymm15[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512BW-NEXT: vpshufd {{.*#+}} ymm15 = ymm15[0,2,3,3,4,6,7,7]
+; AVX512BW-NEXT: vmovdqu8 %ymm15, %ymm5 {%k3}
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm5[2,3,2,3],zmm3[0,1,0,1]
+; AVX512BW-NEXT: vmovdqu8 %zmm30, %zmm3 {%k4}
+; AVX512BW-NEXT: vpshufb %ymm6, %ymm22, %ymm5
+; AVX512BW-NEXT: vpshufb %ymm7, %ymm19, %ymm6
+; AVX512BW-NEXT: vpor %ymm5, %ymm6, %ymm6
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm28[0],xmm26[0],xmm28[1],xmm26[1],xmm28[2],xmm26[2],xmm28[3],xmm26[3],xmm28[4],xmm26[4],xmm28[5],xmm26[5],xmm28[6],xmm26[6],xmm28[7],xmm26[7]
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm5 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512BW-NEXT: vpshufb %xmm5, %xmm7, %xmm7
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm6 = zmm6[2,3,2,3],zmm7[0,1,0,1]
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm7 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
+; AVX512BW-NEXT: vpermw %zmm0, %zmm7, %zmm7
+; AVX512BW-NEXT: movabsq $580999813345182728, %rcx # imm = 0x810204081020408
+; AVX512BW-NEXT: kmovq %rcx, %k2
+; AVX512BW-NEXT: vmovdqu8 %zmm7, %zmm6 {%k2}
+; AVX512BW-NEXT: movabsq $1016749673354069774, %rcx # imm = 0xE1C3870E1C3870E
+; AVX512BW-NEXT: kmovq %rcx, %k2
+; AVX512BW-NEXT: vmovdqu8 %zmm6, %zmm3 {%k2}
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm6 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
+; AVX512BW-NEXT: vpshufb %xmm6, %xmm18, %xmm7
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm15 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
+; AVX512BW-NEXT: vpshufb %xmm15, %xmm17, %xmm16
+; AVX512BW-NEXT: vporq %xmm7, %xmm16, %xmm7
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm16 = xmm18[8],xmm17[8],xmm18[9],xmm17[9],xmm18[10],xmm17[10],xmm18[11],xmm17[11],xmm18[12],xmm17[12],xmm18[13],xmm17[13],xmm18[14],xmm17[14],xmm18[15],xmm17[15]
+; AVX512BW-NEXT: vpshufb %xmm23, %xmm16, %xmm16
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm7[0,1,0,1],zmm16[0,1,0,1]
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm16 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
+; AVX512BW-NEXT: vpshufb %xmm16, %xmm21, %xmm17
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm18 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
+; AVX512BW-NEXT: vpshufb %xmm18, %xmm20, %xmm19
+; AVX512BW-NEXT: vporq %xmm17, %xmm19, %xmm17
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm19 = xmm21[8],xmm20[8],xmm21[9],xmm20[9],xmm21[10],xmm20[10],xmm21[11],xmm20[11],xmm21[12],xmm20[12],xmm21[13],xmm20[13],xmm21[14],xmm20[14],xmm21[15],xmm20[15]
+; AVX512BW-NEXT: vpshufb %xmm25, %xmm19, %xmm19
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm17 = zmm17[0,1,0,1],zmm19[0,1,0,1]
+; AVX512BW-NEXT: vmovdqu8 %zmm7, %zmm17 {%k1}
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm7 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
+; AVX512BW-NEXT: vpshufb %xmm7, %xmm26, %xmm19
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm20 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
+; AVX512BW-NEXT: vpshufb %xmm20, %xmm28, %xmm21
+; AVX512BW-NEXT: vporq %xmm19, %xmm21, %xmm19
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm21 = xmm28[8],xmm26[8],xmm28[9],xmm26[9],xmm28[10],xmm26[10],xmm28[11],xmm26[11],xmm28[12],xmm26[12],xmm28[13],xmm26[13],xmm28[14],xmm26[14],xmm28[15],xmm26[15]
+; AVX512BW-NEXT: vpshufb %xmm29, %xmm21, %xmm21
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm19 = zmm19[0,1,0,1],zmm21[0,1,0,1]
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm21 = [18,18,18,20,18,18,18,20,19,19,19,19,18,18,18,20,20,21,20,21,21,22,21,22,20,21,20,21,21,22,21,22]
+; AVX512BW-NEXT: vpermw %zmm0, %zmm21, %zmm21
+; AVX512BW-NEXT: movabsq $290499906672591364, %rcx # imm = 0x408102040810204
+; AVX512BW-NEXT: kmovq %rcx, %k1
+; AVX512BW-NEXT: vmovdqu8 %zmm21, %zmm19 {%k1}
+; AVX512BW-NEXT: movabsq $-8714997200177740921, %rcx # imm = 0x870E1C3870E1C387
+; AVX512BW-NEXT: kmovq %rcx, %k1
+; AVX512BW-NEXT: vmovdqu8 %zmm19, %zmm17 {%k1}
+; AVX512BW-NEXT: vpshufb %xmm6, %xmm10, %xmm6
+; AVX512BW-NEXT: vpshufb %xmm15, %xmm9, %xmm15
+; AVX512BW-NEXT: vpor %xmm6, %xmm15, %xmm6
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3],xmm9[4],xmm10[4],xmm9[5],xmm10[5],xmm9[6],xmm10[6],xmm9[7],xmm10[7]
+; AVX512BW-NEXT: vpshufb %xmm27, %xmm9, %xmm9
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm6 = zmm9[0,1,0,1],zmm6[0,1,0,1]
+; AVX512BW-NEXT: vpshufb %xmm16, %xmm12, %xmm9
+; AVX512BW-NEXT: vpshufb %xmm18, %xmm11, %xmm10
+; AVX512BW-NEXT: vpor %xmm9, %xmm10, %xmm9
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm11[0],xmm12[0],xmm11[1],xmm12[1],xmm11[2],xmm12[2],xmm11[3],xmm12[3],xmm11[4],xmm12[4],xmm11[5],xmm12[5],xmm11[6],xmm12[6],xmm11[7],xmm12[7]
; AVX512BW-NEXT: vpshufb %xmm24, %xmm10, %xmm10
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm10[0,1,0,1],zmm7[0,1,0,1]
-; AVX512BW-NEXT: movabsq $871499720017774092, %rax # imm = 0xC183060C183060C
-; AVX512BW-NEXT: kmovq %rax, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm7 {%k1}
-; AVX512BW-NEXT: vpshufb %xmm8, %xmm14, %xmm3
-; AVX512BW-NEXT: vpshufb %xmm23, %xmm15, %xmm8
-; AVX512BW-NEXT: vpor %xmm3, %xmm8, %xmm3
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7]
-; AVX512BW-NEXT: vpshufb %xmm6, %xmm8, %xmm6
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm6[0,1,0,1],zmm3[0,1,0,1]
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm10[0,1,0,1],zmm9[0,1,0,1]
+; AVX512BW-NEXT: movabsq $871499720017774092, %rcx # imm = 0xC183060C183060C
+; AVX512BW-NEXT: kmovq %rcx, %k1
+; AVX512BW-NEXT: vmovdqu8 %zmm6, %zmm9 {%k1}
+; AVX512BW-NEXT: vpshufb %xmm7, %xmm13, %xmm6
+; AVX512BW-NEXT: vpshufb %xmm20, %xmm14, %xmm7
+; AVX512BW-NEXT: vpor %xmm6, %xmm7, %xmm6
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
+; AVX512BW-NEXT: vpshufb %xmm5, %xmm7, %xmm5
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,0,1],zmm6[0,1,0,1]
; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm6 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4]
; AVX512BW-NEXT: vpermw %zmm0, %zmm6, %zmm0
-; AVX512BW-NEXT: movabsq $4647998506761461824, %rax # imm = 0x4081020408102040
-; AVX512BW-NEXT: kmovq %rax, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm0, %zmm3 {%k1}
-; AVX512BW-NEXT: movabsq $8133997386832558192, %rax # imm = 0x70E1C3870E1C3870
-; AVX512BW-NEXT: kmovq %rax, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm7 {%k1}
-; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT: vmovdqa64 %zmm7, (%rax)
-; AVX512BW-NEXT: vmovdqa64 %zmm20, 256(%rax)
-; AVX512BW-NEXT: vmovdqa64 %zmm5, 128(%rax)
+; AVX512BW-NEXT: movabsq $4647998506761461824, %rcx # imm = 0x4081020408102040
+; AVX512BW-NEXT: kmovq %rcx, %k1
+; AVX512BW-NEXT: vmovdqu8 %zmm0, %zmm5 {%k1}
+; AVX512BW-NEXT: movabsq $8133997386832558192, %rcx # imm = 0x70E1C3870E1C3870
+; AVX512BW-NEXT: kmovq %rcx, %k1
+; AVX512BW-NEXT: vmovdqu8 %zmm5, %zmm9 {%k1}
+; AVX512BW-NEXT: vmovdqa64 %zmm9, (%rax)
+; AVX512BW-NEXT: vmovdqa64 %zmm17, 256(%rax)
+; AVX512BW-NEXT: vmovdqa64 %zmm4, 128(%rax)
; AVX512BW-NEXT: vmovdqa64 %zmm2, 320(%rax)
-; AVX512BW-NEXT: vmovdqa64 %zmm4, 192(%rax)
-; AVX512BW-NEXT: vmovdqa64 %zmm9, 64(%rax)
+; AVX512BW-NEXT: vmovdqa64 %zmm3, 192(%rax)
+; AVX512BW-NEXT: vmovdqa64 %zmm8, 64(%rax)
; AVX512BW-NEXT: vmovdqa64 %zmm1, 384(%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -10232,222 +10232,222 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP-LABEL: store_i8_stride7_vf64:
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rcx), %ymm1
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rax), %zmm0
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rcx), %ymm2
; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [27,128,128,128,128,30,128,28,128,128,128,128,31,128,29,128,27,128,128,128,128,30,128,28,128,128,128,128,31,128,29,128]
; AVX512BW-FCP-NEXT: # ymm3 = mem[0,1,0,1]
-; AVX512BW-FCP-NEXT: vpshufb %ymm3, %ymm1, %ymm0
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rdx), %ymm11
+; AVX512BW-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm1
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rdx), %ymm13
; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,30,128,28,128,128,128,128,31,128,29,128,128]
-; AVX512BW-FCP-NEXT: vpshufb %ymm4, %ymm11, %ymm2
-; AVX512BW-FCP-NEXT: vpor %ymm0, %ymm2, %ymm0
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
-; AVX512BW-FCP-NEXT: vpshufb %ymm8, %ymm11, %ymm2
+; AVX512BW-FCP-NEXT: vpshufb %ymm4, %ymm13, %ymm5
+; AVX512BW-FCP-NEXT: vpor %ymm1, %ymm5, %ymm1
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
+; AVX512BW-FCP-NEXT: vpshufb %ymm7, %ymm13, %ymm5
; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
; AVX512BW-FCP-NEXT: # ymm10 = mem[0,1,0,1]
-; AVX512BW-FCP-NEXT: vpshufb %ymm10, %ymm1, %ymm5
-; AVX512BW-FCP-NEXT: vpor %ymm2, %ymm5, %ymm2
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[2,3,2,3],zmm0[2,3,2,3]
-; AVX512BW-FCP-NEXT: vmovdqa64 32(%rdi), %ymm16
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,28,29,30,128,28,128,30,31,30,31,128,29,128,31,28,29]
-; AVX512BW-FCP-NEXT: vpshufb %ymm7, %ymm16, %ymm0
-; AVX512BW-FCP-NEXT: vmovdqa64 32(%rsi), %ymm17
+; AVX512BW-FCP-NEXT: vpshufb %ymm10, %ymm2, %ymm6
+; AVX512BW-FCP-NEXT: vpor %ymm5, %ymm6, %ymm5
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm5[2,3,2,3],zmm1[2,3,2,3]
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm15
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,28,29,30,128,28,128,30,31,30,31,128,29,128,31,28,29]
+; AVX512BW-FCP-NEXT: vpshufb %ymm8, %ymm15, %ymm1
+; AVX512BW-FCP-NEXT: vmovdqa64 32(%rsi), %ymm16
; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [128,128,128,30,128,28,128,128,128,128,31,128,29,128,128,128,128,128,128,30,128,28,128,128,128,128,31,128,29,128,128,128]
; AVX512BW-FCP-NEXT: # ymm9 = mem[0,1,0,1]
-; AVX512BW-FCP-NEXT: vpshufb %ymm9, %ymm17, %ymm5
-; AVX512BW-FCP-NEXT: vpor %ymm0, %ymm5, %ymm0
-; AVX512BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} ymm19 = [25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
-; AVX512BW-FCP-NEXT: # ymm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-FCP-NEXT: vpshufb %ymm19, %ymm17, %ymm5
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm20 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
-; AVX512BW-FCP-NEXT: vpshufb %ymm20, %ymm16, %ymm6
-; AVX512BW-FCP-NEXT: vpor %ymm5, %ymm6, %ymm5
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm5[2,3,2,3],zmm0[2,3,2,3]
+; AVX512BW-FCP-NEXT: vpshufb %ymm9, %ymm16, %ymm6
+; AVX512BW-FCP-NEXT: vpor %ymm1, %ymm6, %ymm1
+; AVX512BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} ymm18 = [25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
+; AVX512BW-FCP-NEXT: # ymm18 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-FCP-NEXT: vpshufb %ymm18, %ymm16, %ymm6
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm19 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
+; AVX512BW-FCP-NEXT: vpshufb %ymm19, %ymm15, %ymm11
+; AVX512BW-FCP-NEXT: vpor %ymm6, %ymm11, %ymm6
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm6[2,3,2,3],zmm1[2,3,2,3]
; AVX512BW-FCP-NEXT: movabsq $1742999440035548184, %r10 # imm = 0x183060C183060C18
; AVX512BW-FCP-NEXT: kmovq %r10, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm2, %zmm0 {%k1}
-; AVX512BW-FCP-NEXT: vmovdqa64 32(%r9), %ymm18
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm5, %zmm1 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqa64 32(%r9), %ymm17
; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [29,128,27,128,128,128,128,30,128,28,128,128,128,128,31,128,29,128,27,128,128,128,128,30,128,28,128,128,128,128,31,128]
; AVX512BW-FCP-NEXT: # ymm5 = mem[0,1,0,1]
-; AVX512BW-FCP-NEXT: vpshufb %ymm5, %ymm18, %ymm2
+; AVX512BW-FCP-NEXT: vpshufb %ymm5, %ymm17, %ymm11
; AVX512BW-FCP-NEXT: vmovdqa64 32(%r8), %ymm22
; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,27,128,128,128,128,30,128,28,128,128,128,128,31,128,29]
; AVX512BW-FCP-NEXT: vpshufb %ymm6, %ymm22, %ymm12
-; AVX512BW-FCP-NEXT: vpor %ymm2, %ymm12, %ymm2
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
-; AVX512BW-FCP-NEXT: vpshufb %ymm12, %ymm22, %ymm14
-; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm13 = [128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
-; AVX512BW-FCP-NEXT: # ymm13 = mem[0,1,0,1]
-; AVX512BW-FCP-NEXT: vpshufb %ymm13, %ymm18, %ymm15
-; AVX512BW-FCP-NEXT: vpor %ymm14, %ymm15, %ymm14
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm14[2,3,2,3],zmm2[2,3,2,3]
+; AVX512BW-FCP-NEXT: vpor %ymm11, %ymm12, %ymm11
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm20 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
+; AVX512BW-FCP-NEXT: vpshufb %ymm20, %ymm22, %ymm12
+; AVX512BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} ymm21 = [128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
+; AVX512BW-FCP-NEXT: # ymm21 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-FCP-NEXT: vpshufb %ymm21, %ymm17, %ymm14
+; AVX512BW-FCP-NEXT: vpor %ymm12, %ymm14, %ymm12
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm11 = zmm12[2,3,2,3],zmm11[2,3,2,3]
; AVX512BW-FCP-NEXT: movabsq $6971997760142192736, %r10 # imm = 0x60C183060C183060
; AVX512BW-FCP-NEXT: kmovq %r10, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm2, %zmm0 {%k1}
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rax), %ymm2
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm14 = [11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12,14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15]
-; AVX512BW-FCP-NEXT: vpermw %zmm2, %zmm14, %zmm14
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm11, %zmm1 {%k1}
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm11 = [27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28,30,29,30,31,31,30,30,31,30,29,30,31,31,30,30,31]
+; AVX512BW-FCP-NEXT: vpermw %zmm0, %zmm11, %zmm11
; AVX512BW-FCP-NEXT: movabsq $-9150747060186627967, %r10 # imm = 0x8102040810204081
; AVX512BW-FCP-NEXT: kmovq %r10, %k2
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm14, %zmm0 {%k2}
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm14 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
-; AVX512BW-FCP-NEXT: vpshufb %ymm14, %ymm11, %ymm15
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm21 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512BW-FCP-NEXT: vpshufb %ymm21, %ymm1, %ymm23
-; AVX512BW-FCP-NEXT: vporq %ymm15, %ymm23, %ymm15
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm11, %zmm1 {%k2}
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
+; AVX512BW-FCP-NEXT: vpshufb %ymm11, %ymm13, %ymm14
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
+; AVX512BW-FCP-NEXT: vpshufb %ymm12, %ymm2, %ymm23
+; AVX512BW-FCP-NEXT: vporq %ymm14, %ymm23, %ymm14
; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm24 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128]
-; AVX512BW-FCP-NEXT: vpshufb %ymm24, %ymm11, %ymm11
+; AVX512BW-FCP-NEXT: vpshufb %ymm24, %ymm13, %ymm13
; AVX512BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} ymm25 = [128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20]
; AVX512BW-FCP-NEXT: # ymm25 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-FCP-NEXT: vpshufb %ymm25, %ymm1, %ymm1
-; AVX512BW-FCP-NEXT: vpor %ymm1, %ymm11, %ymm1
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm15, %zmm23
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
-; AVX512BW-FCP-NEXT: vpshufb %ymm11, %ymm16, %ymm1
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm15 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
-; AVX512BW-FCP-NEXT: vpshufb %ymm15, %ymm17, %ymm26
-; AVX512BW-FCP-NEXT: vporq %ymm1, %ymm26, %ymm1
+; AVX512BW-FCP-NEXT: vpshufb %ymm25, %ymm2, %ymm2
+; AVX512BW-FCP-NEXT: vpor %ymm2, %ymm13, %ymm2
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm14, %zmm23
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm13 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
+; AVX512BW-FCP-NEXT: vpshufb %ymm13, %ymm15, %ymm2
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm14 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
+; AVX512BW-FCP-NEXT: vpshufb %ymm14, %ymm16, %ymm26
+; AVX512BW-FCP-NEXT: vporq %ymm2, %ymm26, %ymm2
; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm26 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23]
-; AVX512BW-FCP-NEXT: vpshufb %ymm26, %ymm16, %ymm16
+; AVX512BW-FCP-NEXT: vpshufb %ymm26, %ymm15, %ymm15
; AVX512BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} ymm27 = [128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128]
; AVX512BW-FCP-NEXT: # ymm27 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-FCP-NEXT: vpshufb %ymm27, %ymm17, %ymm17
-; AVX512BW-FCP-NEXT: vporq %ymm16, %ymm17, %ymm16
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm16 = ymm16[2,3,2,3]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm16, %zmm1, %zmm1
+; AVX512BW-FCP-NEXT: vpshufb %ymm27, %ymm16, %ymm16
+; AVX512BW-FCP-NEXT: vporq %ymm15, %ymm16, %ymm15
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm15, %zmm2, %zmm2
; AVX512BW-FCP-NEXT: movabsq $3485998880071096368, %r10 # imm = 0x3060C183060C1830
; AVX512BW-FCP-NEXT: kmovq %r10, %k2
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm23, %zmm1 {%k2}
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm16 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
-; AVX512BW-FCP-NEXT: vpshufb %ymm16, %ymm18, %ymm23
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm17 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
-; AVX512BW-FCP-NEXT: vpshufb %ymm17, %ymm22, %ymm28
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm23, %zmm2 {%k2}
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} ymm15 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
+; AVX512BW-FCP-NEXT: vpshufb %ymm15, %ymm17, %ymm23
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm16 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
+; AVX512BW-FCP-NEXT: vpshufb %ymm16, %ymm22, %ymm28
; AVX512BW-FCP-NEXT: vporq %ymm23, %ymm28, %ymm23
; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm28 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128]
; AVX512BW-FCP-NEXT: vpshufb %ymm28, %ymm22, %ymm22
; AVX512BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} ymm29 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
; AVX512BW-FCP-NEXT: # ymm29 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-FCP-NEXT: vpshufb %ymm29, %ymm18, %ymm18
-; AVX512BW-FCP-NEXT: vporq %ymm22, %ymm18, %ymm18
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm18 = ymm18[2,3,2,3]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm18, %zmm23, %zmm18
+; AVX512BW-FCP-NEXT: vpshufb %ymm29, %ymm17, %ymm17
+; AVX512BW-FCP-NEXT: vporq %ymm22, %ymm17, %ymm17
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm17 = ymm17[2,3,2,3]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm17, %zmm23, %zmm17
+; AVX512BW-FCP-NEXT: vmovdqa64 32(%rax), %ymm22
; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm30 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12]
-; AVX512BW-FCP-NEXT: vpermw %ymm2, %ymm30, %ymm22
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm23 = ymm2[12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm22, %zmm23, %zmm22
-; AVX512BW-FCP-NEXT: movabsq $145249953336295682, %r10 # imm = 0x204081020408102
-; AVX512BW-FCP-NEXT: kmovq %r10, %k3
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm22, %zmm18 {%k3}
-; AVX512BW-FCP-NEXT: movabsq $-4357498600088870461, %r10 # imm = 0xC3870E1C3870E1C3
-; AVX512BW-FCP-NEXT: kmovq %r10, %k3
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm18, %zmm1 {%k3}
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rsi), %ymm18
-; AVX512BW-FCP-NEXT: vpshufb %ymm19, %ymm18, %ymm19
+; AVX512BW-FCP-NEXT: vpermw %ymm22, %ymm30, %ymm23
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm22 = ymm22[12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm23, %zmm22, %zmm22
+; AVX512BW-FCP-NEXT: movabsq $145249953336295682, %rax # imm = 0x204081020408102
+; AVX512BW-FCP-NEXT: kmovq %rax, %k3
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm22, %zmm17 {%k3}
+; AVX512BW-FCP-NEXT: movabsq $-4357498600088870461, %rax # imm = 0xC3870E1C3870E1C3
+; AVX512BW-FCP-NEXT: kmovq %rax, %k3
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm17, %zmm2 {%k3}
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rsi), %ymm17
+; AVX512BW-FCP-NEXT: vpshufb %ymm18, %ymm17, %ymm18
; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %ymm22
-; AVX512BW-FCP-NEXT: vpshufb %ymm20, %ymm22, %ymm20
-; AVX512BW-FCP-NEXT: vporq %ymm19, %ymm20, %ymm19
-; AVX512BW-FCP-NEXT: vpshufb %ymm26, %ymm22, %ymm20
-; AVX512BW-FCP-NEXT: vpshufb %ymm27, %ymm18, %ymm23
-; AVX512BW-FCP-NEXT: vporq %ymm20, %ymm23, %ymm20
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm20 = zmm20[2,3,2,3],zmm19[2,3,2,3]
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rdx), %ymm19
-; AVX512BW-FCP-NEXT: vpshufb %ymm8, %ymm19, %ymm8
+; AVX512BW-FCP-NEXT: vpshufb %ymm19, %ymm22, %ymm19
+; AVX512BW-FCP-NEXT: vporq %ymm18, %ymm19, %ymm18
+; AVX512BW-FCP-NEXT: vpshufb %ymm26, %ymm22, %ymm19
+; AVX512BW-FCP-NEXT: vpshufb %ymm27, %ymm17, %ymm23
+; AVX512BW-FCP-NEXT: vporq %ymm19, %ymm23, %ymm19
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm19 = zmm19[2,3,2,3],zmm18[2,3,2,3]
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rdx), %ymm18
+; AVX512BW-FCP-NEXT: vpshufb %ymm7, %ymm18, %ymm7
; AVX512BW-FCP-NEXT: vmovdqa64 (%rcx), %ymm23
; AVX512BW-FCP-NEXT: vpshufb %ymm10, %ymm23, %ymm10
-; AVX512BW-FCP-NEXT: vpor %ymm8, %ymm10, %ymm8
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rax), %zmm10
-; AVX512BW-FCP-NEXT: vpshufb %ymm24, %ymm19, %ymm24
-; AVX512BW-FCP-NEXT: vpshufb %ymm25, %ymm23, %ymm25
-; AVX512BW-FCP-NEXT: vporq %ymm24, %ymm25, %ymm24
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm8 = zmm24[2,3,2,3],zmm8[2,3,2,3]
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm20, %zmm8 {%k1}
-; AVX512BW-FCP-NEXT: vmovdqa64 (%r8), %ymm20
-; AVX512BW-FCP-NEXT: vpshufb %ymm12, %ymm20, %ymm12
-; AVX512BW-FCP-NEXT: vmovdqa64 (%r9), %ymm24
-; AVX512BW-FCP-NEXT: vpshufb %ymm13, %ymm24, %ymm13
-; AVX512BW-FCP-NEXT: vpor %ymm12, %ymm13, %ymm12
-; AVX512BW-FCP-NEXT: vpshufb %ymm28, %ymm20, %ymm13
-; AVX512BW-FCP-NEXT: vpshufb %ymm29, %ymm24, %ymm25
-; AVX512BW-FCP-NEXT: vporq %ymm13, %ymm25, %ymm13
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm12 = zmm13[2,3,2,3],zmm12[2,3,2,3]
-; AVX512BW-FCP-NEXT: vpermw %zmm10, %zmm30, %zmm13
+; AVX512BW-FCP-NEXT: vpor %ymm7, %ymm10, %ymm7
+; AVX512BW-FCP-NEXT: vpshufb %ymm24, %ymm18, %ymm10
+; AVX512BW-FCP-NEXT: vpshufb %ymm25, %ymm23, %ymm24
+; AVX512BW-FCP-NEXT: vporq %ymm10, %ymm24, %ymm10
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm10[2,3,2,3],zmm7[2,3,2,3]
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm19, %zmm7 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqa64 (%r8), %ymm19
+; AVX512BW-FCP-NEXT: vpshufb %ymm20, %ymm19, %ymm10
+; AVX512BW-FCP-NEXT: vmovdqa64 (%r9), %ymm20
+; AVX512BW-FCP-NEXT: vpshufb %ymm21, %ymm20, %ymm21
+; AVX512BW-FCP-NEXT: vporq %ymm10, %ymm21, %ymm10
+; AVX512BW-FCP-NEXT: vpshufb %ymm28, %ymm19, %ymm21
+; AVX512BW-FCP-NEXT: vpshufb %ymm29, %ymm20, %ymm24
+; AVX512BW-FCP-NEXT: vporq %ymm21, %ymm24, %ymm21
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm10 = zmm21[2,3,2,3],zmm10[2,3,2,3]
+; AVX512BW-FCP-NEXT: vpermw %zmm0, %zmm30, %zmm21
; AVX512BW-FCP-NEXT: movabsq $1161999626690365456, %rax # imm = 0x1020408102040810
; AVX512BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm13, %zmm12 {%k3}
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm21, %zmm10 {%k3}
; AVX512BW-FCP-NEXT: movabsq $2033499346708139548, %rax # imm = 0x1C3870E1C3870E1C
; AVX512BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm12, %zmm8 {%k3}
-; AVX512BW-FCP-NEXT: vpshufb %ymm14, %ymm19, %ymm12
-; AVX512BW-FCP-NEXT: vpshufb %ymm21, %ymm23, %ymm13
-; AVX512BW-FCP-NEXT: vpor %ymm12, %ymm13, %ymm14
-; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %xmm12
-; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %xmm13
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm25 = xmm13[8],xmm12[8],xmm13[9],xmm12[9],xmm13[10],xmm12[10],xmm13[11],xmm12[11],xmm13[12],xmm12[12],xmm13[13],xmm12[13],xmm13[14],xmm12[14],xmm13[15],xmm12[15]
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm10, %zmm7 {%k3}
+; AVX512BW-FCP-NEXT: vpshufb %ymm11, %ymm18, %ymm10
+; AVX512BW-FCP-NEXT: vpshufb %ymm12, %ymm23, %ymm11
+; AVX512BW-FCP-NEXT: vpor %ymm10, %ymm11, %ymm10
+; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %xmm11
+; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %xmm12
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm24 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm21 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512BW-FCP-NEXT: vpshufb %xmm21, %xmm25, %xmm25
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm25 = ymm25[0,1,0,1]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm14, %zmm25, %zmm26
-; AVX512BW-FCP-NEXT: vpshufb %ymm11, %ymm22, %ymm11
-; AVX512BW-FCP-NEXT: vpshufb %ymm15, %ymm18, %ymm14
-; AVX512BW-FCP-NEXT: vpor %ymm11, %ymm14, %ymm11
-; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %xmm14
-; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %xmm15
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm27 = xmm15[8],xmm14[8],xmm15[9],xmm14[9],xmm15[10],xmm14[10],xmm15[11],xmm14[11],xmm15[12],xmm14[12],xmm15[13],xmm14[13],xmm15[14],xmm14[14],xmm15[15],xmm14[15]
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm25 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512BW-FCP-NEXT: vpshufb %xmm25, %xmm27, %xmm27
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm27 = ymm27[0,1,0,1]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm27, %zmm11
+; AVX512BW-FCP-NEXT: vpshufb %xmm21, %xmm24, %xmm24
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm24 = ymm24[0,1,0,1]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm24, %zmm25
+; AVX512BW-FCP-NEXT: vpshufb %ymm13, %ymm22, %ymm10
+; AVX512BW-FCP-NEXT: vpshufb %ymm14, %ymm17, %ymm13
+; AVX512BW-FCP-NEXT: vpor %ymm10, %ymm13, %ymm10
+; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %xmm13
+; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %xmm14
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm26 = xmm14[8],xmm13[8],xmm14[9],xmm13[9],xmm14[10],xmm13[10],xmm14[11],xmm13[11],xmm14[12],xmm13[12],xmm14[13],xmm13[13],xmm14[14],xmm13[14],xmm14[15],xmm13[15]
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm24 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512BW-FCP-NEXT: vpshufb %xmm24, %xmm26, %xmm26
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm26 = ymm26[0,1,0,1]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm26, %zmm10
; AVX512BW-FCP-NEXT: movabsq $435749860008887046, %rax # imm = 0x60C183060C18306
; AVX512BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm26, %zmm11 {%k3}
-; AVX512BW-FCP-NEXT: vpshufb %ymm16, %ymm24, %ymm16
-; AVX512BW-FCP-NEXT: vpshufb %ymm17, %ymm20, %ymm17
-; AVX512BW-FCP-NEXT: vporq %ymm16, %ymm17, %ymm27
-; AVX512BW-FCP-NEXT: vmovdqa64 (%r9), %xmm16
-; AVX512BW-FCP-NEXT: vmovdqa64 (%r8), %xmm17
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm28 = xmm17[8],xmm16[8],xmm17[9],xmm16[9],xmm17[10],xmm16[10],xmm17[11],xmm16[11],xmm17[12],xmm16[12],xmm17[13],xmm16[13],xmm17[14],xmm16[14],xmm17[15],xmm16[15]
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm26 = [u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512BW-FCP-NEXT: vpshufb %xmm26, %xmm28, %xmm28
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm28 = ymm28[0,1,0,1]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm27, %zmm28, %zmm27
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm28 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6,6,1,6,0,7,1,0,7,14,9,14,8,15,9,8,15]
-; AVX512BW-FCP-NEXT: vpermw %zmm10, %zmm28, %zmm28
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm25, %zmm10 {%k3}
+; AVX512BW-FCP-NEXT: vpshufb %ymm15, %ymm20, %ymm15
+; AVX512BW-FCP-NEXT: vpshufb %ymm16, %ymm19, %ymm16
+; AVX512BW-FCP-NEXT: vporq %ymm15, %ymm16, %ymm26
+; AVX512BW-FCP-NEXT: vmovdqa (%r9), %xmm15
+; AVX512BW-FCP-NEXT: vmovdqa64 (%r8), %xmm16
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm27 = xmm16[8],xmm15[8],xmm16[9],xmm15[9],xmm16[10],xmm15[10],xmm16[11],xmm15[11],xmm16[12],xmm15[12],xmm16[13],xmm15[13],xmm16[14],xmm15[14],xmm16[15],xmm15[15]
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm25 = [u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512BW-FCP-NEXT: vpshufb %xmm25, %xmm27, %xmm27
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm27 = ymm27[0,1,0,1]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm26, %zmm27, %zmm26
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm27 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6,6,1,6,0,7,1,0,7,14,9,14,8,15,9,8,15]
+; AVX512BW-FCP-NEXT: vpermw %zmm0, %zmm27, %zmm27
; AVX512BW-FCP-NEXT: movabsq $2323999253380730912, %rax # imm = 0x2040810204081020
; AVX512BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm28, %zmm27 {%k3}
-; AVX512BW-FCP-NEXT: vmovdqa64 32(%rdx), %xmm28
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm27, %zmm26 {%k3}
+; AVX512BW-FCP-NEXT: vmovdqa64 32(%rdx), %xmm27
; AVX512BW-FCP-NEXT: movabsq $4066998693416279096, %rax # imm = 0x3870E1C3870E1C38
; AVX512BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm27, %zmm11 {%k3}
-; AVX512BW-FCP-NEXT: vmovdqa64 32(%rdi), %xmm27
-; AVX512BW-FCP-NEXT: vpshufb %ymm7, %ymm22, %ymm7
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm26, %zmm10 {%k3}
+; AVX512BW-FCP-NEXT: vmovdqa64 32(%rdi), %xmm26
+; AVX512BW-FCP-NEXT: vpshufb %ymm8, %ymm22, %ymm8
; AVX512BW-FCP-NEXT: vmovdqa64 32(%rsi), %xmm22
-; AVX512BW-FCP-NEXT: vpshufb %ymm9, %ymm18, %ymm9
-; AVX512BW-FCP-NEXT: vpor %ymm7, %ymm9, %ymm9
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm18 = xmm27[0],xmm22[0],xmm27[1],xmm22[1],xmm27[2],xmm22[2],xmm27[3],xmm22[3],xmm27[4],xmm22[4],xmm27[5],xmm22[5],xmm27[6],xmm22[6],xmm27[7],xmm22[7]
-; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512BW-FCP-NEXT: vpshufb %xmm7, %xmm18, %xmm18
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm18 = zmm9[2,3,2,3],zmm18[0,1,0,1]
-; AVX512BW-FCP-NEXT: vmovdqa64 32(%rcx), %xmm29
+; AVX512BW-FCP-NEXT: vpshufb %ymm9, %ymm17, %ymm9
+; AVX512BW-FCP-NEXT: vpor %ymm8, %ymm9, %ymm9
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm17 = xmm26[0],xmm22[0],xmm26[1],xmm22[1],xmm26[2],xmm22[2],xmm26[3],xmm22[3],xmm26[4],xmm22[4],xmm26[5],xmm22[5],xmm26[6],xmm22[6],xmm26[7],xmm22[7]
+; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
+; AVX512BW-FCP-NEXT: vpshufb %xmm8, %xmm17, %xmm17
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm17 = zmm9[2,3,2,3],zmm17[0,1,0,1]
+; AVX512BW-FCP-NEXT: vmovdqa64 32(%rcx), %xmm28
; AVX512BW-FCP-NEXT: vpshufb %ymm3, %ymm23, %ymm3
; AVX512BW-FCP-NEXT: vmovdqa64 32(%r9), %xmm23
-; AVX512BW-FCP-NEXT: vpshufb %ymm4, %ymm19, %ymm4
+; AVX512BW-FCP-NEXT: vpshufb %ymm4, %ymm18, %ymm4
; AVX512BW-FCP-NEXT: vpor %ymm3, %ymm4, %ymm3
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm28[0],xmm29[0],xmm28[1],xmm29[1],xmm28[2],xmm29[2],xmm28[3],xmm29[3],xmm28[4],xmm29[4],xmm28[5],xmm29[5],xmm28[6],xmm29[6],xmm28[7],xmm29[7]
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm27[0],xmm28[0],xmm27[1],xmm28[1],xmm27[2],xmm28[2],xmm27[3],xmm28[3],xmm27[4],xmm28[4],xmm27[5],xmm28[5],xmm27[6],xmm28[6],xmm27[7],xmm28[7]
; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
; AVX512BW-FCP-NEXT: vpshufb %xmm9, %xmm4, %xmm4
; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm3[2,3,2,3],zmm4[0,1,0,1]
-; AVX512BW-FCP-NEXT: vmovdqa64 32(%r8), %xmm19
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm18, %zmm3 {%k2}
-; AVX512BW-FCP-NEXT: vpshufb %ymm5, %ymm24, %ymm4
-; AVX512BW-FCP-NEXT: vpshufb %ymm6, %ymm20, %ymm5
+; AVX512BW-FCP-NEXT: vmovdqa64 32(%r8), %xmm18
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm17, %zmm3 {%k2}
+; AVX512BW-FCP-NEXT: vpshufb %ymm5, %ymm20, %ymm4
+; AVX512BW-FCP-NEXT: vpshufb %ymm6, %ymm19, %ymm5
; AVX512BW-FCP-NEXT: vpor %ymm4, %ymm5, %ymm5
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm19[0],xmm23[0],xmm19[1],xmm23[1],xmm19[2],xmm23[2],xmm19[3],xmm23[3],xmm19[4],xmm23[4],xmm19[5],xmm23[5],xmm19[6],xmm23[6],xmm19[7],xmm23[7]
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm18[0],xmm23[0],xmm18[1],xmm23[1],xmm18[2],xmm23[2],xmm18[3],xmm23[3],xmm18[4],xmm23[4],xmm18[5],xmm23[5],xmm18[6],xmm23[6],xmm18[7],xmm23[7]
; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
; AVX512BW-FCP-NEXT: vpshufb %xmm4, %xmm6, %xmm6
; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm5[2,3,2,3],zmm6[0,1,0,1]
; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm6 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
-; AVX512BW-FCP-NEXT: vpermw %zmm10, %zmm6, %zmm6
+; AVX512BW-FCP-NEXT: vpermw %zmm0, %zmm6, %zmm6
; AVX512BW-FCP-NEXT: movabsq $580999813345182728, %rax # imm = 0x810204081020408
; AVX512BW-FCP-NEXT: kmovq %rax, %k2
; AVX512BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm5 {%k2}
@@ -10455,372 +10455,372 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP-NEXT: kmovq %rax, %k2
; AVX512BW-FCP-NEXT: vmovdqu8 %zmm5, %zmm3 {%k2}
; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
-; AVX512BW-FCP-NEXT: vpshufb %xmm6, %xmm29, %xmm5
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm18 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
-; AVX512BW-FCP-NEXT: vpshufb %xmm18, %xmm28, %xmm20
-; AVX512BW-FCP-NEXT: vporq %xmm5, %xmm20, %xmm5
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm20 = xmm29[8],xmm28[8],xmm29[9],xmm28[9],xmm29[10],xmm28[10],xmm29[11],xmm28[11],xmm29[12],xmm28[12],xmm29[13],xmm28[13],xmm29[14],xmm28[14],xmm29[15],xmm28[15]
-; AVX512BW-FCP-NEXT: vpshufb %xmm21, %xmm20, %xmm20
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm20 = zmm5[0,1,0,1],zmm20[0,1,0,1]
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm21 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
-; AVX512BW-FCP-NEXT: vpshufb %xmm21, %xmm22, %xmm5
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm24 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
-; AVX512BW-FCP-NEXT: vpshufb %xmm24, %xmm27, %xmm28
-; AVX512BW-FCP-NEXT: vporq %xmm5, %xmm28, %xmm5
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm22 = xmm22[8],xmm27[8],xmm22[9],xmm27[9],xmm22[10],xmm27[10],xmm22[11],xmm27[11],xmm22[12],xmm27[12],xmm22[13],xmm27[13],xmm22[14],xmm27[14],xmm22[15],xmm27[15]
-; AVX512BW-FCP-NEXT: vpshufb %xmm25, %xmm22, %xmm22
+; AVX512BW-FCP-NEXT: vpshufb %xmm6, %xmm28, %xmm5
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm17 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
+; AVX512BW-FCP-NEXT: vpshufb %xmm17, %xmm27, %xmm19
+; AVX512BW-FCP-NEXT: vporq %xmm5, %xmm19, %xmm5
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm19 = xmm28[8],xmm27[8],xmm28[9],xmm27[9],xmm28[10],xmm27[10],xmm28[11],xmm27[11],xmm28[12],xmm27[12],xmm28[13],xmm27[13],xmm28[14],xmm27[14],xmm28[15],xmm27[15]
+; AVX512BW-FCP-NEXT: vpshufb %xmm21, %xmm19, %xmm19
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm19 = zmm5[0,1,0,1],zmm19[0,1,0,1]
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm20 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
+; AVX512BW-FCP-NEXT: vpshufb %xmm20, %xmm22, %xmm5
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm21 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
+; AVX512BW-FCP-NEXT: vpshufb %xmm21, %xmm26, %xmm27
+; AVX512BW-FCP-NEXT: vporq %xmm5, %xmm27, %xmm5
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm22 = xmm22[8],xmm26[8],xmm22[9],xmm26[9],xmm22[10],xmm26[10],xmm22[11],xmm26[11],xmm22[12],xmm26[12],xmm22[13],xmm26[13],xmm22[14],xmm26[14],xmm22[15],xmm26[15]
+; AVX512BW-FCP-NEXT: vpshufb %xmm24, %xmm22, %xmm22
; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,0,1],zmm22[0,1,0,1]
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm20, %zmm5 {%k1}
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm20 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
-; AVX512BW-FCP-NEXT: vpshufb %xmm20, %xmm23, %xmm22
-; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm25 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
-; AVX512BW-FCP-NEXT: vpshufb %xmm25, %xmm19, %xmm27
-; AVX512BW-FCP-NEXT: vporq %xmm22, %xmm27, %xmm22
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm19 = xmm19[8],xmm23[8],xmm19[9],xmm23[9],xmm19[10],xmm23[10],xmm19[11],xmm23[11],xmm19[12],xmm23[12],xmm19[13],xmm23[13],xmm19[14],xmm23[14],xmm19[15],xmm23[15]
-; AVX512BW-FCP-NEXT: vpshufb %xmm26, %xmm19, %xmm19
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm19 = zmm22[0,1,0,1],zmm19[0,1,0,1]
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm22 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4,4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
-; AVX512BW-FCP-NEXT: vpermw %zmm2, %zmm22, %zmm2
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm19, %zmm5 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm19 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
+; AVX512BW-FCP-NEXT: vpshufb %xmm19, %xmm23, %xmm22
+; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm24 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
+; AVX512BW-FCP-NEXT: vpshufb %xmm24, %xmm18, %xmm26
+; AVX512BW-FCP-NEXT: vporq %xmm22, %xmm26, %xmm22
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm18 = xmm18[8],xmm23[8],xmm18[9],xmm23[9],xmm18[10],xmm23[10],xmm18[11],xmm23[11],xmm18[12],xmm23[12],xmm18[13],xmm23[13],xmm18[14],xmm23[14],xmm18[15],xmm23[15]
+; AVX512BW-FCP-NEXT: vpshufb %xmm25, %xmm18, %xmm18
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm18 = zmm22[0,1,0,1],zmm18[0,1,0,1]
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm22 = [18,18,18,20,18,18,18,20,19,19,19,19,18,18,18,20,20,21,20,21,21,22,21,22,20,21,20,21,21,22,21,22]
+; AVX512BW-FCP-NEXT: vpermw %zmm0, %zmm22, %zmm22
; AVX512BW-FCP-NEXT: movabsq $290499906672591364, %rax # imm = 0x408102040810204
; AVX512BW-FCP-NEXT: kmovq %rax, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm2, %zmm19 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm22, %zmm18 {%k1}
; AVX512BW-FCP-NEXT: movabsq $-8714997200177740921, %rax # imm = 0x870E1C3870E1C387
; AVX512BW-FCP-NEXT: kmovq %rax, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm19, %zmm5 {%k1}
-; AVX512BW-FCP-NEXT: vpshufb %xmm6, %xmm13, %xmm2
-; AVX512BW-FCP-NEXT: vpshufb %xmm18, %xmm12, %xmm6
-; AVX512BW-FCP-NEXT: vpor %xmm2, %xmm6, %xmm2
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm12[0],xmm13[0],xmm12[1],xmm13[1],xmm12[2],xmm13[2],xmm12[3],xmm13[3],xmm12[4],xmm13[4],xmm12[5],xmm13[5],xmm12[6],xmm13[6],xmm12[7],xmm13[7]
-; AVX512BW-FCP-NEXT: vpshufb %xmm9, %xmm6, %xmm6
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm6[0,1,0,1],zmm2[0,1,0,1]
-; AVX512BW-FCP-NEXT: vpshufb %xmm21, %xmm15, %xmm6
-; AVX512BW-FCP-NEXT: vpshufb %xmm24, %xmm14, %xmm9
-; AVX512BW-FCP-NEXT: vpor %xmm6, %xmm9, %xmm6
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm14[0],xmm15[0],xmm14[1],xmm15[1],xmm14[2],xmm15[2],xmm14[3],xmm15[3],xmm14[4],xmm15[4],xmm14[5],xmm15[5],xmm14[6],xmm15[6],xmm14[7],xmm15[7]
-; AVX512BW-FCP-NEXT: vpshufb %xmm7, %xmm9, %xmm7
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm6 = zmm7[0,1,0,1],zmm6[0,1,0,1]
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm18, %zmm5 {%k1}
+; AVX512BW-FCP-NEXT: vpshufb %xmm6, %xmm12, %xmm6
+; AVX512BW-FCP-NEXT: vpshufb %xmm17, %xmm11, %xmm17
+; AVX512BW-FCP-NEXT: vporq %xmm6, %xmm17, %xmm6
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm11[0],xmm12[0],xmm11[1],xmm12[1],xmm11[2],xmm12[2],xmm11[3],xmm12[3],xmm11[4],xmm12[4],xmm11[5],xmm12[5],xmm11[6],xmm12[6],xmm11[7],xmm12[7]
+; AVX512BW-FCP-NEXT: vpshufb %xmm9, %xmm11, %xmm9
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm6 = zmm9[0,1,0,1],zmm6[0,1,0,1]
+; AVX512BW-FCP-NEXT: vpshufb %xmm20, %xmm14, %xmm9
+; AVX512BW-FCP-NEXT: vpshufb %xmm21, %xmm13, %xmm11
+; AVX512BW-FCP-NEXT: vpor %xmm9, %xmm11, %xmm9
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm13[0],xmm14[0],xmm13[1],xmm14[1],xmm13[2],xmm14[2],xmm13[3],xmm14[3],xmm13[4],xmm14[4],xmm13[5],xmm14[5],xmm13[6],xmm14[6],xmm13[7],xmm14[7]
+; AVX512BW-FCP-NEXT: vpshufb %xmm8, %xmm11, %xmm8
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm8 = zmm8[0,1,0,1],zmm9[0,1,0,1]
; AVX512BW-FCP-NEXT: movabsq $871499720017774092, %rax # imm = 0xC183060C183060C
; AVX512BW-FCP-NEXT: kmovq %rax, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm2, %zmm6 {%k1}
-; AVX512BW-FCP-NEXT: vpshufb %xmm20, %xmm16, %xmm2
-; AVX512BW-FCP-NEXT: vpshufb %xmm25, %xmm17, %xmm7
-; AVX512BW-FCP-NEXT: vpor %xmm2, %xmm7, %xmm2
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm17[0],xmm16[0],xmm17[1],xmm16[1],xmm17[2],xmm16[2],xmm17[3],xmm16[3],xmm17[4],xmm16[4],xmm17[5],xmm16[5],xmm17[6],xmm16[6],xmm17[7],xmm16[7]
-; AVX512BW-FCP-NEXT: vpshufb %xmm4, %xmm7, %xmm4
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm4[0,1,0,1],zmm2[0,1,0,1]
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm4 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4]
-; AVX512BW-FCP-NEXT: vpermw %zmm10, %zmm4, %zmm4
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm8 {%k1}
+; AVX512BW-FCP-NEXT: vpshufb %xmm19, %xmm15, %xmm6
+; AVX512BW-FCP-NEXT: vpshufb %xmm24, %xmm16, %xmm9
+; AVX512BW-FCP-NEXT: vpor %xmm6, %xmm9, %xmm6
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm16[0],xmm15[0],xmm16[1],xmm15[1],xmm16[2],xmm15[2],xmm16[3],xmm15[3],xmm16[4],xmm15[4],xmm16[5],xmm15[5],xmm16[6],xmm15[6],xmm16[7],xmm15[7]
+; AVX512BW-FCP-NEXT: vpshufb %xmm4, %xmm9, %xmm4
+; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm4 = zmm4[0,1,0,1],zmm6[0,1,0,1]
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm6 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4]
+; AVX512BW-FCP-NEXT: vpermw %zmm0, %zmm6, %zmm0
; AVX512BW-FCP-NEXT: movabsq $4647998506761461824, %rax # imm = 0x4081020408102040
; AVX512BW-FCP-NEXT: kmovq %rax, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm4, %zmm2 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm0, %zmm4 {%k1}
; AVX512BW-FCP-NEXT: movabsq $8133997386832558192, %rax # imm = 0x70E1C3870E1C3870
; AVX512BW-FCP-NEXT: kmovq %rax, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm2, %zmm6 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm4, %zmm8 {%k1}
; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm8, 128(%rax)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm6, (%rax)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm1, 320(%rax)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm7, 128(%rax)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm8, (%rax)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm2, 320(%rax)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm5, 256(%rax)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm3, 192(%rax)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, 384(%rax)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm11, 64(%rax)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm1, 384(%rax)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm10, 64(%rax)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
; AVX512DQ-BW-LABEL: store_i8_stride7_vf64:
; AVX512DQ-BW: # %bb.0:
; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-BW-NEXT: vmovdqa64 (%rax), %zmm0
+; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %r10
+; AVX512DQ-BW-NEXT: vmovdqa64 (%r10), %zmm0
; AVX512DQ-BW-NEXT: vmovdqa 32(%rsi), %ymm2
-; AVX512DQ-BW-NEXT: vmovdqa 32(%rdi), %ymm14
-; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} ymm1 = ymm14[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
+; AVX512DQ-BW-NEXT: vmovdqa 32(%rdi), %ymm13
+; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} ymm1 = ymm13[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,2,3,3,6,6,7,7]
-; AVX512DQ-BW-NEXT: vpbroadcastd {{.*#+}} ymm4 = [13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14]
-; AVX512DQ-BW-NEXT: movl $338170920, %r10d # imm = 0x14281428
-; AVX512DQ-BW-NEXT: kmovd %r10d, %k2
-; AVX512DQ-BW-NEXT: vpshufb %ymm4, %ymm2, %ymm1 {%k2}
-; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
-; AVX512DQ-BW-NEXT: # ymm9 = mem[0,1,0,1]
-; AVX512DQ-BW-NEXT: vpshufb %ymm9, %ymm2, %ymm3
-; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} ymm10 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
-; AVX512DQ-BW-NEXT: vpshufb %ymm10, %ymm14, %ymm5
-; AVX512DQ-BW-NEXT: vpor %ymm3, %ymm5, %ymm3
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm3[2,3,2,3],zmm1[2,3,2,3]
-; AVX512DQ-BW-NEXT: vmovdqa 32(%rdx), %ymm15
-; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
-; AVX512DQ-BW-NEXT: vpshufb %ymm5, %ymm15, %ymm3
-; AVX512DQ-BW-NEXT: vmovdqa64 32(%rcx), %ymm17
-; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
-; AVX512DQ-BW-NEXT: # ymm11 = mem[0,1,0,1]
-; AVX512DQ-BW-NEXT: vpshufb %ymm11, %ymm17, %ymm6
-; AVX512DQ-BW-NEXT: vpor %ymm3, %ymm6, %ymm3
-; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm6 = [11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12]
-; AVX512DQ-BW-NEXT: vpshufb %ymm6, %ymm17, %ymm7
-; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} ymm8 = ymm15[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm8 = ymm8[0,2,3,3,4,6,7,7]
-; AVX512DQ-BW-NEXT: movl $676341840, %r10d # imm = 0x28502850
-; AVX512DQ-BW-NEXT: kmovd %r10d, %k3
-; AVX512DQ-BW-NEXT: vmovdqu8 %ymm8, %ymm7 {%k3}
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm3[2,3,2,3],zmm7[2,3,2,3]
-; AVX512DQ-BW-NEXT: movabsq $1742999440035548184, %r10 # imm = 0x183060C183060C18
-; AVX512DQ-BW-NEXT: kmovq %r10, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm3, %zmm1 {%k1}
+; AVX512DQ-BW-NEXT: vpbroadcastd {{.*#+}} ymm3 = [13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14]
+; AVX512DQ-BW-NEXT: movl $338170920, %r11d # imm = 0x14281428
+; AVX512DQ-BW-NEXT: kmovd %r11d, %k2
+; AVX512DQ-BW-NEXT: vpshufb %ymm3, %ymm2, %ymm1 {%k2}
+; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
+; AVX512DQ-BW-NEXT: # ymm8 = mem[0,1,0,1]
+; AVX512DQ-BW-NEXT: vpshufb %ymm8, %ymm2, %ymm4
+; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} ymm9 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
+; AVX512DQ-BW-NEXT: vpshufb %ymm9, %ymm13, %ymm5
+; AVX512DQ-BW-NEXT: vpor %ymm4, %ymm5, %ymm4
+; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm4[2,3,2,3],zmm1[2,3,2,3]
+; AVX512DQ-BW-NEXT: vmovdqa 32(%rdx), %ymm14
+; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
+; AVX512DQ-BW-NEXT: vpshufb %ymm4, %ymm14, %ymm5
+; AVX512DQ-BW-NEXT: vmovdqa 32(%rcx), %ymm15
+; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
+; AVX512DQ-BW-NEXT: # ymm10 = mem[0,1,0,1]
+; AVX512DQ-BW-NEXT: vpshufb %ymm10, %ymm15, %ymm6
+; AVX512DQ-BW-NEXT: vpor %ymm5, %ymm6, %ymm6
+; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm5 = [11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12]
+; AVX512DQ-BW-NEXT: vpshufb %ymm5, %ymm15, %ymm7
+; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} ymm11 = ymm14[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm11 = ymm11[0,2,3,3,4,6,7,7]
+; AVX512DQ-BW-NEXT: movl $676341840, %r11d # imm = 0x28502850
+; AVX512DQ-BW-NEXT: kmovd %r11d, %k3
+; AVX512DQ-BW-NEXT: vmovdqu8 %ymm11, %ymm7 {%k3}
+; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm6 = zmm6[2,3,2,3],zmm7[2,3,2,3]
+; AVX512DQ-BW-NEXT: movabsq $1742999440035548184, %r11 # imm = 0x183060C183060C18
+; AVX512DQ-BW-NEXT: kmovq %r11, %k1
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm6, %zmm1 {%k1}
; AVX512DQ-BW-NEXT: vmovdqa64 32(%r9), %ymm16
-; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [29,128,27,128,128,128,128,30,128,28,128,128,128,128,31,128,29,128,27,128,128,128,128,30,128,28,128,128,128,128,31,128]
-; AVX512DQ-BW-NEXT: # ymm7 = mem[0,1,0,1]
-; AVX512DQ-BW-NEXT: vpshufb %ymm7, %ymm16, %ymm3
-; AVX512DQ-BW-NEXT: vmovdqa64 32(%r8), %ymm18
-; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,27,128,128,128,128,30,128,28,128,128,128,128,31,128,29]
-; AVX512DQ-BW-NEXT: vpshufb %ymm8, %ymm18, %ymm12
-; AVX512DQ-BW-NEXT: vpor %ymm3, %ymm12, %ymm3
-; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
-; AVX512DQ-BW-NEXT: vpshufb %ymm12, %ymm18, %ymm19
-; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm13 = [128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
-; AVX512DQ-BW-NEXT: # ymm13 = mem[0,1,0,1]
-; AVX512DQ-BW-NEXT: vpshufb %ymm13, %ymm16, %ymm20
-; AVX512DQ-BW-NEXT: vporq %ymm19, %ymm20, %ymm19
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm19[2,3,2,3],zmm3[2,3,2,3]
-; AVX512DQ-BW-NEXT: movabsq $6971997760142192736, %r10 # imm = 0x60C183060C183060
-; AVX512DQ-BW-NEXT: kmovq %r10, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm3, %zmm1 {%k1}
-; AVX512DQ-BW-NEXT: vmovdqa 32(%rax), %ymm3
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm19 = [11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12,14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15]
-; AVX512DQ-BW-NEXT: vpermw %zmm3, %zmm19, %zmm19
-; AVX512DQ-BW-NEXT: movabsq $-9150747060186627967, %rax # imm = 0x8102040810204081
-; AVX512DQ-BW-NEXT: kmovq %rax, %k4
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm19, %zmm1 {%k4}
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm19 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
-; AVX512DQ-BW-NEXT: vpshufb %ymm19, %ymm15, %ymm21
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm20 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512DQ-BW-NEXT: vpshufb %ymm20, %ymm17, %ymm22
-; AVX512DQ-BW-NEXT: vporq %ymm21, %ymm22, %ymm21
+; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [29,128,27,128,128,128,128,30,128,28,128,128,128,128,31,128,29,128,27,128,128,128,128,30,128,28,128,128,128,128,31,128]
+; AVX512DQ-BW-NEXT: # ymm6 = mem[0,1,0,1]
+; AVX512DQ-BW-NEXT: vpshufb %ymm6, %ymm16, %ymm11
+; AVX512DQ-BW-NEXT: vmovdqa64 32(%r8), %ymm19
+; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} ymm7 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,27,128,128,128,128,30,128,28,128,128,128,128,31,128,29]
+; AVX512DQ-BW-NEXT: vpshufb %ymm7, %ymm19, %ymm12
+; AVX512DQ-BW-NEXT: vpor %ymm11, %ymm12, %ymm11
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm17 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
+; AVX512DQ-BW-NEXT: vpshufb %ymm17, %ymm19, %ymm12
+; AVX512DQ-BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm18 = [128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
+; AVX512DQ-BW-NEXT: # ymm18 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-BW-NEXT: vpshufb %ymm18, %ymm16, %ymm20
+; AVX512DQ-BW-NEXT: vporq %ymm12, %ymm20, %ymm12
+; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm11 = zmm12[2,3,2,3],zmm11[2,3,2,3]
+; AVX512DQ-BW-NEXT: movabsq $6971997760142192736, %r11 # imm = 0x60C183060C183060
+; AVX512DQ-BW-NEXT: kmovq %r11, %k1
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm11, %zmm1 {%k1}
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm11 = [27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28,30,29,30,31,31,30,30,31,30,29,30,31,31,30,30,31]
+; AVX512DQ-BW-NEXT: vpermw %zmm0, %zmm11, %zmm11
+; AVX512DQ-BW-NEXT: movabsq $-9150747060186627967, %r11 # imm = 0x8102040810204081
+; AVX512DQ-BW-NEXT: kmovq %r11, %k4
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm11, %zmm1 {%k4}
+; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} ymm11 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
+; AVX512DQ-BW-NEXT: vpshufb %ymm11, %ymm14, %ymm20
+; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
+; AVX512DQ-BW-NEXT: vpshufb %ymm12, %ymm15, %ymm21
+; AVX512DQ-BW-NEXT: vporq %ymm20, %ymm21, %ymm20
; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm22 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128]
-; AVX512DQ-BW-NEXT: vpshufb %ymm22, %ymm15, %ymm15
-; AVX512DQ-BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm25 = [128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20]
-; AVX512DQ-BW-NEXT: # ymm25 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-BW-NEXT: vpshufb %ymm25, %ymm17, %ymm17
-; AVX512DQ-BW-NEXT: vporq %ymm15, %ymm17, %ymm15
+; AVX512DQ-BW-NEXT: vpshufb %ymm22, %ymm14, %ymm14
+; AVX512DQ-BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm23 = [128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20]
+; AVX512DQ-BW-NEXT: # ymm23 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-BW-NEXT: vpshufb %ymm23, %ymm15, %ymm15
+; AVX512DQ-BW-NEXT: vpor %ymm14, %ymm15, %ymm14
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm14, %zmm20, %zmm14
+; AVX512DQ-BW-NEXT: vpshuflw {{.*#+}} ymm15 = ymm13[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm15 = ymm15[0,0,1,1,4,4,5,5]
+; AVX512DQ-BW-NEXT: vpbroadcastd {{.*#+}} ymm25 = [5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6]
+; AVX512DQ-BW-NEXT: vpshufb %ymm25, %ymm2, %ymm15 {%k3}
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm15, %zmm21, %zmm15
-; AVX512DQ-BW-NEXT: vpshuflw {{.*#+}} ymm17 = ymm14[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm17 = ymm17[0,0,1,1,4,4,5,5]
-; AVX512DQ-BW-NEXT: vpbroadcastd {{.*#+}} ymm21 = [5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6,5,4,3,6]
-; AVX512DQ-BW-NEXT: vpshufb %ymm21, %ymm2, %ymm17 {%k3}
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm17 = ymm17[2,3,2,3]
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm23 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
-; AVX512DQ-BW-NEXT: vpshufb %ymm23, %ymm14, %ymm14
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm24 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
-; AVX512DQ-BW-NEXT: vpshufb %ymm24, %ymm2, %ymm2
-; AVX512DQ-BW-NEXT: vpor %ymm2, %ymm14, %ymm2
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm17, %zmm2, %zmm2
-; AVX512DQ-BW-NEXT: movabsq $3485998880071096368, %rax # imm = 0x3060C183060C1830
-; AVX512DQ-BW-NEXT: kmovq %rax, %k4
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm15, %zmm2 {%k4}
-; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} ymm14 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
-; AVX512DQ-BW-NEXT: vpshufb %ymm14, %ymm16, %ymm17
-; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} ymm15 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
-; AVX512DQ-BW-NEXT: vpshufb %ymm15, %ymm18, %ymm26
-; AVX512DQ-BW-NEXT: vporq %ymm17, %ymm26, %ymm17
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm20 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
+; AVX512DQ-BW-NEXT: vpshufb %ymm20, %ymm13, %ymm13
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm21 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
+; AVX512DQ-BW-NEXT: vpshufb %ymm21, %ymm2, %ymm2
+; AVX512DQ-BW-NEXT: vpor %ymm2, %ymm13, %ymm2
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm15, %zmm2, %zmm2
+; AVX512DQ-BW-NEXT: movabsq $3485998880071096368, %r11 # imm = 0x3060C183060C1830
+; AVX512DQ-BW-NEXT: kmovq %r11, %k4
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm14, %zmm2 {%k4}
+; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} ymm13 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
+; AVX512DQ-BW-NEXT: vpshufb %ymm13, %ymm16, %ymm15
+; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} ymm14 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
+; AVX512DQ-BW-NEXT: vpshufb %ymm14, %ymm19, %ymm24
+; AVX512DQ-BW-NEXT: vporq %ymm15, %ymm24, %ymm15
; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} ymm26 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128]
-; AVX512DQ-BW-NEXT: vpshufb %ymm26, %ymm18, %ymm18
+; AVX512DQ-BW-NEXT: vpshufb %ymm26, %ymm19, %ymm19
; AVX512DQ-BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm28 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
; AVX512DQ-BW-NEXT: # ymm28 = mem[0,1,2,3,0,1,2,3]
; AVX512DQ-BW-NEXT: vpshufb %ymm28, %ymm16, %ymm16
-; AVX512DQ-BW-NEXT: vporq %ymm18, %ymm16, %ymm16
+; AVX512DQ-BW-NEXT: vporq %ymm19, %ymm16, %ymm16
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm16 = ymm16[2,3,2,3]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm16, %zmm17, %zmm16
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm16, %zmm15, %zmm15
+; AVX512DQ-BW-NEXT: vmovdqa64 32(%r10), %ymm16
; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm29 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12]
-; AVX512DQ-BW-NEXT: vpermw %ymm3, %ymm29, %ymm17
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm18 = ymm3[12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm17, %zmm18, %zmm17
-; AVX512DQ-BW-NEXT: movabsq $145249953336295682, %rax # imm = 0x204081020408102
-; AVX512DQ-BW-NEXT: kmovq %rax, %k5
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm17, %zmm16 {%k5}
-; AVX512DQ-BW-NEXT: movabsq $-4357498600088870461, %rax # imm = 0xC3870E1C3870E1C3
-; AVX512DQ-BW-NEXT: kmovq %rax, %k5
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm16, %zmm2 {%k5}
-; AVX512DQ-BW-NEXT: vmovdqa64 (%rdx), %ymm16
-; AVX512DQ-BW-NEXT: vpshufb %ymm5, %ymm16, %ymm5
-; AVX512DQ-BW-NEXT: vmovdqa64 (%rcx), %ymm17
-; AVX512DQ-BW-NEXT: vpshufb %ymm11, %ymm17, %ymm11
-; AVX512DQ-BW-NEXT: vpor %ymm5, %ymm11, %ymm5
-; AVX512DQ-BW-NEXT: vpshufb %ymm22, %ymm16, %ymm11
-; AVX512DQ-BW-NEXT: vpshufb %ymm25, %ymm17, %ymm18
-; AVX512DQ-BW-NEXT: vporq %ymm11, %ymm18, %ymm11
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm11[2,3,2,3],zmm5[2,3,2,3]
-; AVX512DQ-BW-NEXT: vmovdqa64 (%rsi), %ymm25
+; AVX512DQ-BW-NEXT: vpermw %ymm16, %ymm29, %ymm19
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm16 = ymm16[12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm19, %zmm16, %zmm16
+; AVX512DQ-BW-NEXT: movabsq $145249953336295682, %r10 # imm = 0x204081020408102
+; AVX512DQ-BW-NEXT: kmovq %r10, %k5
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm16, %zmm15 {%k5}
+; AVX512DQ-BW-NEXT: movabsq $-4357498600088870461, %r10 # imm = 0xC3870E1C3870E1C3
+; AVX512DQ-BW-NEXT: kmovq %r10, %k5
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm15, %zmm2 {%k5}
+; AVX512DQ-BW-NEXT: vmovdqa (%rdx), %ymm15
+; AVX512DQ-BW-NEXT: vpshufb %ymm4, %ymm15, %ymm4
+; AVX512DQ-BW-NEXT: vmovdqa64 (%rcx), %ymm16
+; AVX512DQ-BW-NEXT: vpshufb %ymm10, %ymm16, %ymm10
+; AVX512DQ-BW-NEXT: vpor %ymm4, %ymm10, %ymm4
+; AVX512DQ-BW-NEXT: vpshufb %ymm22, %ymm15, %ymm10
+; AVX512DQ-BW-NEXT: vpshufb %ymm23, %ymm16, %ymm19
+; AVX512DQ-BW-NEXT: vporq %ymm10, %ymm19, %ymm10
+; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm4 = zmm10[2,3,2,3],zmm4[2,3,2,3]
+; AVX512DQ-BW-NEXT: vmovdqa64 (%rsi), %ymm24
; AVX512DQ-BW-NEXT: vmovdqa64 (%rdi), %ymm27
-; AVX512DQ-BW-NEXT: vpshuflw {{.*#+}} ymm11 = ymm27[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm11 = ymm11[0,0,1,1,4,4,5,5]
-; AVX512DQ-BW-NEXT: vpshufb %ymm21, %ymm25, %ymm11 {%k3}
-; AVX512DQ-BW-NEXT: vpshufb %ymm9, %ymm25, %ymm9
-; AVX512DQ-BW-NEXT: vpshufb %ymm10, %ymm27, %ymm10
-; AVX512DQ-BW-NEXT: vpor %ymm9, %ymm10, %ymm9
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm11[2,3,2,3],zmm9[2,3,2,3]
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm9, %zmm5 {%k1}
-; AVX512DQ-BW-NEXT: vmovdqa64 (%r8), %ymm18
-; AVX512DQ-BW-NEXT: vpshufb %ymm12, %ymm18, %ymm9
-; AVX512DQ-BW-NEXT: vmovdqa64 (%r9), %ymm21
-; AVX512DQ-BW-NEXT: vpshufb %ymm13, %ymm21, %ymm10
+; AVX512DQ-BW-NEXT: vpshuflw {{.*#+}} ymm10 = ymm27[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm10 = ymm10[0,0,1,1,4,4,5,5]
+; AVX512DQ-BW-NEXT: vpshufb %ymm25, %ymm24, %ymm10 {%k3}
+; AVX512DQ-BW-NEXT: vpshufb %ymm8, %ymm24, %ymm8
+; AVX512DQ-BW-NEXT: vpshufb %ymm9, %ymm27, %ymm9
+; AVX512DQ-BW-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm8 = zmm10[2,3,2,3],zmm8[2,3,2,3]
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm8, %zmm4 {%k1}
+; AVX512DQ-BW-NEXT: vmovdqa64 (%r8), %ymm19
+; AVX512DQ-BW-NEXT: vpshufb %ymm17, %ymm19, %ymm8
+; AVX512DQ-BW-NEXT: vmovdqa64 (%r9), %ymm22
+; AVX512DQ-BW-NEXT: vpshufb %ymm18, %ymm22, %ymm9
+; AVX512DQ-BW-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX512DQ-BW-NEXT: vpshufb %ymm26, %ymm19, %ymm9
+; AVX512DQ-BW-NEXT: vpshufb %ymm28, %ymm22, %ymm10
; AVX512DQ-BW-NEXT: vpor %ymm9, %ymm10, %ymm9
-; AVX512DQ-BW-NEXT: vpshufb %ymm26, %ymm18, %ymm10
-; AVX512DQ-BW-NEXT: vpshufb %ymm28, %ymm21, %ymm11
-; AVX512DQ-BW-NEXT: vpor %ymm10, %ymm11, %ymm10
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm10[2,3,2,3],zmm9[2,3,2,3]
-; AVX512DQ-BW-NEXT: vpermw %zmm0, %zmm29, %zmm10
-; AVX512DQ-BW-NEXT: movabsq $1161999626690365456, %rax # imm = 0x1020408102040810
-; AVX512DQ-BW-NEXT: kmovq %rax, %k5
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm10, %zmm9 {%k5}
-; AVX512DQ-BW-NEXT: movabsq $2033499346708139548, %rax # imm = 0x1C3870E1C3870E1C
-; AVX512DQ-BW-NEXT: kmovq %rax, %k5
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm9, %zmm5 {%k5}
-; AVX512DQ-BW-NEXT: vpshufb %ymm19, %ymm16, %ymm9
-; AVX512DQ-BW-NEXT: vpshufb %ymm20, %ymm17, %ymm10
-; AVX512DQ-BW-NEXT: vpor %ymm9, %ymm10, %ymm9
-; AVX512DQ-BW-NEXT: vmovdqa (%rdx), %xmm10
-; AVX512DQ-BW-NEXT: vmovdqa64 32(%rdx), %xmm19
-; AVX512DQ-BW-NEXT: vmovdqa (%rcx), %xmm11
-; AVX512DQ-BW-NEXT: vmovdqa64 32(%rcx), %xmm20
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm12 = xmm11[8],xmm10[8],xmm11[9],xmm10[9],xmm11[10],xmm10[10],xmm11[11],xmm10[11],xmm11[12],xmm10[12],xmm11[13],xmm10[13],xmm11[14],xmm10[14],xmm11[15],xmm10[15]
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm22 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512DQ-BW-NEXT: vpshufb %xmm22, %xmm12, %xmm12
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,1,0,1]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm9, %zmm12, %zmm26
-; AVX512DQ-BW-NEXT: vpshufb %ymm23, %ymm27, %ymm9
-; AVX512DQ-BW-NEXT: vpshufb %ymm24, %ymm25, %ymm12
-; AVX512DQ-BW-NEXT: vpor %ymm9, %ymm12, %ymm9
-; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %xmm12
-; AVX512DQ-BW-NEXT: vmovdqa (%rsi), %xmm13
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm24 = xmm13[8],xmm12[8],xmm13[9],xmm12[9],xmm13[10],xmm12[10],xmm13[11],xmm12[11],xmm13[12],xmm12[12],xmm13[13],xmm12[13],xmm13[14],xmm12[14],xmm13[15],xmm12[15]
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm23 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512DQ-BW-NEXT: vpshufb %xmm23, %xmm24, %xmm24
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm24 = ymm24[0,1,0,1]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm9, %zmm24, %zmm9
-; AVX512DQ-BW-NEXT: movabsq $435749860008887046, %rax # imm = 0x60C183060C18306
-; AVX512DQ-BW-NEXT: kmovq %rax, %k5
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm26, %zmm9 {%k5}
-; AVX512DQ-BW-NEXT: vpshufb %ymm14, %ymm21, %ymm14
-; AVX512DQ-BW-NEXT: vpshufb %ymm15, %ymm18, %ymm15
-; AVX512DQ-BW-NEXT: vporq %ymm14, %ymm15, %ymm24
-; AVX512DQ-BW-NEXT: vmovdqa (%r9), %xmm14
-; AVX512DQ-BW-NEXT: vmovdqa (%r8), %xmm15
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm28 = xmm15[8],xmm14[8],xmm15[9],xmm14[9],xmm15[10],xmm14[10],xmm15[11],xmm14[11],xmm15[12],xmm14[12],xmm15[13],xmm14[13],xmm15[14],xmm14[14],xmm15[15],xmm14[15]
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm26 = [u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512DQ-BW-NEXT: vpshufb %xmm26, %xmm28, %xmm28
+; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm8 = zmm9[2,3,2,3],zmm8[2,3,2,3]
+; AVX512DQ-BW-NEXT: vpermw %zmm0, %zmm29, %zmm9
+; AVX512DQ-BW-NEXT: movabsq $1161999626690365456, %r10 # imm = 0x1020408102040810
+; AVX512DQ-BW-NEXT: kmovq %r10, %k5
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm9, %zmm8 {%k5}
+; AVX512DQ-BW-NEXT: movabsq $2033499346708139548, %r10 # imm = 0x1C3870E1C3870E1C
+; AVX512DQ-BW-NEXT: kmovq %r10, %k5
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm8, %zmm4 {%k5}
+; AVX512DQ-BW-NEXT: vpshufb %ymm11, %ymm15, %ymm8
+; AVX512DQ-BW-NEXT: vpshufb %ymm12, %ymm16, %ymm9
+; AVX512DQ-BW-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX512DQ-BW-NEXT: vmovdqa (%rdx), %xmm9
+; AVX512DQ-BW-NEXT: vmovdqa64 32(%rdx), %xmm17
+; AVX512DQ-BW-NEXT: vmovdqa (%rcx), %xmm10
+; AVX512DQ-BW-NEXT: vmovdqa64 32(%rcx), %xmm18
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm23 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
+; AVX512DQ-BW-NEXT: vpshufb %xmm23, %xmm11, %xmm11
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,1,0,1]
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm8, %zmm11, %zmm26
+; AVX512DQ-BW-NEXT: vpshufb %ymm20, %ymm27, %ymm8
+; AVX512DQ-BW-NEXT: vpshufb %ymm21, %ymm24, %ymm11
+; AVX512DQ-BW-NEXT: vpor %ymm8, %ymm11, %ymm8
+; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %xmm11
+; AVX512DQ-BW-NEXT: vmovdqa64 32(%rdi), %xmm20
+; AVX512DQ-BW-NEXT: vmovdqa (%rsi), %xmm12
+; AVX512DQ-BW-NEXT: vmovdqa64 32(%rsi), %xmm21
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm28 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm25 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512DQ-BW-NEXT: vpshufb %xmm25, %xmm28, %xmm28
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm28 = ymm28[0,1,0,1]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm24, %zmm28, %zmm24
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm28 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6,6,1,6,0,7,1,0,7,14,9,14,8,15,9,8,15]
-; AVX512DQ-BW-NEXT: vpermw %zmm0, %zmm28, %zmm28
-; AVX512DQ-BW-NEXT: movabsq $2323999253380730912, %rax # imm = 0x2040810204081020
-; AVX512DQ-BW-NEXT: kmovq %rax, %k5
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm28, %zmm24 {%k5}
-; AVX512DQ-BW-NEXT: vmovdqa64 32(%rdi), %xmm28
-; AVX512DQ-BW-NEXT: movabsq $4066998693416279096, %rax # imm = 0x3870E1C3870E1C38
-; AVX512DQ-BW-NEXT: kmovq %rax, %k5
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm24, %zmm9 {%k5}
-; AVX512DQ-BW-NEXT: vmovdqa64 32(%rsi), %xmm29
-; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} ymm24 = ymm27[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
-; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm27 = ymm24[2,2,3,3,6,6,7,7]
-; AVX512DQ-BW-NEXT: vpshufb %ymm4, %ymm25, %ymm27 {%k2}
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm28[0],xmm29[0],xmm28[1],xmm29[1],xmm28[2],xmm29[2],xmm28[3],xmm29[3],xmm28[4],xmm29[4],xmm28[5],xmm29[5],xmm28[6],xmm29[6],xmm28[7],xmm29[7]
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm8, %zmm28, %zmm8
+; AVX512DQ-BW-NEXT: movabsq $435749860008887046, %rcx # imm = 0x60C183060C18306
+; AVX512DQ-BW-NEXT: kmovq %rcx, %k5
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm26, %zmm8 {%k5}
+; AVX512DQ-BW-NEXT: vpshufb %ymm13, %ymm22, %ymm13
+; AVX512DQ-BW-NEXT: vpshufb %ymm14, %ymm19, %ymm14
+; AVX512DQ-BW-NEXT: vporq %ymm13, %ymm14, %ymm30
+; AVX512DQ-BW-NEXT: vmovdqa (%r9), %xmm13
+; AVX512DQ-BW-NEXT: vmovdqa64 32(%r9), %xmm26
+; AVX512DQ-BW-NEXT: vmovdqa (%r8), %xmm14
+; AVX512DQ-BW-NEXT: vmovdqa64 32(%r8), %xmm28
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm31 = xmm14[8],xmm13[8],xmm14[9],xmm13[9],xmm14[10],xmm13[10],xmm14[11],xmm13[11],xmm14[12],xmm13[12],xmm14[13],xmm13[13],xmm14[14],xmm13[14],xmm14[15],xmm13[15]
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm29 = [u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512DQ-BW-NEXT: vpshufb %xmm29, %xmm31, %xmm31
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm31 = ymm31[0,1,0,1]
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm30, %zmm31, %zmm30
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm31 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6,6,1,6,0,7,1,0,7,14,9,14,8,15,9,8,15]
+; AVX512DQ-BW-NEXT: vpermw %zmm0, %zmm31, %zmm31
+; AVX512DQ-BW-NEXT: movabsq $2323999253380730912, %rcx # imm = 0x2040810204081020
+; AVX512DQ-BW-NEXT: kmovq %rcx, %k5
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm31, %zmm30 {%k5}
+; AVX512DQ-BW-NEXT: movabsq $4066998693416279096, %rcx # imm = 0x3870E1C3870E1C38
+; AVX512DQ-BW-NEXT: kmovq %rcx, %k5
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm30, %zmm8 {%k5}
+; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} ymm27 = ymm27[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
+; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm27 = ymm27[2,2,3,3,6,6,7,7]
+; AVX512DQ-BW-NEXT: vpshufb %ymm3, %ymm24, %ymm27 {%k2}
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm20[0],xmm21[0],xmm20[1],xmm21[1],xmm20[2],xmm21[2],xmm20[3],xmm21[3],xmm20[4],xmm21[4],xmm20[5],xmm21[5],xmm20[6],xmm21[6],xmm20[7],xmm21[7]
; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm24 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512DQ-BW-NEXT: vpshufb %xmm24, %xmm4, %xmm4
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm27 = zmm27[2,3,2,3],zmm4[0,1,0,1]
-; AVX512DQ-BW-NEXT: vmovdqa64 32(%r9), %xmm25
-; AVX512DQ-BW-NEXT: vpshufb %ymm6, %ymm17, %ymm4
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm19[0],xmm20[0],xmm19[1],xmm20[1],xmm19[2],xmm20[2],xmm19[3],xmm20[3],xmm19[4],xmm20[4],xmm19[5],xmm20[5],xmm19[6],xmm20[6],xmm19[7],xmm20[7]
-; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} ymm16 = ymm16[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm16 = ymm16[0,2,3,3,4,6,7,7]
-; AVX512DQ-BW-NEXT: vmovdqu8 %ymm16, %ymm4 {%k3}
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm16 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512DQ-BW-NEXT: vpshufb %xmm16, %xmm6, %xmm6
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm4 = zmm4[2,3,2,3],zmm6[0,1,0,1]
-; AVX512DQ-BW-NEXT: vmovdqa64 32(%r8), %xmm17
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm27, %zmm4 {%k4}
-; AVX512DQ-BW-NEXT: vpshufb %ymm7, %ymm21, %ymm6
-; AVX512DQ-BW-NEXT: vpshufb %ymm8, %ymm18, %ymm7
-; AVX512DQ-BW-NEXT: vpor %ymm6, %ymm7, %ymm7
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm17[0],xmm25[0],xmm17[1],xmm25[1],xmm17[2],xmm25[2],xmm17[3],xmm25[3],xmm17[4],xmm25[4],xmm17[5],xmm25[5],xmm17[6],xmm25[6],xmm17[7],xmm25[7]
-; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm6 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512DQ-BW-NEXT: vpshufb %xmm6, %xmm8, %xmm8
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm7[2,3,2,3],zmm8[0,1,0,1]
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm8 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
-; AVX512DQ-BW-NEXT: vpermw %zmm0, %zmm8, %zmm8
-; AVX512DQ-BW-NEXT: movabsq $580999813345182728, %rax # imm = 0x810204081020408
-; AVX512DQ-BW-NEXT: kmovq %rax, %k2
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm8, %zmm7 {%k2}
-; AVX512DQ-BW-NEXT: movabsq $1016749673354069774, %rax # imm = 0xE1C3870E1C3870E
-; AVX512DQ-BW-NEXT: kmovq %rax, %k2
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm7, %zmm4 {%k2}
-; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm7 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
-; AVX512DQ-BW-NEXT: vpshufb %xmm7, %xmm20, %xmm8
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm18 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
-; AVX512DQ-BW-NEXT: vpshufb %xmm18, %xmm19, %xmm21
-; AVX512DQ-BW-NEXT: vporq %xmm8, %xmm21, %xmm8
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm19 = xmm20[8],xmm19[8],xmm20[9],xmm19[9],xmm20[10],xmm19[10],xmm20[11],xmm19[11],xmm20[12],xmm19[12],xmm20[13],xmm19[13],xmm20[14],xmm19[14],xmm20[15],xmm19[15]
-; AVX512DQ-BW-NEXT: vpshufb %xmm22, %xmm19, %xmm19
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm8 = zmm8[0,1,0,1],zmm19[0,1,0,1]
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm19 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
-; AVX512DQ-BW-NEXT: vpshufb %xmm19, %xmm29, %xmm20
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm21 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
-; AVX512DQ-BW-NEXT: vpshufb %xmm21, %xmm28, %xmm22
-; AVX512DQ-BW-NEXT: vporq %xmm20, %xmm22, %xmm20
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm22 = xmm29[8],xmm28[8],xmm29[9],xmm28[9],xmm29[10],xmm28[10],xmm29[11],xmm28[11],xmm29[12],xmm28[12],xmm29[13],xmm28[13],xmm29[14],xmm28[14],xmm29[15],xmm28[15]
-; AVX512DQ-BW-NEXT: vpshufb %xmm23, %xmm22, %xmm22
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm20 = zmm20[0,1,0,1],zmm22[0,1,0,1]
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm8, %zmm20 {%k1}
-; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm8 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
-; AVX512DQ-BW-NEXT: vpshufb %xmm8, %xmm25, %xmm22
-; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm23 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
-; AVX512DQ-BW-NEXT: vpshufb %xmm23, %xmm17, %xmm27
-; AVX512DQ-BW-NEXT: vporq %xmm22, %xmm27, %xmm22
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm17 = xmm17[8],xmm25[8],xmm17[9],xmm25[9],xmm17[10],xmm25[10],xmm17[11],xmm25[11],xmm17[12],xmm25[12],xmm17[13],xmm25[13],xmm17[14],xmm25[14],xmm17[15],xmm25[15]
-; AVX512DQ-BW-NEXT: vpshufb %xmm26, %xmm17, %xmm17
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm17 = zmm22[0,1,0,1],zmm17[0,1,0,1]
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm22 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4,4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
-; AVX512DQ-BW-NEXT: vpermw %zmm3, %zmm22, %zmm3
-; AVX512DQ-BW-NEXT: movabsq $290499906672591364, %rax # imm = 0x408102040810204
-; AVX512DQ-BW-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm3, %zmm17 {%k1}
-; AVX512DQ-BW-NEXT: movabsq $-8714997200177740921, %rax # imm = 0x870E1C3870E1C387
-; AVX512DQ-BW-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm17, %zmm20 {%k1}
-; AVX512DQ-BW-NEXT: vpshufb %xmm7, %xmm11, %xmm3
-; AVX512DQ-BW-NEXT: vpshufb %xmm18, %xmm10, %xmm7
-; AVX512DQ-BW-NEXT: vpor %xmm3, %xmm7, %xmm3
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3],xmm10[4],xmm11[4],xmm10[5],xmm11[5],xmm10[6],xmm11[6],xmm10[7],xmm11[7]
-; AVX512DQ-BW-NEXT: vpshufb %xmm16, %xmm7, %xmm7
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm7[0,1,0,1],zmm3[0,1,0,1]
-; AVX512DQ-BW-NEXT: vpshufb %xmm19, %xmm13, %xmm7
-; AVX512DQ-BW-NEXT: vpshufb %xmm21, %xmm12, %xmm10
-; AVX512DQ-BW-NEXT: vpor %xmm7, %xmm10, %xmm7
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm12[0],xmm13[0],xmm12[1],xmm13[1],xmm12[2],xmm13[2],xmm12[3],xmm13[3],xmm12[4],xmm13[4],xmm12[5],xmm13[5],xmm12[6],xmm13[6],xmm12[7],xmm13[7]
+; AVX512DQ-BW-NEXT: vpshufb %xmm24, %xmm3, %xmm3
+; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm30 = zmm27[2,3,2,3],zmm3[0,1,0,1]
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm17[0],xmm18[0],xmm17[1],xmm18[1],xmm17[2],xmm18[2],xmm17[3],xmm18[3],xmm17[4],xmm18[4],xmm17[5],xmm18[5],xmm17[6],xmm18[6],xmm17[7],xmm18[7]
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm27 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
+; AVX512DQ-BW-NEXT: vpshufb %xmm27, %xmm3, %xmm3
+; AVX512DQ-BW-NEXT: vpshufb %ymm5, %ymm16, %ymm5
+; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} ymm15 = ymm15[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm15 = ymm15[0,2,3,3,4,6,7,7]
+; AVX512DQ-BW-NEXT: vmovdqu8 %ymm15, %ymm5 {%k3}
+; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm5[2,3,2,3],zmm3[0,1,0,1]
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm30, %zmm3 {%k4}
+; AVX512DQ-BW-NEXT: vpshufb %ymm6, %ymm22, %ymm5
+; AVX512DQ-BW-NEXT: vpshufb %ymm7, %ymm19, %ymm6
+; AVX512DQ-BW-NEXT: vpor %ymm5, %ymm6, %ymm6
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm28[0],xmm26[0],xmm28[1],xmm26[1],xmm28[2],xmm26[2],xmm28[3],xmm26[3],xmm28[4],xmm26[4],xmm28[5],xmm26[5],xmm28[6],xmm26[6],xmm28[7],xmm26[7]
+; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm5 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512DQ-BW-NEXT: vpshufb %xmm5, %xmm7, %xmm7
+; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm6 = zmm6[2,3,2,3],zmm7[0,1,0,1]
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm7 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
+; AVX512DQ-BW-NEXT: vpermw %zmm0, %zmm7, %zmm7
+; AVX512DQ-BW-NEXT: movabsq $580999813345182728, %rcx # imm = 0x810204081020408
+; AVX512DQ-BW-NEXT: kmovq %rcx, %k2
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm7, %zmm6 {%k2}
+; AVX512DQ-BW-NEXT: movabsq $1016749673354069774, %rcx # imm = 0xE1C3870E1C3870E
+; AVX512DQ-BW-NEXT: kmovq %rcx, %k2
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm6, %zmm3 {%k2}
+; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm6 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
+; AVX512DQ-BW-NEXT: vpshufb %xmm6, %xmm18, %xmm7
+; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm15 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
+; AVX512DQ-BW-NEXT: vpshufb %xmm15, %xmm17, %xmm16
+; AVX512DQ-BW-NEXT: vporq %xmm7, %xmm16, %xmm7
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm16 = xmm18[8],xmm17[8],xmm18[9],xmm17[9],xmm18[10],xmm17[10],xmm18[11],xmm17[11],xmm18[12],xmm17[12],xmm18[13],xmm17[13],xmm18[14],xmm17[14],xmm18[15],xmm17[15]
+; AVX512DQ-BW-NEXT: vpshufb %xmm23, %xmm16, %xmm16
+; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm7[0,1,0,1],zmm16[0,1,0,1]
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm16 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
+; AVX512DQ-BW-NEXT: vpshufb %xmm16, %xmm21, %xmm17
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm18 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
+; AVX512DQ-BW-NEXT: vpshufb %xmm18, %xmm20, %xmm19
+; AVX512DQ-BW-NEXT: vporq %xmm17, %xmm19, %xmm17
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm19 = xmm21[8],xmm20[8],xmm21[9],xmm20[9],xmm21[10],xmm20[10],xmm21[11],xmm20[11],xmm21[12],xmm20[12],xmm21[13],xmm20[13],xmm21[14],xmm20[14],xmm21[15],xmm20[15]
+; AVX512DQ-BW-NEXT: vpshufb %xmm25, %xmm19, %xmm19
+; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm17 = zmm17[0,1,0,1],zmm19[0,1,0,1]
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm7, %zmm17 {%k1}
+; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm7 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
+; AVX512DQ-BW-NEXT: vpshufb %xmm7, %xmm26, %xmm19
+; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm20 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
+; AVX512DQ-BW-NEXT: vpshufb %xmm20, %xmm28, %xmm21
+; AVX512DQ-BW-NEXT: vporq %xmm19, %xmm21, %xmm19
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm21 = xmm28[8],xmm26[8],xmm28[9],xmm26[9],xmm28[10],xmm26[10],xmm28[11],xmm26[11],xmm28[12],xmm26[12],xmm28[13],xmm26[13],xmm28[14],xmm26[14],xmm28[15],xmm26[15]
+; AVX512DQ-BW-NEXT: vpshufb %xmm29, %xmm21, %xmm21
+; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm19 = zmm19[0,1,0,1],zmm21[0,1,0,1]
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm21 = [18,18,18,20,18,18,18,20,19,19,19,19,18,18,18,20,20,21,20,21,21,22,21,22,20,21,20,21,21,22,21,22]
+; AVX512DQ-BW-NEXT: vpermw %zmm0, %zmm21, %zmm21
+; AVX512DQ-BW-NEXT: movabsq $290499906672591364, %rcx # imm = 0x408102040810204
+; AVX512DQ-BW-NEXT: kmovq %rcx, %k1
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm21, %zmm19 {%k1}
+; AVX512DQ-BW-NEXT: movabsq $-8714997200177740921, %rcx # imm = 0x870E1C3870E1C387
+; AVX512DQ-BW-NEXT: kmovq %rcx, %k1
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm19, %zmm17 {%k1}
+; AVX512DQ-BW-NEXT: vpshufb %xmm6, %xmm10, %xmm6
+; AVX512DQ-BW-NEXT: vpshufb %xmm15, %xmm9, %xmm15
+; AVX512DQ-BW-NEXT: vpor %xmm6, %xmm15, %xmm6
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3],xmm9[4],xmm10[4],xmm9[5],xmm10[5],xmm9[6],xmm10[6],xmm9[7],xmm10[7]
+; AVX512DQ-BW-NEXT: vpshufb %xmm27, %xmm9, %xmm9
+; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm6 = zmm9[0,1,0,1],zmm6[0,1,0,1]
+; AVX512DQ-BW-NEXT: vpshufb %xmm16, %xmm12, %xmm9
+; AVX512DQ-BW-NEXT: vpshufb %xmm18, %xmm11, %xmm10
+; AVX512DQ-BW-NEXT: vpor %xmm9, %xmm10, %xmm9
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm11[0],xmm12[0],xmm11[1],xmm12[1],xmm11[2],xmm12[2],xmm11[3],xmm12[3],xmm11[4],xmm12[4],xmm11[5],xmm12[5],xmm11[6],xmm12[6],xmm11[7],xmm12[7]
; AVX512DQ-BW-NEXT: vpshufb %xmm24, %xmm10, %xmm10
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm10[0,1,0,1],zmm7[0,1,0,1]
-; AVX512DQ-BW-NEXT: movabsq $871499720017774092, %rax # imm = 0xC183060C183060C
-; AVX512DQ-BW-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm3, %zmm7 {%k1}
-; AVX512DQ-BW-NEXT: vpshufb %xmm8, %xmm14, %xmm3
-; AVX512DQ-BW-NEXT: vpshufb %xmm23, %xmm15, %xmm8
-; AVX512DQ-BW-NEXT: vpor %xmm3, %xmm8, %xmm3
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7]
-; AVX512DQ-BW-NEXT: vpshufb %xmm6, %xmm8, %xmm6
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm6[0,1,0,1],zmm3[0,1,0,1]
+; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm10[0,1,0,1],zmm9[0,1,0,1]
+; AVX512DQ-BW-NEXT: movabsq $871499720017774092, %rcx # imm = 0xC183060C183060C
+; AVX512DQ-BW-NEXT: kmovq %rcx, %k1
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm6, %zmm9 {%k1}
+; AVX512DQ-BW-NEXT: vpshufb %xmm7, %xmm13, %xmm6
+; AVX512DQ-BW-NEXT: vpshufb %xmm20, %xmm14, %xmm7
+; AVX512DQ-BW-NEXT: vpor %xmm6, %xmm7, %xmm6
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
+; AVX512DQ-BW-NEXT: vpshufb %xmm5, %xmm7, %xmm5
+; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,0,1],zmm6[0,1,0,1]
; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm6 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4]
; AVX512DQ-BW-NEXT: vpermw %zmm0, %zmm6, %zmm0
-; AVX512DQ-BW-NEXT: movabsq $4647998506761461824, %rax # imm = 0x4081020408102040
-; AVX512DQ-BW-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm0, %zmm3 {%k1}
-; AVX512DQ-BW-NEXT: movabsq $8133997386832558192, %rax # imm = 0x70E1C3870E1C3870
-; AVX512DQ-BW-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm3, %zmm7 {%k1}
-; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm7, (%rax)
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm20, 256(%rax)
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm5, 128(%rax)
+; AVX512DQ-BW-NEXT: movabsq $4647998506761461824, %rcx # imm = 0x4081020408102040
+; AVX512DQ-BW-NEXT: kmovq %rcx, %k1
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm0, %zmm5 {%k1}
+; AVX512DQ-BW-NEXT: movabsq $8133997386832558192, %rcx # imm = 0x70E1C3870E1C3870
+; AVX512DQ-BW-NEXT: kmovq %rcx, %k1
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm5, %zmm9 {%k1}
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, (%rax)
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm17, 256(%rax)
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm4, 128(%rax)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm2, 320(%rax)
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm4, 192(%rax)
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, 64(%rax)
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm3, 192(%rax)
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm8, 64(%rax)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm1, 384(%rax)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
@@ -10828,222 +10828,222 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP-LABEL: store_i8_stride7_vf64:
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rcx), %ymm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rax), %zmm0
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rcx), %ymm2
; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [27,128,128,128,128,30,128,28,128,128,128,128,31,128,29,128,27,128,128,128,128,30,128,28,128,128,128,128,31,128,29,128]
; AVX512DQ-BW-FCP-NEXT: # ymm3 = mem[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm3, %ymm1, %ymm0
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdx), %ymm11
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdx), %ymm13
; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,30,128,28,128,128,128,128,31,128,29,128,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm4, %ymm11, %ymm2
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm0, %ymm2, %ymm0
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm8, %ymm11, %ymm2
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm4, %ymm13, %ymm5
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm1, %ymm5, %ymm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm7, %ymm13, %ymm5
; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
; AVX512DQ-BW-FCP-NEXT: # ymm10 = mem[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm10, %ymm1, %ymm5
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm2, %ymm5, %ymm2
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[2,3,2,3],zmm0[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rdi), %ymm16
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,28,29,30,128,28,128,30,31,30,31,128,29,128,31,28,29]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm7, %ymm16, %ymm0
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rsi), %ymm17
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm10, %ymm2, %ymm6
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm5, %ymm6, %ymm5
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm5[2,3,2,3],zmm1[2,3,2,3]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm15
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,28,29,30,128,28,128,30,31,30,31,128,29,128,31,28,29]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm8, %ymm15, %ymm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rsi), %ymm16
; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [128,128,128,30,128,28,128,128,128,128,31,128,29,128,128,128,128,128,128,30,128,28,128,128,128,128,31,128,29,128,128,128]
; AVX512DQ-BW-FCP-NEXT: # ymm9 = mem[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm9, %ymm17, %ymm5
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm0, %ymm5, %ymm0
-; AVX512DQ-BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} ymm19 = [25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
-; AVX512DQ-BW-FCP-NEXT: # ymm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm19, %ymm17, %ymm5
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm20 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm20, %ymm16, %ymm6
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm5, %ymm6, %ymm5
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm5[2,3,2,3],zmm0[2,3,2,3]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm9, %ymm16, %ymm6
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm1, %ymm6, %ymm1
+; AVX512DQ-BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} ymm18 = [25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
+; AVX512DQ-BW-FCP-NEXT: # ymm18 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm18, %ymm16, %ymm6
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm19 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm19, %ymm15, %ymm11
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm6, %ymm11, %ymm6
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm6[2,3,2,3],zmm1[2,3,2,3]
; AVX512DQ-BW-FCP-NEXT: movabsq $1742999440035548184, %r10 # imm = 0x183060C183060C18
; AVX512DQ-BW-FCP-NEXT: kmovq %r10, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm2, %zmm0 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%r9), %ymm18
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm5, %zmm1 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%r9), %ymm17
; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [29,128,27,128,128,128,128,30,128,28,128,128,128,128,31,128,29,128,27,128,128,128,128,30,128,28,128,128,128,128,31,128]
; AVX512DQ-BW-FCP-NEXT: # ymm5 = mem[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm5, %ymm18, %ymm2
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm5, %ymm17, %ymm11
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%r8), %ymm22
; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,27,128,128,128,128,30,128,28,128,128,128,128,31,128,29]
; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm6, %ymm22, %ymm12
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm2, %ymm12, %ymm2
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm12, %ymm22, %ymm14
-; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm13 = [128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
-; AVX512DQ-BW-FCP-NEXT: # ymm13 = mem[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm13, %ymm18, %ymm15
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm14, %ymm15, %ymm14
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm14[2,3,2,3],zmm2[2,3,2,3]
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm11, %ymm12, %ymm11
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm20 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm20, %ymm22, %ymm12
+; AVX512DQ-BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} ymm21 = [128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
+; AVX512DQ-BW-FCP-NEXT: # ymm21 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm21, %ymm17, %ymm14
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm12, %ymm14, %ymm12
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm11 = zmm12[2,3,2,3],zmm11[2,3,2,3]
; AVX512DQ-BW-FCP-NEXT: movabsq $6971997760142192736, %r10 # imm = 0x60C183060C183060
; AVX512DQ-BW-FCP-NEXT: kmovq %r10, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm2, %zmm0 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rax), %ymm2
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm14 = [11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12,14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15]
-; AVX512DQ-BW-FCP-NEXT: vpermw %zmm2, %zmm14, %zmm14
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm11, %zmm1 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm11 = [27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28,30,29,30,31,31,30,30,31,30,29,30,31,31,30,30,31]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm0, %zmm11, %zmm11
; AVX512DQ-BW-FCP-NEXT: movabsq $-9150747060186627967, %r10 # imm = 0x8102040810204081
; AVX512DQ-BW-FCP-NEXT: kmovq %r10, %k2
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm14, %zmm0 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm14 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm14, %ymm11, %ymm15
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm21 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm21, %ymm1, %ymm23
-; AVX512DQ-BW-FCP-NEXT: vporq %ymm15, %ymm23, %ymm15
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm11, %zmm1 {%k2}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm11, %ymm13, %ymm14
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm12, %ymm2, %ymm23
+; AVX512DQ-BW-FCP-NEXT: vporq %ymm14, %ymm23, %ymm14
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm24 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm24, %ymm11, %ymm11
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm24, %ymm13, %ymm13
; AVX512DQ-BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} ymm25 = [128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20]
; AVX512DQ-BW-FCP-NEXT: # ymm25 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm25, %ymm1, %ymm1
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm1, %ymm11, %ymm1
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm15, %zmm23
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm11, %ymm16, %ymm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm15 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm15, %ymm17, %ymm26
-; AVX512DQ-BW-FCP-NEXT: vporq %ymm1, %ymm26, %ymm1
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm25, %ymm2, %ymm2
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm2, %ymm13, %ymm2
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm14, %zmm23
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm13 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm13, %ymm15, %ymm2
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm14 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm14, %ymm16, %ymm26
+; AVX512DQ-BW-FCP-NEXT: vporq %ymm2, %ymm26, %ymm2
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm26 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm26, %ymm16, %ymm16
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm26, %ymm15, %ymm15
; AVX512DQ-BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} ymm27 = [128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128]
; AVX512DQ-BW-FCP-NEXT: # ymm27 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm27, %ymm17, %ymm17
-; AVX512DQ-BW-FCP-NEXT: vporq %ymm16, %ymm17, %ymm16
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm16 = ymm16[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm16, %zmm1, %zmm1
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm27, %ymm16, %ymm16
+; AVX512DQ-BW-FCP-NEXT: vporq %ymm15, %ymm16, %ymm15
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm15, %zmm2, %zmm2
; AVX512DQ-BW-FCP-NEXT: movabsq $3485998880071096368, %r10 # imm = 0x3060C183060C1830
; AVX512DQ-BW-FCP-NEXT: kmovq %r10, %k2
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm23, %zmm1 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm16 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm16, %ymm18, %ymm23
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm17 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm17, %ymm22, %ymm28
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm23, %zmm2 {%k2}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} ymm15 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm15, %ymm17, %ymm23
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm16 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm16, %ymm22, %ymm28
; AVX512DQ-BW-FCP-NEXT: vporq %ymm23, %ymm28, %ymm23
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} ymm28 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128]
; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm28, %ymm22, %ymm22
; AVX512DQ-BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} ymm29 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
; AVX512DQ-BW-FCP-NEXT: # ymm29 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm29, %ymm18, %ymm18
-; AVX512DQ-BW-FCP-NEXT: vporq %ymm22, %ymm18, %ymm18
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm18 = ymm18[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm18, %zmm23, %zmm18
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm29, %ymm17, %ymm17
+; AVX512DQ-BW-FCP-NEXT: vporq %ymm22, %ymm17, %ymm17
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm17 = ymm17[2,3,2,3]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm17, %zmm23, %zmm17
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rax), %ymm22
; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm30 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm2, %ymm30, %ymm22
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm23 = ymm2[12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm22, %zmm23, %zmm22
-; AVX512DQ-BW-FCP-NEXT: movabsq $145249953336295682, %r10 # imm = 0x204081020408102
-; AVX512DQ-BW-FCP-NEXT: kmovq %r10, %k3
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm22, %zmm18 {%k3}
-; AVX512DQ-BW-FCP-NEXT: movabsq $-4357498600088870461, %r10 # imm = 0xC3870E1C3870E1C3
-; AVX512DQ-BW-FCP-NEXT: kmovq %r10, %k3
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm18, %zmm1 {%k3}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rsi), %ymm18
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm19, %ymm18, %ymm19
+; AVX512DQ-BW-FCP-NEXT: vpermw %ymm22, %ymm30, %ymm23
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm22 = ymm22[12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm23, %zmm22, %zmm22
+; AVX512DQ-BW-FCP-NEXT: movabsq $145249953336295682, %rax # imm = 0x204081020408102
+; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm22, %zmm17 {%k3}
+; AVX512DQ-BW-FCP-NEXT: movabsq $-4357498600088870461, %rax # imm = 0xC3870E1C3870E1C3
+; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm17, %zmm2 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rsi), %ymm17
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm18, %ymm17, %ymm18
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %ymm22
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm20, %ymm22, %ymm20
-; AVX512DQ-BW-FCP-NEXT: vporq %ymm19, %ymm20, %ymm19
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm26, %ymm22, %ymm20
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm27, %ymm18, %ymm23
-; AVX512DQ-BW-FCP-NEXT: vporq %ymm20, %ymm23, %ymm20
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm20 = zmm20[2,3,2,3],zmm19[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdx), %ymm19
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm8, %ymm19, %ymm8
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm19, %ymm22, %ymm19
+; AVX512DQ-BW-FCP-NEXT: vporq %ymm18, %ymm19, %ymm18
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm26, %ymm22, %ymm19
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm27, %ymm17, %ymm23
+; AVX512DQ-BW-FCP-NEXT: vporq %ymm19, %ymm23, %ymm19
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm19 = zmm19[2,3,2,3],zmm18[2,3,2,3]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdx), %ymm18
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm7, %ymm18, %ymm7
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rcx), %ymm23
; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm10, %ymm23, %ymm10
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm8, %ymm10, %ymm8
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rax), %zmm10
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm24, %ymm19, %ymm24
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm25, %ymm23, %ymm25
-; AVX512DQ-BW-FCP-NEXT: vporq %ymm24, %ymm25, %ymm24
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm8 = zmm24[2,3,2,3],zmm8[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm20, %zmm8 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%r8), %ymm20
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm12, %ymm20, %ymm12
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%r9), %ymm24
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm13, %ymm24, %ymm13
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm12, %ymm13, %ymm12
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm28, %ymm20, %ymm13
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm29, %ymm24, %ymm25
-; AVX512DQ-BW-FCP-NEXT: vporq %ymm13, %ymm25, %ymm13
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm12 = zmm13[2,3,2,3],zmm12[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT: vpermw %zmm10, %zmm30, %zmm13
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm7, %ymm10, %ymm7
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm24, %ymm18, %ymm10
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm25, %ymm23, %ymm24
+; AVX512DQ-BW-FCP-NEXT: vporq %ymm10, %ymm24, %ymm10
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm10[2,3,2,3],zmm7[2,3,2,3]
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm19, %zmm7 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%r8), %ymm19
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm20, %ymm19, %ymm10
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%r9), %ymm20
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm21, %ymm20, %ymm21
+; AVX512DQ-BW-FCP-NEXT: vporq %ymm10, %ymm21, %ymm10
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm28, %ymm19, %ymm21
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm29, %ymm20, %ymm24
+; AVX512DQ-BW-FCP-NEXT: vporq %ymm21, %ymm24, %ymm21
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm10 = zmm21[2,3,2,3],zmm10[2,3,2,3]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm0, %zmm30, %zmm21
; AVX512DQ-BW-FCP-NEXT: movabsq $1161999626690365456, %rax # imm = 0x1020408102040810
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm13, %zmm12 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm21, %zmm10 {%k3}
; AVX512DQ-BW-FCP-NEXT: movabsq $2033499346708139548, %rax # imm = 0x1C3870E1C3870E1C
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm12, %zmm8 {%k3}
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm14, %ymm19, %ymm12
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm21, %ymm23, %ymm13
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm12, %ymm13, %ymm14
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %xmm12
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %xmm13
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm25 = xmm13[8],xmm12[8],xmm13[9],xmm12[9],xmm13[10],xmm12[10],xmm13[11],xmm12[11],xmm13[12],xmm12[12],xmm13[13],xmm12[13],xmm13[14],xmm12[14],xmm13[15],xmm12[15]
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm10, %zmm7 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm11, %ymm18, %ymm10
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm12, %ymm23, %ymm11
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm10, %ymm11, %ymm10
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %xmm11
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %xmm12
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm24 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm21 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm21, %xmm25, %xmm25
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm25 = ymm25[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm14, %zmm25, %zmm26
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm11, %ymm22, %ymm11
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm15, %ymm18, %ymm14
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm11, %ymm14, %ymm11
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %xmm14
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %xmm15
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm27 = xmm15[8],xmm14[8],xmm15[9],xmm14[9],xmm15[10],xmm14[10],xmm15[11],xmm14[11],xmm15[12],xmm14[12],xmm15[13],xmm14[13],xmm15[14],xmm14[14],xmm15[15],xmm14[15]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm25 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm25, %xmm27, %xmm27
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm27 = ymm27[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm27, %zmm11
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm21, %xmm24, %xmm24
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm24 = ymm24[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm24, %zmm25
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm13, %ymm22, %ymm10
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm14, %ymm17, %ymm13
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm10, %ymm13, %ymm10
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %xmm13
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %xmm14
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm26 = xmm14[8],xmm13[8],xmm14[9],xmm13[9],xmm14[10],xmm13[10],xmm14[11],xmm13[11],xmm14[12],xmm13[12],xmm14[13],xmm13[13],xmm14[14],xmm13[14],xmm14[15],xmm13[15]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm24 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm24, %xmm26, %xmm26
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm26 = ymm26[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm26, %zmm10
; AVX512DQ-BW-FCP-NEXT: movabsq $435749860008887046, %rax # imm = 0x60C183060C18306
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm26, %zmm11 {%k3}
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm16, %ymm24, %ymm16
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm17, %ymm20, %ymm17
-; AVX512DQ-BW-FCP-NEXT: vporq %ymm16, %ymm17, %ymm27
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%r9), %xmm16
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%r8), %xmm17
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm28 = xmm17[8],xmm16[8],xmm17[9],xmm16[9],xmm17[10],xmm16[10],xmm17[11],xmm16[11],xmm17[12],xmm16[12],xmm17[13],xmm16[13],xmm17[14],xmm16[14],xmm17[15],xmm16[15]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm26 = [u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm26, %xmm28, %xmm28
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm28 = ymm28[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm27, %zmm28, %zmm27
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm28 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6,6,1,6,0,7,1,0,7,14,9,14,8,15,9,8,15]
-; AVX512DQ-BW-FCP-NEXT: vpermw %zmm10, %zmm28, %zmm28
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm25, %zmm10 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm15, %ymm20, %ymm15
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm16, %ymm19, %ymm16
+; AVX512DQ-BW-FCP-NEXT: vporq %ymm15, %ymm16, %ymm26
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r9), %xmm15
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%r8), %xmm16
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm27 = xmm16[8],xmm15[8],xmm16[9],xmm15[9],xmm16[10],xmm15[10],xmm16[11],xmm15[11],xmm16[12],xmm15[12],xmm16[13],xmm15[13],xmm16[14],xmm15[14],xmm16[15],xmm15[15]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm25 = [u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm25, %xmm27, %xmm27
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm27 = ymm27[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm26, %zmm27, %zmm26
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm27 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6,6,1,6,0,7,1,0,7,14,9,14,8,15,9,8,15]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm0, %zmm27, %zmm27
; AVX512DQ-BW-FCP-NEXT: movabsq $2323999253380730912, %rax # imm = 0x2040810204081020
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm28, %zmm27 {%k3}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rdx), %xmm28
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm27, %zmm26 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rdx), %xmm27
; AVX512DQ-BW-FCP-NEXT: movabsq $4066998693416279096, %rax # imm = 0x3870E1C3870E1C38
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k3
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm27, %zmm11 {%k3}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rdi), %xmm27
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm7, %ymm22, %ymm7
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm26, %zmm10 {%k3}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rdi), %xmm26
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm8, %ymm22, %ymm8
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rsi), %xmm22
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm9, %ymm18, %ymm9
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm7, %ymm9, %ymm9
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm18 = xmm27[0],xmm22[0],xmm27[1],xmm22[1],xmm27[2],xmm22[2],xmm27[3],xmm22[3],xmm27[4],xmm22[4],xmm27[5],xmm22[5],xmm27[6],xmm22[6],xmm27[7],xmm22[7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm7, %xmm18, %xmm18
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm18 = zmm9[2,3,2,3],zmm18[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rcx), %xmm29
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm9, %ymm17, %ymm9
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm8, %ymm9, %ymm9
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm17 = xmm26[0],xmm22[0],xmm26[1],xmm22[1],xmm26[2],xmm22[2],xmm26[3],xmm22[3],xmm26[4],xmm22[4],xmm26[5],xmm22[5],xmm26[6],xmm22[6],xmm26[7],xmm22[7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm8, %xmm17, %xmm17
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm17 = zmm9[2,3,2,3],zmm17[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%rcx), %xmm28
; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm3, %ymm23, %ymm3
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%r9), %xmm23
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm4, %ymm19, %ymm4
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm4, %ymm18, %ymm4
; AVX512DQ-BW-FCP-NEXT: vpor %ymm3, %ymm4, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm28[0],xmm29[0],xmm28[1],xmm29[1],xmm28[2],xmm29[2],xmm28[3],xmm29[3],xmm28[4],xmm29[4],xmm28[5],xmm29[5],xmm28[6],xmm29[6],xmm28[7],xmm29[7]
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm27[0],xmm28[0],xmm27[1],xmm28[1],xmm27[2],xmm28[2],xmm27[3],xmm28[3],xmm27[4],xmm28[4],xmm27[5],xmm28[5],xmm27[6],xmm28[6],xmm27[7],xmm28[7]
; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm9, %xmm4, %xmm4
; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm3[2,3,2,3],zmm4[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%r8), %xmm19
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm18, %zmm3 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm5, %ymm24, %ymm4
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm6, %ymm20, %ymm5
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 32(%r8), %xmm18
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm17, %zmm3 {%k2}
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm5, %ymm20, %ymm4
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm6, %ymm19, %ymm5
; AVX512DQ-BW-FCP-NEXT: vpor %ymm4, %ymm5, %ymm5
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm19[0],xmm23[0],xmm19[1],xmm23[1],xmm19[2],xmm23[2],xmm19[3],xmm23[3],xmm19[4],xmm23[4],xmm19[5],xmm23[5],xmm19[6],xmm23[6],xmm19[7],xmm23[7]
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm18[0],xmm23[0],xmm18[1],xmm23[1],xmm18[2],xmm23[2],xmm18[3],xmm23[3],xmm18[4],xmm23[4],xmm18[5],xmm23[5],xmm18[6],xmm23[6],xmm18[7],xmm23[7]
; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm4, %xmm6, %xmm6
; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm5[2,3,2,3],zmm6[0,1,0,1]
; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm6 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
-; AVX512DQ-BW-FCP-NEXT: vpermw %zmm10, %zmm6, %zmm6
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm0, %zmm6, %zmm6
; AVX512DQ-BW-FCP-NEXT: movabsq $580999813345182728, %rax # imm = 0x810204081020408
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k2
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm5 {%k2}
@@ -11051,75 +11051,75 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k2
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm5, %zmm3 {%k2}
; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm6, %xmm29, %xmm5
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm18 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm18, %xmm28, %xmm20
-; AVX512DQ-BW-FCP-NEXT: vporq %xmm5, %xmm20, %xmm5
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm20 = xmm29[8],xmm28[8],xmm29[9],xmm28[9],xmm29[10],xmm28[10],xmm29[11],xmm28[11],xmm29[12],xmm28[12],xmm29[13],xmm28[13],xmm29[14],xmm28[14],xmm29[15],xmm28[15]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm21, %xmm20, %xmm20
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm20 = zmm5[0,1,0,1],zmm20[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm21 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm21, %xmm22, %xmm5
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm24 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm24, %xmm27, %xmm28
-; AVX512DQ-BW-FCP-NEXT: vporq %xmm5, %xmm28, %xmm5
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm22 = xmm22[8],xmm27[8],xmm22[9],xmm27[9],xmm22[10],xmm27[10],xmm22[11],xmm27[11],xmm22[12],xmm27[12],xmm22[13],xmm27[13],xmm22[14],xmm27[14],xmm22[15],xmm27[15]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm25, %xmm22, %xmm22
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm6, %xmm28, %xmm5
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm17 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm17, %xmm27, %xmm19
+; AVX512DQ-BW-FCP-NEXT: vporq %xmm5, %xmm19, %xmm5
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm19 = xmm28[8],xmm27[8],xmm28[9],xmm27[9],xmm28[10],xmm27[10],xmm28[11],xmm27[11],xmm28[12],xmm27[12],xmm28[13],xmm27[13],xmm28[14],xmm27[14],xmm28[15],xmm27[15]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm21, %xmm19, %xmm19
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm19 = zmm5[0,1,0,1],zmm19[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm20 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm20, %xmm22, %xmm5
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm21 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm21, %xmm26, %xmm27
+; AVX512DQ-BW-FCP-NEXT: vporq %xmm5, %xmm27, %xmm5
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm22 = xmm22[8],xmm26[8],xmm22[9],xmm26[9],xmm22[10],xmm26[10],xmm22[11],xmm26[11],xmm22[12],xmm26[12],xmm22[13],xmm26[13],xmm22[14],xmm26[14],xmm22[15],xmm26[15]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm24, %xmm22, %xmm22
; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,0,1],zmm22[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm20, %zmm5 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm20 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm20, %xmm23, %xmm22
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm25 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm25, %xmm19, %xmm27
-; AVX512DQ-BW-FCP-NEXT: vporq %xmm22, %xmm27, %xmm22
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm19 = xmm19[8],xmm23[8],xmm19[9],xmm23[9],xmm19[10],xmm23[10],xmm19[11],xmm23[11],xmm19[12],xmm23[12],xmm19[13],xmm23[13],xmm19[14],xmm23[14],xmm19[15],xmm23[15]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm26, %xmm19, %xmm19
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm19 = zmm22[0,1,0,1],zmm19[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm22 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4,4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
-; AVX512DQ-BW-FCP-NEXT: vpermw %zmm2, %zmm22, %zmm2
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm19, %zmm5 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm19 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm19, %xmm23, %xmm22
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm24 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm24, %xmm18, %xmm26
+; AVX512DQ-BW-FCP-NEXT: vporq %xmm22, %xmm26, %xmm22
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm18 = xmm18[8],xmm23[8],xmm18[9],xmm23[9],xmm18[10],xmm23[10],xmm18[11],xmm23[11],xmm18[12],xmm23[12],xmm18[13],xmm23[13],xmm18[14],xmm23[14],xmm18[15],xmm23[15]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm25, %xmm18, %xmm18
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm18 = zmm22[0,1,0,1],zmm18[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm22 = [18,18,18,20,18,18,18,20,19,19,19,19,18,18,18,20,20,21,20,21,21,22,21,22,20,21,20,21,21,22,21,22]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm0, %zmm22, %zmm22
; AVX512DQ-BW-FCP-NEXT: movabsq $290499906672591364, %rax # imm = 0x408102040810204
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm2, %zmm19 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm22, %zmm18 {%k1}
; AVX512DQ-BW-FCP-NEXT: movabsq $-8714997200177740921, %rax # imm = 0x870E1C3870E1C387
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm19, %zmm5 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm6, %xmm13, %xmm2
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm18, %xmm12, %xmm6
-; AVX512DQ-BW-FCP-NEXT: vpor %xmm2, %xmm6, %xmm2
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm12[0],xmm13[0],xmm12[1],xmm13[1],xmm12[2],xmm13[2],xmm12[3],xmm13[3],xmm12[4],xmm13[4],xmm12[5],xmm13[5],xmm12[6],xmm13[6],xmm12[7],xmm13[7]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm9, %xmm6, %xmm6
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm6[0,1,0,1],zmm2[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm21, %xmm15, %xmm6
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm24, %xmm14, %xmm9
-; AVX512DQ-BW-FCP-NEXT: vpor %xmm6, %xmm9, %xmm6
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm14[0],xmm15[0],xmm14[1],xmm15[1],xmm14[2],xmm15[2],xmm14[3],xmm15[3],xmm14[4],xmm15[4],xmm14[5],xmm15[5],xmm14[6],xmm15[6],xmm14[7],xmm15[7]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm7, %xmm9, %xmm7
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm6 = zmm7[0,1,0,1],zmm6[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm18, %zmm5 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm6, %xmm12, %xmm6
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm17, %xmm11, %xmm17
+; AVX512DQ-BW-FCP-NEXT: vporq %xmm6, %xmm17, %xmm6
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm11[0],xmm12[0],xmm11[1],xmm12[1],xmm11[2],xmm12[2],xmm11[3],xmm12[3],xmm11[4],xmm12[4],xmm11[5],xmm12[5],xmm11[6],xmm12[6],xmm11[7],xmm12[7]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm9, %xmm11, %xmm9
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm6 = zmm9[0,1,0,1],zmm6[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm20, %xmm14, %xmm9
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm21, %xmm13, %xmm11
+; AVX512DQ-BW-FCP-NEXT: vpor %xmm9, %xmm11, %xmm9
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm13[0],xmm14[0],xmm13[1],xmm14[1],xmm13[2],xmm14[2],xmm13[3],xmm14[3],xmm13[4],xmm14[4],xmm13[5],xmm14[5],xmm13[6],xmm14[6],xmm13[7],xmm14[7]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm8, %xmm11, %xmm8
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm8 = zmm8[0,1,0,1],zmm9[0,1,0,1]
; AVX512DQ-BW-FCP-NEXT: movabsq $871499720017774092, %rax # imm = 0xC183060C183060C
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm2, %zmm6 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm20, %xmm16, %xmm2
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm25, %xmm17, %xmm7
-; AVX512DQ-BW-FCP-NEXT: vpor %xmm2, %xmm7, %xmm2
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm17[0],xmm16[0],xmm17[1],xmm16[1],xmm17[2],xmm16[2],xmm17[3],xmm16[3],xmm17[4],xmm16[4],xmm17[5],xmm16[5],xmm17[6],xmm16[6],xmm17[7],xmm16[7]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm4, %xmm7, %xmm4
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm4[0,1,0,1],zmm2[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm4 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4]
-; AVX512DQ-BW-FCP-NEXT: vpermw %zmm10, %zmm4, %zmm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm8 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm19, %xmm15, %xmm6
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm24, %xmm16, %xmm9
+; AVX512DQ-BW-FCP-NEXT: vpor %xmm6, %xmm9, %xmm6
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm16[0],xmm15[0],xmm16[1],xmm15[1],xmm16[2],xmm15[2],xmm16[3],xmm15[3],xmm16[4],xmm15[4],xmm16[5],xmm15[5],xmm16[6],xmm15[6],xmm16[7],xmm15[7]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm4, %xmm9, %xmm4
+; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm4 = zmm4[0,1,0,1],zmm6[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm6 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm0, %zmm6, %zmm0
; AVX512DQ-BW-FCP-NEXT: movabsq $4647998506761461824, %rax # imm = 0x4081020408102040
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm4, %zmm2 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm0, %zmm4 {%k1}
; AVX512DQ-BW-FCP-NEXT: movabsq $8133997386832558192, %rax # imm = 0x70E1C3870E1C3870
; AVX512DQ-BW-FCP-NEXT: kmovq %rax, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm2, %zmm6 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm4, %zmm8 {%k1}
; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm8, 128(%rax)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm6, (%rax)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm1, 320(%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm7, 128(%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm8, (%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm2, 320(%rax)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm5, 256(%rax)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm3, 192(%rax)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, 384(%rax)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm11, 64(%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm1, 384(%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm10, 64(%rax)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%in.vec0 = load <64 x i8>, ptr %in.vecptr0, align 64
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v32.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v32.ll
index d8ee8103cee50..dbbd6b19b2829 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-256-v32.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v32.ll
@@ -61,23 +61,11 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX512VLBW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
; AVX512VLBW-NEXT: retq
;
-; AVX512VLVBMI-SLOW-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00:
-; AVX512VLVBMI-SLOW: # %bb.0:
-; AVX512VLVBMI-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0]
-; AVX512VLVBMI-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
-; AVX512VLVBMI-SLOW-NEXT: retq
-;
-; AVX512VLVBMI-FAST-ALL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00:
-; AVX512VLVBMI-FAST-ALL: # %bb.0:
-; AVX512VLVBMI-FAST-ALL-NEXT: vpmovsxbw {{.*#+}} ymm1 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1]
-; AVX512VLVBMI-FAST-ALL-NEXT: vpermb %ymm0, %ymm1, %ymm0
-; AVX512VLVBMI-FAST-ALL-NEXT: retq
-;
-; AVX512VLVBMI-FAST-PERLANE-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00:
-; AVX512VLVBMI-FAST-PERLANE: # %bb.0:
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: retq
+; AVX512VLVBMI-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00:
+; AVX512VLVBMI: # %bb.0:
+; AVX512VLVBMI-NEXT: vpmovsxbw {{.*#+}} ymm1 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1]
+; AVX512VLVBMI-NEXT: vpermb %ymm0, %ymm1, %ymm0
+; AVX512VLVBMI-NEXT: retq
;
; XOPAVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00:
; XOPAVX1: # %bb.0:
@@ -117,23 +105,11 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX512VLBW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
; AVX512VLBW-NEXT: retq
;
-; AVX512VLVBMI-SLOW-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00:
-; AVX512VLVBMI-SLOW: # %bb.0:
-; AVX512VLVBMI-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0]
-; AVX512VLVBMI-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
-; AVX512VLVBMI-SLOW-NEXT: retq
-;
-; AVX512VLVBMI-FAST-ALL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00:
-; AVX512VLVBMI-FAST-ALL: # %bb.0:
-; AVX512VLVBMI-FAST-ALL-NEXT: vpmovsxwd {{.*#+}} ymm1 = [0,0,0,0,0,0,0,512]
-; AVX512VLVBMI-FAST-ALL-NEXT: vpermb %ymm0, %ymm1, %ymm0
-; AVX512VLVBMI-FAST-ALL-NEXT: retq
-;
-; AVX512VLVBMI-FAST-PERLANE-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00:
-; AVX512VLVBMI-FAST-PERLANE: # %bb.0:
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: retq
+; AVX512VLVBMI-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00:
+; AVX512VLVBMI: # %bb.0:
+; AVX512VLVBMI-NEXT: vpmovsxwd {{.*#+}} ymm1 = [0,0,0,0,0,0,0,512]
+; AVX512VLVBMI-NEXT: vpermb %ymm0, %ymm1, %ymm0
+; AVX512VLVBMI-NEXT: retq
;
; XOPAVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00:
; XOPAVX1: # %bb.0:
@@ -173,23 +149,11 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX512VLBW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
; AVX512VLBW-NEXT: retq
;
-; AVX512VLVBMI-SLOW-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_03_00_00_00:
-; AVX512VLVBMI-SLOW: # %bb.0:
-; AVX512VLVBMI-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0]
-; AVX512VLVBMI-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
-; AVX512VLVBMI-SLOW-NEXT: retq
-;
-; AVX512VLVBMI-FAST-ALL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_03_00_00_00:
-; AVX512VLVBMI-FAST-ALL: # %bb.0:
-; AVX512VLVBMI-FAST-ALL-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,0,0,0,0,0,0,3]
-; AVX512VLVBMI-FAST-ALL-NEXT: vpermb %ymm0, %ymm1, %ymm0
-; AVX512VLVBMI-FAST-ALL-NEXT: retq
-;
-; AVX512VLVBMI-FAST-PERLANE-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_03_00_00_00:
-; AVX512VLVBMI-FAST-PERLANE: # %bb.0:
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: retq
+; AVX512VLVBMI-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_03_00_00_00:
+; AVX512VLVBMI: # %bb.0:
+; AVX512VLVBMI-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,0,0,0,0,0,0,3]
+; AVX512VLVBMI-NEXT: vpermb %ymm0, %ymm1, %ymm0
+; AVX512VLVBMI-NEXT: retq
;
; XOPAVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_03_00_00_00:
; XOPAVX1: # %bb.0:
@@ -229,23 +193,11 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX512VLBW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
; AVX512VLBW-NEXT: retq
;
-; AVX512VLVBMI-SLOW-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_04_00_00_00_00:
-; AVX512VLVBMI-SLOW: # %bb.0:
-; AVX512VLVBMI-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,0]
-; AVX512VLVBMI-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
-; AVX512VLVBMI-SLOW-NEXT: retq
-;
-; AVX512VLVBMI-FAST-ALL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_04_00_00_00_00:
-; AVX512VLVBMI-FAST-ALL: # %bb.0:
-; AVX512VLVBMI-FAST-ALL-NEXT: vpmovsxdq {{.*#+}} ymm1 = [0,0,0,67108864]
-; AVX512VLVBMI-FAST-ALL-NEXT: vpermb %ymm0, %ymm1, %ymm0
-; AVX512VLVBMI-FAST-ALL-NEXT: retq
-;
-; AVX512VLVBMI-FAST-PERLANE-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_04_00_00_00_00:
-; AVX512VLVBMI-FAST-PERLANE: # %bb.0:
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,0]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: retq
+; AVX512VLVBMI-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_04_00_00_00_00:
+; AVX512VLVBMI: # %bb.0:
+; AVX512VLVBMI-NEXT: vpmovsxdq {{.*#+}} ymm1 = [0,0,0,67108864]
+; AVX512VLVBMI-NEXT: vpermb %ymm0, %ymm1, %ymm0
+; AVX512VLVBMI-NEXT: retq
;
; XOPAVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_04_00_00_00_00:
; XOPAVX1: # %bb.0:
@@ -285,23 +237,11 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX512VLBW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
; AVX512VLBW-NEXT: retq
;
-; AVX512VLVBMI-SLOW-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_05_00_00_00_00_00:
-; AVX512VLVBMI-SLOW: # %bb.0:
-; AVX512VLVBMI-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,5,0,0,0,0,0]
-; AVX512VLVBMI-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
-; AVX512VLVBMI-SLOW-NEXT: retq
-;
-; AVX512VLVBMI-FAST-ALL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_05_00_00_00_00_00:
-; AVX512VLVBMI-FAST-ALL: # %bb.0:
-; AVX512VLVBMI-FAST-ALL-NEXT: vpmovsxbw {{.*#+}} ymm1 = [0,0,0,0,0,0,0,0,0,0,0,0,0,5,0,0]
-; AVX512VLVBMI-FAST-ALL-NEXT: vpermb %ymm0, %ymm1, %ymm0
-; AVX512VLVBMI-FAST-ALL-NEXT: retq
-;
-; AVX512VLVBMI-FAST-PERLANE-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_05_00_00_00_00_00:
-; AVX512VLVBMI-FAST-PERLANE: # %bb.0:
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,5,0,0,0,0,0]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: retq
+; AVX512VLVBMI-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_05_00_00_00_00_00:
+; AVX512VLVBMI: # %bb.0:
+; AVX512VLVBMI-NEXT: vpmovsxbw {{.*#+}} ymm1 = [0,0,0,0,0,0,0,0,0,0,0,0,0,5,0,0]
+; AVX512VLVBMI-NEXT: vpermb %ymm0, %ymm1, %ymm0
+; AVX512VLVBMI-NEXT: retq
;
; XOPAVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_05_00_00_00_00_00:
; XOPAVX1: # %bb.0:
@@ -341,23 +281,11 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX512VLBW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
; AVX512VLBW-NEXT: retq
;
-; AVX512VLVBMI-SLOW-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_06_00_00_00_00_00_00:
-; AVX512VLVBMI-SLOW: # %bb.0:
-; AVX512VLVBMI-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,6,0,0,0,0,0,0]
-; AVX512VLVBMI-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
-; AVX512VLVBMI-SLOW-NEXT: retq
-;
-; AVX512VLVBMI-FAST-ALL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_06_00_00_00_00_00_00:
-; AVX512VLVBMI-FAST-ALL: # %bb.0:
-; AVX512VLVBMI-FAST-ALL-NEXT: vpmovsxwq {{.*#+}} ymm1 = [0,0,0,1536]
-; AVX512VLVBMI-FAST-ALL-NEXT: vpermb %ymm0, %ymm1, %ymm0
-; AVX512VLVBMI-FAST-ALL-NEXT: retq
-;
-; AVX512VLVBMI-FAST-PERLANE-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_06_00_00_00_00_00_00:
-; AVX512VLVBMI-FAST-PERLANE: # %bb.0:
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,6,0,0,0,0,0,0]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: retq
+; AVX512VLVBMI-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_06_00_00_00_00_00_00:
+; AVX512VLVBMI: # %bb.0:
+; AVX512VLVBMI-NEXT: vpmovsxwq {{.*#+}} ymm1 = [0,0,0,1536]
+; AVX512VLVBMI-NEXT: vpermb %ymm0, %ymm1, %ymm0
+; AVX512VLVBMI-NEXT: retq
;
; XOPAVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_06_00_00_00_00_00_00:
; XOPAVX1: # %bb.0:
@@ -397,23 +325,11 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX512VLBW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
; AVX512VLBW-NEXT: retq
;
-; AVX512VLVBMI-SLOW-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00:
-; AVX512VLVBMI-SLOW: # %bb.0:
-; AVX512VLVBMI-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0]
-; AVX512VLVBMI-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
-; AVX512VLVBMI-SLOW-NEXT: retq
-;
-; AVX512VLVBMI-FAST-ALL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00:
-; AVX512VLVBMI-FAST-ALL: # %bb.0:
-; AVX512VLVBMI-FAST-ALL-NEXT: vpmovsxbq {{.*#+}} ymm1 = [0,0,0,7]
-; AVX512VLVBMI-FAST-ALL-NEXT: vpermb %ymm0, %ymm1, %ymm0
-; AVX512VLVBMI-FAST-ALL-NEXT: retq
-;
-; AVX512VLVBMI-FAST-PERLANE-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00:
-; AVX512VLVBMI-FAST-PERLANE: # %bb.0:
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: retq
+; AVX512VLVBMI-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00:
+; AVX512VLVBMI: # %bb.0:
+; AVX512VLVBMI-NEXT: vpmovsxbq {{.*#+}} ymm1 = [0,0,0,7]
+; AVX512VLVBMI-NEXT: vpermb %ymm0, %ymm1, %ymm0
+; AVX512VLVBMI-NEXT: retq
;
; XOPAVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00:
; XOPAVX1: # %bb.0:
@@ -453,23 +369,11 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX512VLBW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
; AVX512VLBW-NEXT: retq
;
-; AVX512VLVBMI-SLOW-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-SLOW: # %bb.0:
-; AVX512VLVBMI-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8]
-; AVX512VLVBMI-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
-; AVX512VLVBMI-SLOW-NEXT: retq
-;
-; AVX512VLVBMI-FAST-ALL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-FAST-ALL: # %bb.0:
-; AVX512VLVBMI-FAST-ALL-NEXT: vmovdqa {{.*#+}} ymm1 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8,0,0,0,0,0,0,0,0]
-; AVX512VLVBMI-FAST-ALL-NEXT: vpermb %ymm0, %ymm1, %ymm0
-; AVX512VLVBMI-FAST-ALL-NEXT: retq
-;
-; AVX512VLVBMI-FAST-PERLANE-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-FAST-PERLANE: # %bb.0:
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: retq
+; AVX512VLVBMI-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00:
+; AVX512VLVBMI: # %bb.0:
+; AVX512VLVBMI-NEXT: vmovdqa {{.*#+}} ymm1 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8,0,0,0,0,0,0,0,0]
+; AVX512VLVBMI-NEXT: vpermb %ymm0, %ymm1, %ymm0
+; AVX512VLVBMI-NEXT: retq
;
; XOPAVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00:
; XOPAVX1: # %bb.0:
@@ -509,23 +413,11 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX512VLBW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
; AVX512VLBW-NEXT: retq
;
-; AVX512VLVBMI-SLOW-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_09_00_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-SLOW: # %bb.0:
-; AVX512VLVBMI-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,0,9,0]
-; AVX512VLVBMI-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
-; AVX512VLVBMI-SLOW-NEXT: retq
-;
-; AVX512VLVBMI-FAST-ALL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_09_00_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-FAST-ALL: # %bb.0:
-; AVX512VLVBMI-FAST-ALL-NEXT: vpmovsxbw {{.*#+}} ymm1 = [0,0,0,0,0,0,0,0,0,0,0,9,0,0,0,0]
-; AVX512VLVBMI-FAST-ALL-NEXT: vpermb %ymm0, %ymm1, %ymm0
-; AVX512VLVBMI-FAST-ALL-NEXT: retq
-;
-; AVX512VLVBMI-FAST-PERLANE-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_09_00_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-FAST-PERLANE: # %bb.0:
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,0,9,0]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: retq
+; AVX512VLVBMI-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_09_00_00_00_00_00_00_00_00_00:
+; AVX512VLVBMI: # %bb.0:
+; AVX512VLVBMI-NEXT: vpmovsxbw {{.*#+}} ymm1 = [0,0,0,0,0,0,0,0,0,0,0,9,0,0,0,0]
+; AVX512VLVBMI-NEXT: vpermb %ymm0, %ymm1, %ymm0
+; AVX512VLVBMI-NEXT: retq
;
; XOPAVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_09_00_00_00_00_00_00_00_00_00:
; XOPAVX1: # %bb.0:
@@ -565,23 +457,11 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX512VLBW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
; AVX512VLBW-NEXT: retq
;
-; AVX512VLVBMI-SLOW-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_10_00_00_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-SLOW: # %bb.0:
-; AVX512VLVBMI-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,10,0,0]
-; AVX512VLVBMI-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
-; AVX512VLVBMI-SLOW-NEXT: retq
-;
-; AVX512VLVBMI-FAST-ALL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_10_00_00_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-FAST-ALL: # %bb.0:
-; AVX512VLVBMI-FAST-ALL-NEXT: vpmovsxwd {{.*#+}} ymm1 = [0,0,0,0,0,2560,0,0]
-; AVX512VLVBMI-FAST-ALL-NEXT: vpermb %ymm0, %ymm1, %ymm0
-; AVX512VLVBMI-FAST-ALL-NEXT: retq
-;
-; AVX512VLVBMI-FAST-PERLANE-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_10_00_00_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-FAST-PERLANE: # %bb.0:
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,10,0,0]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: retq
+; AVX512VLVBMI-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_10_00_00_00_00_00_00_00_00_00_00:
+; AVX512VLVBMI: # %bb.0:
+; AVX512VLVBMI-NEXT: vpmovsxwd {{.*#+}} ymm1 = [0,0,0,0,0,2560,0,0]
+; AVX512VLVBMI-NEXT: vpermb %ymm0, %ymm1, %ymm0
+; AVX512VLVBMI-NEXT: retq
;
; XOPAVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_10_00_00_00_00_00_00_00_00_00_00:
; XOPAVX1: # %bb.0:
@@ -621,23 +501,11 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX512VLBW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
; AVX512VLBW-NEXT: retq
;
-; AVX512VLVBMI-SLOW-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_11_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-SLOW: # %bb.0:
-; AVX512VLVBMI-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,11,0,0,0]
-; AVX512VLVBMI-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
-; AVX512VLVBMI-SLOW-NEXT: retq
-;
-; AVX512VLVBMI-FAST-ALL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_11_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-FAST-ALL: # %bb.0:
-; AVX512VLVBMI-FAST-ALL-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,0,0,0,0,11,0,0]
-; AVX512VLVBMI-FAST-ALL-NEXT: vpermb %ymm0, %ymm1, %ymm0
-; AVX512VLVBMI-FAST-ALL-NEXT: retq
-;
-; AVX512VLVBMI-FAST-PERLANE-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_11_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-FAST-PERLANE: # %bb.0:
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,11,0,0,0]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: retq
+; AVX512VLVBMI-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_11_00_00_00_00_00_00_00_00_00_00_00:
+; AVX512VLVBMI: # %bb.0:
+; AVX512VLVBMI-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,0,0,0,0,11,0,0]
+; AVX512VLVBMI-NEXT: vpermb %ymm0, %ymm1, %ymm0
+; AVX512VLVBMI-NEXT: retq
;
; XOPAVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_11_00_00_00_00_00_00_00_00_00_00_00:
; XOPAVX1: # %bb.0:
@@ -677,23 +545,11 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX512VLBW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
; AVX512VLBW-NEXT: retq
;
-; AVX512VLVBMI-SLOW-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_12_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-SLOW: # %bb.0:
-; AVX512VLVBMI-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,12,0,0,0,0]
-; AVX512VLVBMI-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
-; AVX512VLVBMI-SLOW-NEXT: retq
-;
-; AVX512VLVBMI-FAST-ALL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_12_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-FAST-ALL: # %bb.0:
-; AVX512VLVBMI-FAST-ALL-NEXT: vpmovsxdq {{.*#+}} ymm1 = [0,0,201326592,0]
-; AVX512VLVBMI-FAST-ALL-NEXT: vpermb %ymm0, %ymm1, %ymm0
-; AVX512VLVBMI-FAST-ALL-NEXT: retq
-;
-; AVX512VLVBMI-FAST-PERLANE-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_12_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-FAST-PERLANE: # %bb.0:
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,12,0,0,0,0]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: retq
+; AVX512VLVBMI-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_12_00_00_00_00_00_00_00_00_00_00_00_00:
+; AVX512VLVBMI: # %bb.0:
+; AVX512VLVBMI-NEXT: vpmovsxdq {{.*#+}} ymm1 = [0,0,201326592,0]
+; AVX512VLVBMI-NEXT: vpermb %ymm0, %ymm1, %ymm0
+; AVX512VLVBMI-NEXT: retq
;
; XOPAVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_12_00_00_00_00_00_00_00_00_00_00_00_00:
; XOPAVX1: # %bb.0:
@@ -733,23 +589,11 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX512VLBW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
; AVX512VLBW-NEXT: retq
;
-; AVX512VLVBMI-SLOW-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_13_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-SLOW: # %bb.0:
-; AVX512VLVBMI-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,13,0,0,0,0,0]
-; AVX512VLVBMI-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
-; AVX512VLVBMI-SLOW-NEXT: retq
-;
-; AVX512VLVBMI-FAST-ALL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_13_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-FAST-ALL: # %bb.0:
-; AVX512VLVBMI-FAST-ALL-NEXT: vpmovsxbw {{.*#+}} ymm1 = [0,0,0,0,0,0,0,0,0,13,0,0,0,0,0,0]
-; AVX512VLVBMI-FAST-ALL-NEXT: vpermb %ymm0, %ymm1, %ymm0
-; AVX512VLVBMI-FAST-ALL-NEXT: retq
-;
-; AVX512VLVBMI-FAST-PERLANE-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_13_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-FAST-PERLANE: # %bb.0:
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,13,0,0,0,0,0]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: retq
+; AVX512VLVBMI-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_13_00_00_00_00_00_00_00_00_00_00_00_00_00:
+; AVX512VLVBMI: # %bb.0:
+; AVX512VLVBMI-NEXT: vpmovsxbw {{.*#+}} ymm1 = [0,0,0,0,0,0,0,0,0,13,0,0,0,0,0,0]
+; AVX512VLVBMI-NEXT: vpermb %ymm0, %ymm1, %ymm0
+; AVX512VLVBMI-NEXT: retq
;
; XOPAVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_13_00_00_00_00_00_00_00_00_00_00_00_00_00:
; XOPAVX1: # %bb.0:
@@ -789,23 +633,11 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX512VLBW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
; AVX512VLBW-NEXT: retq
;
-; AVX512VLVBMI-SLOW-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-SLOW: # %bb.0:
-; AVX512VLVBMI-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,14,0,0,0,0,0,0]
-; AVX512VLVBMI-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
-; AVX512VLVBMI-SLOW-NEXT: retq
-;
-; AVX512VLVBMI-FAST-ALL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-FAST-ALL: # %bb.0:
-; AVX512VLVBMI-FAST-ALL-NEXT: vpmovsxwq {{.*#+}} ymm1 = [0,0,3584,0]
-; AVX512VLVBMI-FAST-ALL-NEXT: vpermb %ymm0, %ymm1, %ymm0
-; AVX512VLVBMI-FAST-ALL-NEXT: retq
-;
-; AVX512VLVBMI-FAST-PERLANE-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-FAST-PERLANE: # %bb.0:
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,14,0,0,0,0,0,0]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: retq
+; AVX512VLVBMI-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
+; AVX512VLVBMI: # %bb.0:
+; AVX512VLVBMI-NEXT: vpmovsxwq {{.*#+}} ymm1 = [0,0,3584,0]
+; AVX512VLVBMI-NEXT: vpermb %ymm0, %ymm1, %ymm0
+; AVX512VLVBMI-NEXT: retq
;
; XOPAVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
; XOPAVX1: # %bb.0:
@@ -845,23 +677,11 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX512VLBW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
; AVX512VLBW-NEXT: retq
;
-; AVX512VLVBMI-SLOW-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-SLOW: # %bb.0:
-; AVX512VLVBMI-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,15,0,0,0,0,0,0,0]
-; AVX512VLVBMI-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
-; AVX512VLVBMI-SLOW-NEXT: retq
-;
-; AVX512VLVBMI-FAST-ALL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-FAST-ALL: # %bb.0:
-; AVX512VLVBMI-FAST-ALL-NEXT: vpmovsxbq {{.*#+}} ymm1 = [0,0,15,0]
-; AVX512VLVBMI-FAST-ALL-NEXT: vpermb %ymm0, %ymm1, %ymm0
-; AVX512VLVBMI-FAST-ALL-NEXT: retq
-;
-; AVX512VLVBMI-FAST-PERLANE-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-FAST-PERLANE: # %bb.0:
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,15,0,0,0,0,0,0,0]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: retq
+; AVX512VLVBMI-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
+; AVX512VLVBMI: # %bb.0:
+; AVX512VLVBMI-NEXT: vpmovsxbq {{.*#+}} ymm1 = [0,0,15,0]
+; AVX512VLVBMI-NEXT: vpermb %ymm0, %ymm1, %ymm0
+; AVX512VLVBMI-NEXT: retq
;
; XOPAVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
; XOPAVX1: # %bb.0:
@@ -1909,24 +1729,12 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00_
; AVX512VLBW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
; AVX512VLBW-NEXT: retq
;
-; AVX512VLVBMI-SLOW-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00:
-; AVX512VLVBMI-SLOW: # %bb.0:
-; AVX512VLVBMI-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0]
-; AVX512VLVBMI-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512VLVBMI-SLOW-NEXT: retq
-;
-; AVX512VLVBMI-FAST-ALL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00:
-; AVX512VLVBMI-FAST-ALL: # %bb.0:
-; AVX512VLVBMI-FAST-ALL-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0]
-; AVX512VLVBMI-FAST-ALL-NEXT: # ymm1 = mem[0,1,0,1]
-; AVX512VLVBMI-FAST-ALL-NEXT: vpermb %ymm0, %ymm1, %ymm0
-; AVX512VLVBMI-FAST-ALL-NEXT: retq
-;
-; AVX512VLVBMI-FAST-PERLANE-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00:
-; AVX512VLVBMI-FAST-PERLANE: # %bb.0:
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: retq
+; AVX512VLVBMI-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00:
+; AVX512VLVBMI: # %bb.0:
+; AVX512VLVBMI-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0]
+; AVX512VLVBMI-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512VLVBMI-NEXT: vpermb %ymm0, %ymm1, %ymm0
+; AVX512VLVBMI-NEXT: retq
;
; XOPAVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00:
; XOPAVX1: # %bb.0:
@@ -1962,24 +1770,12 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00_
; AVX512VLBW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
; AVX512VLBW-NEXT: retq
;
-; AVX512VLVBMI-SLOW-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00:
-; AVX512VLVBMI-SLOW: # %bb.0:
-; AVX512VLVBMI-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0]
-; AVX512VLVBMI-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512VLVBMI-SLOW-NEXT: retq
-;
-; AVX512VLVBMI-FAST-ALL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00:
-; AVX512VLVBMI-FAST-ALL: # %bb.0:
-; AVX512VLVBMI-FAST-ALL-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0]
-; AVX512VLVBMI-FAST-ALL-NEXT: # ymm1 = mem[0,1,0,1]
-; AVX512VLVBMI-FAST-ALL-NEXT: vpermb %ymm0, %ymm1, %ymm0
-; AVX512VLVBMI-FAST-ALL-NEXT: retq
-;
-; AVX512VLVBMI-FAST-PERLANE-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00:
-; AVX512VLVBMI-FAST-PERLANE: # %bb.0:
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: retq
+; AVX512VLVBMI-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00:
+; AVX512VLVBMI: # %bb.0:
+; AVX512VLVBMI-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0]
+; AVX512VLVBMI-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512VLVBMI-NEXT: vpermb %ymm0, %ymm1, %ymm0
+; AVX512VLVBMI-NEXT: retq
;
; XOPAVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00:
; XOPAVX1: # %bb.0:
@@ -2015,24 +1811,12 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00_
; AVX512VLBW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
; AVX512VLBW-NEXT: retq
;
-; AVX512VLVBMI-SLOW-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00:
-; AVX512VLVBMI-SLOW: # %bb.0:
-; AVX512VLVBMI-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0]
-; AVX512VLVBMI-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512VLVBMI-SLOW-NEXT: retq
-;
-; AVX512VLVBMI-FAST-ALL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00:
-; AVX512VLVBMI-FAST-ALL: # %bb.0:
-; AVX512VLVBMI-FAST-ALL-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0]
-; AVX512VLVBMI-FAST-ALL-NEXT: # ymm1 = mem[0,1,0,1]
-; AVX512VLVBMI-FAST-ALL-NEXT: vpermb %ymm0, %ymm1, %ymm0
-; AVX512VLVBMI-FAST-ALL-NEXT: retq
-;
-; AVX512VLVBMI-FAST-PERLANE-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00:
-; AVX512VLVBMI-FAST-PERLANE: # %bb.0:
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: retq
+; AVX512VLVBMI-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00:
+; AVX512VLVBMI: # %bb.0:
+; AVX512VLVBMI-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0]
+; AVX512VLVBMI-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512VLVBMI-NEXT: vpermb %ymm0, %ymm1, %ymm0
+; AVX512VLVBMI-NEXT: retq
;
; XOPAVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00:
; XOPAVX1: # %bb.0:
@@ -2068,24 +1852,12 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00_
; AVX512VLBW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
; AVX512VLBW-NEXT: retq
;
-; AVX512VLVBMI-SLOW-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-SLOW: # %bb.0:
-; AVX512VLVBMI-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,8,0,0,0,0,0,0,0,0]
-; AVX512VLVBMI-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512VLVBMI-SLOW-NEXT: retq
-;
-; AVX512VLVBMI-FAST-ALL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-FAST-ALL: # %bb.0:
-; AVX512VLVBMI-FAST-ALL-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,0,0,0,0,0,0,8,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8,0,0,0,0,0,0,0,0]
-; AVX512VLVBMI-FAST-ALL-NEXT: # ymm1 = mem[0,1,0,1]
-; AVX512VLVBMI-FAST-ALL-NEXT: vpermb %ymm0, %ymm1, %ymm0
-; AVX512VLVBMI-FAST-ALL-NEXT: retq
-;
-; AVX512VLVBMI-FAST-PERLANE-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-FAST-PERLANE: # %bb.0:
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,8,0,0,0,0,0,0,0,0]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: retq
+; AVX512VLVBMI-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00:
+; AVX512VLVBMI: # %bb.0:
+; AVX512VLVBMI-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,0,0,0,0,0,0,8,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8,0,0,0,0,0,0,0,0]
+; AVX512VLVBMI-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512VLVBMI-NEXT: vpermb %ymm0, %ymm1, %ymm0
+; AVX512VLVBMI-NEXT: retq
;
; XOPAVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00:
; XOPAVX1: # %bb.0:
@@ -2121,24 +1893,12 @@ define <32 x i8> @shuffle_v32i8_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX512VLBW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
; AVX512VLBW-NEXT: retq
;
-; AVX512VLVBMI-SLOW-LABEL: shuffle_v32i8_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-SLOW: # %bb.0:
-; AVX512VLVBMI-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512VLVBMI-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512VLVBMI-SLOW-NEXT: retq
-;
-; AVX512VLVBMI-FAST-ALL-LABEL: shuffle_v32i8_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-FAST-ALL: # %bb.0:
-; AVX512VLVBMI-FAST-ALL-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512VLVBMI-FAST-ALL-NEXT: # ymm1 = mem[0,1,0,1]
-; AVX512VLVBMI-FAST-ALL-NEXT: vpermb %ymm0, %ymm1, %ymm0
-; AVX512VLVBMI-FAST-ALL-NEXT: retq
-;
-; AVX512VLVBMI-FAST-PERLANE-LABEL: shuffle_v32i8_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-FAST-PERLANE: # %bb.0:
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: retq
+; AVX512VLVBMI-LABEL: shuffle_v32i8_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
+; AVX512VLVBMI: # %bb.0:
+; AVX512VLVBMI-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512VLVBMI-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512VLVBMI-NEXT: vpermb %ymm0, %ymm1, %ymm0
+; AVX512VLVBMI-NEXT: retq
;
; XOPAVX1-LABEL: shuffle_v32i8_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
; XOPAVX1: # %bb.0:
@@ -2174,24 +1934,12 @@ define <32 x i8> @shuffle_v32i8_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX512VLBW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
; AVX512VLBW-NEXT: retq
;
-; AVX512VLVBMI-SLOW-LABEL: shuffle_v32i8_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-SLOW: # %bb.0:
-; AVX512VLVBMI-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512VLVBMI-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512VLVBMI-SLOW-NEXT: retq
-;
-; AVX512VLVBMI-FAST-ALL-LABEL: shuffle_v32i8_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-FAST-ALL: # %bb.0:
-; AVX512VLVBMI-FAST-ALL-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512VLVBMI-FAST-ALL-NEXT: # ymm1 = mem[0,1,0,1]
-; AVX512VLVBMI-FAST-ALL-NEXT: vpermb %ymm0, %ymm1, %ymm0
-; AVX512VLVBMI-FAST-ALL-NEXT: retq
-;
-; AVX512VLVBMI-FAST-PERLANE-LABEL: shuffle_v32i8_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VLVBMI-FAST-PERLANE: # %bb.0:
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512VLVBMI-FAST-PERLANE-NEXT: retq
+; AVX512VLVBMI-LABEL: shuffle_v32i8_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
+; AVX512VLVBMI: # %bb.0:
+; AVX512VLVBMI-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512VLVBMI-NEXT: # ymm1 = mem[0,1,0,1]
+; AVX512VLVBMI-NEXT: vpermb %ymm0, %ymm1, %ymm0
+; AVX512VLVBMI-NEXT: retq
;
; XOPAVX1-LABEL: shuffle_v32i8_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
; XOPAVX1: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/x86-interleaved-access.ll b/llvm/test/CodeGen/X86/x86-interleaved-access.ll
index a01e6ca4b175d..d2c64a462a3e7 100644
--- a/llvm/test/CodeGen/X86/x86-interleaved-access.ll
+++ b/llvm/test/CodeGen/X86/x86-interleaved-access.ll
@@ -1605,20 +1605,20 @@ define void @interleaved_store_vf64_i8_stride4(<64 x i8> %a, <64 x i8> %b, <64 x
; AVX512-NEXT: vextracti32x4 $2, %zmm1, %xmm6
; AVX512-NEXT: vextracti64x4 $1, %zmm3, %ymm7
; AVX512-NEXT: vinserti128 $1, %xmm6, %ymm7, %ymm6
-; AVX512-NEXT: vextracti32x4 $2, %zmm0, %xmm8
-; AVX512-NEXT: vextracti64x4 $1, %zmm4, %ymm9
-; AVX512-NEXT: vinserti128 $1, %xmm8, %ymm9, %ymm8
+; AVX512-NEXT: vextracti32x4 $2, %zmm0, %xmm7
+; AVX512-NEXT: vextracti64x4 $1, %zmm4, %ymm8
+; AVX512-NEXT: vinserti128 $1, %xmm7, %ymm8, %ymm7
; AVX512-NEXT: vinserti64x4 $1, %ymm5, %zmm2, %zmm2
-; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm4, %zmm4
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm3
-; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm3[2,3,6,7],zmm4[2,3,6,7]
-; AVX512-NEXT: vinserti64x4 $1, %ymm8, %zmm6, %zmm4
-; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm9[0,1,2,3],zmm0[4,5,6,7]
-; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm7[0,1,2,3],zmm1[4,5,6,7]
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm4, %zmm5
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm8
+; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm8[2,3,6,7],zmm5[2,3,6,7]
+; AVX512-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm6
+; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm4[4,5,6,7],zmm0[4,5,6,7]
+; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm3[4,5,6,7],zmm1[4,5,6,7]
; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm1[2,3,6,7],zmm0[2,3,6,7]
-; AVX512-NEXT: vmovdqa64 %zmm3, 64(%rdi)
; AVX512-NEXT: vmovdqa64 %zmm0, 192(%rdi)
-; AVX512-NEXT: vmovdqa64 %zmm4, 128(%rdi)
+; AVX512-NEXT: vmovdqa64 %zmm5, 64(%rdi)
+; AVX512-NEXT: vmovdqa64 %zmm6, 128(%rdi)
; AVX512-NEXT: vmovdqa64 %zmm2, (%rdi)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
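
For reference, a minimal sketch of the IR behind the first vector-shuffle-256-v32.ll check block above, reconstructed from its check label (the in-tree test body is authoritative; it may use undef rather than poison for the unused shuffle operand):

define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<32 x i8> %a) {
  ; Every element takes byte 0 of %a except element 16, which takes byte 15.
  ; With AVX512VBMI this now lowers to a single vpermb using the
  ; vpmovsxbq-materialized index vector [0,0,15,0] shown above, instead of
  ; the old vpshufb + vpermq pair.
  %shuffle = shufflevector <32 x i8> %a, <32 x i8> poison,
    <32 x i32> <i32 0,  i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0,
                i32 0,  i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0,
                i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0,
                i32 0,  i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
  ret <32 x i8> %shuffle
}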