[llvm] 7457f51 - [X86] Fold VPERMV3(X,M,Y) -> VPERMV(CONCAT(X,Y),WIDEN(M)) iff the CONCAT is free (#122485)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Jan 13 06:14:00 PST 2025
Author: Simon Pilgrim
Date: 2025-01-13T14:13:55Z
New Revision: 7457f51f6cf61b960e3e6e45e63378debd5c1d5c
URL: https://github.com/llvm/llvm-project/commit/7457f51f6cf61b960e3e6e45e63378debd5c1d5c
DIFF: https://github.com/llvm/llvm-project/commit/7457f51f6cf61b960e3e6e45e63378debd5c1d5c.diff
LOG: [X86] Fold VPERMV3(X,M,Y) -> VPERMV(CONCAT(X,Y),WIDEN(M)) iff the CONCAT is free (#122485)
This extends the existing fold which concatenates X and Y if they are sequential subvectors extracted from the same source.
By using combineConcatVectorOps, we can recognise other patterns where X and Y can be concatenated for free (e.g. sequential loads, concatenation of repeated instructions, etc.), which allows the VPERMV3 fold to be much more aggressive.
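For illustration, a condensed before/after taken from the partial_permute.ll diffs below: both VPERMV3 sources are the lo/hi halves of a single 512-bit load, so the whole sequence becomes one VPERMV straight from memory (mask values abbreviated):

    # before: load the lo half, permute it against the hi half in memory
    vmovdqa   (%rdi), %ymm1
    vpmovsxbw {{.*#+}} ymm0 = [20,19,22,12,...]
    vpermi2w  32(%rdi), %ymm1, %ymm0

    # after: one vpermw over the whole 512-bit source
    vpmovsxbw {{.*#+}} ymm0 = [20,19,22,12,...]
    vpermw    (%rdi), %zmm0, %zmm0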
This required combineConcatVectorOps to be extended to fold the additional case of "concat(extract_subvector(x,lo), extract_subvector(x,hi)) -> extract_subvector(x)", similar to the original VPERMV3 fold where "x" was larger than the concat result type.
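A rough sketch of the new case in hand-written DAG notation (not lifted verbatim from the patch), where each half is the lo/hi subvector of the same source x with N elements per half:

    concat(extract_subvector(x, 0), extract_subvector(x, N))
      -> x                        // x is exactly the concat result type
      -> extract_subvector(x, 0)  // x is wider than the concat result type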
This also exposes more cases where we end up with repeated vector/subvector loads when they have multiple uses - e.g. loading a ymm and its lo/hi xmm halves independently. In the past we've always considered this relatively benign, but I'm not certain whether we should now do more to keep these loads from splitting.
Added:
Modified:
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
llvm/test/CodeGen/X86/pr97968.ll
llvm/test/CodeGen/X86/shuffle-strided-with-offset-512.ll
llvm/test/CodeGen/X86/shuffle-vs-trunc-512.ll
llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-8.ll
llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-2.ll
llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-3.ll
llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-4.ll
llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-5.ll
llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll
llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-8.ll
llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-2.ll
llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-6.ll
llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-7.ll
llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 596139d0845701..add51fac4b9e62 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -41701,6 +41701,11 @@ static SDValue canonicalizeLaneShuffleWithRepeatedOps(SDValue V,
return SDValue();
}
+static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
+ ArrayRef<SDValue> Ops, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget);
+
/// Try to combine x86 target specific shuffles.
static SDValue combineTargetShuffle(SDValue N, const SDLoc &DL,
SelectionDAG &DAG,
@@ -42401,25 +42406,17 @@ static SDValue combineTargetShuffle(SDValue N, const SDLoc &DL,
return SDValue();
}
case X86ISD::VPERMV3: {
- SDValue V1 = peekThroughBitcasts(N.getOperand(0));
- SDValue V2 = peekThroughBitcasts(N.getOperand(2));
- MVT SVT = V1.getSimpleValueType();
- // Combine VPERMV3 to widened VPERMV if the two source operands are split
- // from the same vector.
- if (V1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
- V1.getConstantOperandVal(1) == 0 &&
- V2.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
- V2.getConstantOperandVal(1) == SVT.getVectorNumElements() &&
- V1.getOperand(0) == V2.getOperand(0)) {
- EVT NVT = V1.getOperand(0).getValueType();
- if (NVT.is256BitVector() ||
- (NVT.is512BitVector() && Subtarget.hasEVEX512())) {
- MVT WideVT = MVT::getVectorVT(
- VT.getScalarType(), NVT.getSizeInBits() / VT.getScalarSizeInBits());
+ // Combine VPERMV3 to widened VPERMV if the two source operands can be
+ // freely concatenated.
+ if (VT.is128BitVector() ||
+ (VT.is256BitVector() && Subtarget.useAVX512Regs())) {
+ SDValue Ops[] = {N.getOperand(0), N.getOperand(2)};
+ MVT WideVT = VT.getDoubleNumVectorElementsVT();
+ if (SDValue ConcatSrc =
+ combineConcatVectorOps(DL, WideVT, Ops, DAG, DCI, Subtarget)) {
SDValue Mask = widenSubVector(N.getOperand(1), false, Subtarget, DAG,
DL, WideVT.getSizeInBits());
- SDValue Perm = DAG.getNode(X86ISD::VPERMV, DL, WideVT, Mask,
- DAG.getBitcast(WideVT, V1.getOperand(0)));
+ SDValue Perm = DAG.getNode(X86ISD::VPERMV, DL, WideVT, Mask, ConcatSrc);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Perm,
DAG.getIntPtrConstant(0, DL));
}
@@ -42427,6 +42424,9 @@ static SDValue combineTargetShuffle(SDValue N, const SDLoc &DL,
SmallVector<SDValue, 2> Ops;
SmallVector<int, 32> Mask;
if (getTargetShuffleMask(N, /*AllowSentinelZero=*/false, Ops, Mask)) {
+ assert(Mask.size() == NumElts && "Unexpected shuffle mask size");
+ SDValue V1 = peekThroughBitcasts(N.getOperand(0));
+ SDValue V2 = peekThroughBitcasts(N.getOperand(2));
MVT MaskVT = N.getOperand(1).getSimpleValueType();
// Canonicalize to VPERMV if both sources are the same.
if (V1 == V2) {
@@ -57369,10 +57369,8 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
Op0.getOperand(1));
}
- // concat(extract_subvector(v0,c0), extract_subvector(v1,c1)) -> vperm2x128.
- // Only concat of subvector high halves which vperm2x128 is best at.
// TODO: This should go in combineX86ShufflesRecursively eventually.
- if (VT.is256BitVector() && NumOps == 2) {
+ if (NumOps == 2) {
SDValue Src0 = peekThroughBitcasts(Ops[0]);
SDValue Src1 = peekThroughBitcasts(Ops[1]);
if (Src0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
@@ -57381,7 +57379,10 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
EVT SrcVT1 = Src1.getOperand(0).getValueType();
unsigned NumSrcElts0 = SrcVT0.getVectorNumElements();
unsigned NumSrcElts1 = SrcVT1.getVectorNumElements();
- if (SrcVT0.is256BitVector() && SrcVT1.is256BitVector() &&
+ // concat(extract_subvector(v0), extract_subvector(v1)) -> vperm2x128.
+ // Only concat of subvector high halves which vperm2x128 is best at.
+ if (VT.is256BitVector() && SrcVT0.is256BitVector() &&
+ SrcVT1.is256BitVector() &&
Src0.getConstantOperandAPInt(1) == (NumSrcElts0 / 2) &&
Src1.getConstantOperandAPInt(1) == (NumSrcElts1 / 2)) {
return DAG.getNode(X86ISD::VPERM2X128, DL, VT,
@@ -57389,6 +57390,14 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
DAG.getBitcast(VT, Src1.getOperand(0)),
DAG.getTargetConstant(0x31, DL, MVT::i8));
}
+ // concat(extract_subvector(x,lo), extract_subvector(x,hi)) -> x.
+ if (Src0.getOperand(0) == Src1.getOperand(0) &&
+ Src0.getConstantOperandAPInt(1) == 0 &&
+ Src1.getConstantOperandAPInt(1) ==
+ Src0.getValueType().getVectorNumElements()) {
+ return DAG.getBitcast(VT, extractSubVector(Src0.getOperand(0), 0, DAG,
+ DL, VT.getSizeInBits()));
+ }
}
}
diff --git a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
index 1305559bc04e0f..3d72319f59ca9e 100644
--- a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
+++ b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
@@ -1337,10 +1337,9 @@ define void @vec256_i16_widen_to_i32_factor2_broadcast_to_v8i32_factor8(ptr %in.
;
; AVX512BW-LABEL: vec256_i16_widen_to_i32_factor2_broadcast_to_v8i32_factor8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm1 = [0,17,0,19,0,21,0,23,0,25,0,27,0,29,0,31]
-; AVX512BW-NEXT: vpermi2w 32(%rdi), %ymm0, %ymm1
-; AVX512BW-NEXT: vpaddb (%rsi), %zmm1, %zmm0
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm0 = [0,17,0,19,0,21,0,23,0,25,0,27,0,29,0,31]
+; AVX512BW-NEXT: vpermw (%rdi), %zmm0, %zmm0
+; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rdx)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -1789,10 +1788,9 @@ define void @vec256_i64_widen_to_i128_factor2_broadcast_to_v2i128_factor2(ptr %i
;
; AVX512F-FAST-LABEL: vec256_i64_widen_to_i128_factor2_broadcast_to_v2i128_factor2:
; AVX512F-FAST: # %bb.0:
-; AVX512F-FAST-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512F-FAST-NEXT: vpmovsxbq {{.*#+}} ymm1 = [0,5,0,7]
-; AVX512F-FAST-NEXT: vpermi2q 32(%rdi), %ymm0, %ymm1
-; AVX512F-FAST-NEXT: vpaddb (%rsi), %ymm1, %ymm0
+; AVX512F-FAST-NEXT: vpmovsxbq {{.*#+}} ymm0 = [0,5,0,7]
+; AVX512F-FAST-NEXT: vpermq (%rdi), %zmm0, %zmm0
+; AVX512F-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0
; AVX512F-FAST-NEXT: vmovdqa %ymm0, (%rdx)
; AVX512F-FAST-NEXT: vzeroupper
; AVX512F-FAST-NEXT: retq
@@ -1808,10 +1806,9 @@ define void @vec256_i64_widen_to_i128_factor2_broadcast_to_v2i128_factor2(ptr %i
;
; AVX512DQ-FAST-LABEL: vec256_i64_widen_to_i128_factor2_broadcast_to_v2i128_factor2:
; AVX512DQ-FAST: # %bb.0:
-; AVX512DQ-FAST-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512DQ-FAST-NEXT: vpmovsxbq {{.*#+}} ymm1 = [0,5,0,7]
-; AVX512DQ-FAST-NEXT: vpermi2q 32(%rdi), %ymm0, %ymm1
-; AVX512DQ-FAST-NEXT: vpaddb (%rsi), %ymm1, %ymm0
+; AVX512DQ-FAST-NEXT: vpmovsxbq {{.*#+}} ymm0 = [0,5,0,7]
+; AVX512DQ-FAST-NEXT: vpermq (%rdi), %zmm0, %zmm0
+; AVX512DQ-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0
; AVX512DQ-FAST-NEXT: vmovdqa %ymm0, (%rdx)
; AVX512DQ-FAST-NEXT: vzeroupper
; AVX512DQ-FAST-NEXT: retq
@@ -1827,10 +1824,9 @@ define void @vec256_i64_widen_to_i128_factor2_broadcast_to_v2i128_factor2(ptr %i
;
; AVX512BW-FAST-LABEL: vec256_i64_widen_to_i128_factor2_broadcast_to_v2i128_factor2:
; AVX512BW-FAST: # %bb.0:
-; AVX512BW-FAST-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512BW-FAST-NEXT: vpmovsxbq {{.*#+}} ymm1 = [0,5,0,7]
-; AVX512BW-FAST-NEXT: vpermi2q 32(%rdi), %ymm0, %ymm1
-; AVX512BW-FAST-NEXT: vpaddb (%rsi), %zmm1, %zmm0
+; AVX512BW-FAST-NEXT: vpmovsxbq {{.*#+}} ymm0 = [0,5,0,7]
+; AVX512BW-FAST-NEXT: vpermq (%rdi), %zmm0, %zmm0
+; AVX512BW-FAST-NEXT: vpaddb (%rsi), %zmm0, %zmm0
; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, (%rdx)
; AVX512BW-FAST-NEXT: vzeroupper
; AVX512BW-FAST-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll b/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
index 5d901a8a380a9c..aac5847061cbec 100644
--- a/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
+++ b/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
@@ -149,9 +149,10 @@ define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mask3(<16 x i16> %vec, <8 x
define <8 x i16> @test_16xi16_to_8xi16_perm_mem_mask0(ptr %vp) {
; CHECK-LABEL: test_16xi16_to_8xi16_perm_mem_mask0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa (%rdi), %xmm1
; CHECK-NEXT: vpmovsxbw {{.*#+}} xmm0 = [0,7,13,3,5,13,3,9]
-; CHECK-NEXT: vpermi2w 16(%rdi), %xmm1, %xmm0
+; CHECK-NEXT: vpermw (%rdi), %ymm0, %ymm0
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x i16>, ptr %vp
%res = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> <i32 0, i32 7, i32 13, i32 3, i32 5, i32 13, i32 3, i32 9>
@@ -160,11 +161,12 @@ define <8 x i16> @test_16xi16_to_8xi16_perm_mem_mask0(ptr %vp) {
define <8 x i16> @test_masked_16xi16_to_8xi16_perm_mem_mask0(ptr %vp, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_to_8xi16_perm_mem_mask0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa (%rdi), %xmm2
-; CHECK-NEXT: vpmovsxbw {{.*#+}} xmm3 = [0,7,13,3,5,13,3,9]
-; CHECK-NEXT: vpermi2w 16(%rdi), %xmm2, %xmm3
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; CHECK-NEXT: vpmovsxbw {{.*#+}} xmm2 = [0,7,13,3,5,13,3,9]
; CHECK-NEXT: vptestnmw %xmm1, %xmm1, %k1
-; CHECK-NEXT: vmovdqu16 %xmm3, %xmm0 {%k1}
+; CHECK-NEXT: vpermw (%rdi), %ymm2, %ymm0 {%k1}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x i16>, ptr %vp
%shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> <i32 0, i32 7, i32 13, i32 3, i32 5, i32 13, i32 3, i32 9>
@@ -176,11 +178,11 @@ define <8 x i16> @test_masked_16xi16_to_8xi16_perm_mem_mask0(ptr %vp, <8 x i16>
define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mem_mask0(ptr %vp, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_to_8xi16_perm_mem_mask0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa (%rdi), %xmm2
; CHECK-NEXT: vpmovsxbw {{.*#+}} xmm1 = [0,7,13,3,5,13,3,9]
; CHECK-NEXT: vptestnmw %xmm0, %xmm0, %k1
-; CHECK-NEXT: vpermi2w 16(%rdi), %xmm2, %xmm1 {%k1} {z}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpermw (%rdi), %ymm1, %ymm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x i16>, ptr %vp
%shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> <i32 0, i32 7, i32 13, i32 3, i32 5, i32 13, i32 3, i32 9>
@@ -192,11 +194,12 @@ define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mem_mask0(ptr %vp, <8 x i16
define <8 x i16> @test_masked_16xi16_to_8xi16_perm_mem_mask1(ptr %vp, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_to_8xi16_perm_mem_mask1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa (%rdi), %xmm2
-; CHECK-NEXT: vpmovsxbw {{.*#+}} xmm3 = [3,15,12,7,1,5,8,14]
-; CHECK-NEXT: vpermi2w 16(%rdi), %xmm2, %xmm3
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; CHECK-NEXT: vpmovsxbw {{.*#+}} xmm2 = [3,15,12,7,1,5,8,14]
; CHECK-NEXT: vptestnmw %xmm1, %xmm1, %k1
-; CHECK-NEXT: vmovdqu16 %xmm3, %xmm0 {%k1}
+; CHECK-NEXT: vpermw (%rdi), %ymm2, %ymm0 {%k1}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x i16>, ptr %vp
%shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> <i32 3, i32 15, i32 12, i32 7, i32 1, i32 5, i32 8, i32 14>
@@ -208,11 +211,11 @@ define <8 x i16> @test_masked_16xi16_to_8xi16_perm_mem_mask1(ptr %vp, <8 x i16>
define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mem_mask1(ptr %vp, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_to_8xi16_perm_mem_mask1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa (%rdi), %xmm2
; CHECK-NEXT: vpmovsxbw {{.*#+}} xmm1 = [3,15,12,7,1,5,8,14]
; CHECK-NEXT: vptestnmw %xmm0, %xmm0, %k1
-; CHECK-NEXT: vpermi2w 16(%rdi), %xmm2, %xmm1 {%k1} {z}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpermw (%rdi), %ymm1, %ymm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x i16>, ptr %vp
%shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> <i32 3, i32 15, i32 12, i32 7, i32 1, i32 5, i32 8, i32 14>
@@ -256,9 +259,10 @@ define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mem_mask2(ptr %vp, <8 x i16
define <8 x i16> @test_16xi16_to_8xi16_perm_mem_mask3(ptr %vp) {
; CHECK-LABEL: test_16xi16_to_8xi16_perm_mem_mask3:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa (%rdi), %xmm1
; CHECK-NEXT: vpmovsxbw {{.*#+}} xmm0 = [9,7,9,6,9,4,3,2]
-; CHECK-NEXT: vpermi2w 16(%rdi), %xmm1, %xmm0
+; CHECK-NEXT: vpermw (%rdi), %ymm0, %ymm0
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x i16>, ptr %vp
%res = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> <i32 9, i32 7, i32 9, i32 6, i32 9, i32 4, i32 3, i32 2>
@@ -267,11 +271,12 @@ define <8 x i16> @test_16xi16_to_8xi16_perm_mem_mask3(ptr %vp) {
define <8 x i16> @test_masked_16xi16_to_8xi16_perm_mem_mask3(ptr %vp, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_to_8xi16_perm_mem_mask3:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa (%rdi), %xmm2
-; CHECK-NEXT: vpmovsxbw {{.*#+}} xmm3 = [9,7,9,6,9,4,3,2]
-; CHECK-NEXT: vpermi2w 16(%rdi), %xmm2, %xmm3
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; CHECK-NEXT: vpmovsxbw {{.*#+}} xmm2 = [9,7,9,6,9,4,3,2]
; CHECK-NEXT: vptestnmw %xmm1, %xmm1, %k1
-; CHECK-NEXT: vmovdqu16 %xmm3, %xmm0 {%k1}
+; CHECK-NEXT: vpermw (%rdi), %ymm2, %ymm0 {%k1}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x i16>, ptr %vp
%shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> <i32 9, i32 7, i32 9, i32 6, i32 9, i32 4, i32 3, i32 2>
@@ -283,11 +288,11 @@ define <8 x i16> @test_masked_16xi16_to_8xi16_perm_mem_mask3(ptr %vp, <8 x i16>
define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mem_mask3(ptr %vp, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_to_8xi16_perm_mem_mask3:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa (%rdi), %xmm2
; CHECK-NEXT: vpmovsxbw {{.*#+}} xmm1 = [9,7,9,6,9,4,3,2]
; CHECK-NEXT: vptestnmw %xmm0, %xmm0, %k1
-; CHECK-NEXT: vpermi2w 16(%rdi), %xmm2, %xmm1 {%k1} {z}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpermw (%rdi), %ymm1, %ymm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x i16>, ptr %vp
%shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> <i32 9, i32 7, i32 9, i32 6, i32 9, i32 4, i32 3, i32 2>
@@ -579,9 +584,9 @@ define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mask3(<32 x i16> %vec, <8 x
define <16 x i16> @test_32xi16_to_16xi16_perm_mem_mask0(ptr %vp) {
; CHECK-LABEL: test_32xi16_to_16xi16_perm_mem_mask0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa (%rdi), %ymm1
; CHECK-NEXT: vpmovsxbw {{.*#+}} ymm0 = [20,19,22,12,13,20,0,6,10,7,20,12,28,18,13,12]
-; CHECK-NEXT: vpermi2w 32(%rdi), %ymm1, %ymm0
+; CHECK-NEXT: vpermw (%rdi), %zmm0, %zmm0
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-NEXT: retq
%vec = load <32 x i16>, ptr %vp
%res = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> <i32 20, i32 19, i32 22, i32 12, i32 13, i32 20, i32 0, i32 6, i32 10, i32 7, i32 20, i32 12, i32 28, i32 18, i32 13, i32 12>
@@ -590,11 +595,11 @@ define <16 x i16> @test_32xi16_to_16xi16_perm_mem_mask0(ptr %vp) {
define <16 x i16> @test_masked_32xi16_to_16xi16_perm_mem_mask0(ptr %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_to_16xi16_perm_mem_mask0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa (%rdi), %ymm2
-; CHECK-NEXT: vpmovsxbw {{.*#+}} ymm3 = [20,19,22,12,13,20,0,6,10,7,20,12,28,18,13,12]
-; CHECK-NEXT: vpermi2w 32(%rdi), %ymm2, %ymm3
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; CHECK-NEXT: vpmovsxbw {{.*#+}} ymm2 = [20,19,22,12,13,20,0,6,10,7,20,12,28,18,13,12]
; CHECK-NEXT: vptestnmw %ymm1, %ymm1, %k1
-; CHECK-NEXT: vmovdqu16 %ymm3, %ymm0 {%k1}
+; CHECK-NEXT: vpermw (%rdi), %zmm2, %zmm0 {%k1}
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-NEXT: retq
%vec = load <32 x i16>, ptr %vp
%shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> <i32 20, i32 19, i32 22, i32 12, i32 13, i32 20, i32 0, i32 6, i32 10, i32 7, i32 20, i32 12, i32 28, i32 18, i32 13, i32 12>
@@ -606,11 +611,10 @@ define <16 x i16> @test_masked_32xi16_to_16xi16_perm_mem_mask0(ptr %vp, <16 x i1
define <16 x i16> @test_masked_z_32xi16_to_16xi16_perm_mem_mask0(ptr %vp, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_to_16xi16_perm_mem_mask0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa (%rdi), %ymm2
; CHECK-NEXT: vpmovsxbw {{.*#+}} ymm1 = [20,19,22,12,13,20,0,6,10,7,20,12,28,18,13,12]
; CHECK-NEXT: vptestnmw %ymm0, %ymm0, %k1
-; CHECK-NEXT: vpermi2w 32(%rdi), %ymm2, %ymm1 {%k1} {z}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpermw (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-NEXT: retq
%vec = load <32 x i16>, ptr %vp
%shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> <i32 20, i32 19, i32 22, i32 12, i32 13, i32 20, i32 0, i32 6, i32 10, i32 7, i32 20, i32 12, i32 28, i32 18, i32 13, i32 12>
@@ -622,11 +626,11 @@ define <16 x i16> @test_masked_z_32xi16_to_16xi16_perm_mem_mask0(ptr %vp, <16 x
define <16 x i16> @test_masked_32xi16_to_16xi16_perm_mem_mask1(ptr %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_to_16xi16_perm_mem_mask1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa (%rdi), %ymm2
-; CHECK-NEXT: vpmovsxbw {{.*#+}} ymm3 = [22,13,21,1,14,8,5,16,15,17,24,28,15,9,14,25]
-; CHECK-NEXT: vpermi2w 32(%rdi), %ymm2, %ymm3
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; CHECK-NEXT: vpmovsxbw {{.*#+}} ymm2 = [22,13,21,1,14,8,5,16,15,17,24,28,15,9,14,25]
; CHECK-NEXT: vptestnmw %ymm1, %ymm1, %k1
-; CHECK-NEXT: vmovdqu16 %ymm3, %ymm0 {%k1}
+; CHECK-NEXT: vpermw (%rdi), %zmm2, %zmm0 {%k1}
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-NEXT: retq
%vec = load <32 x i16>, ptr %vp
%shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> <i32 22, i32 13, i32 21, i32 1, i32 14, i32 8, i32 5, i32 16, i32 15, i32 17, i32 24, i32 28, i32 15, i32 9, i32 14, i32 25>
@@ -638,11 +642,10 @@ define <16 x i16> @test_masked_32xi16_to_16xi16_perm_mem_mask1(ptr %vp, <16 x i1
define <16 x i16> @test_masked_z_32xi16_to_16xi16_perm_mem_mask1(ptr %vp, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_to_16xi16_perm_mem_mask1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa (%rdi), %ymm2
; CHECK-NEXT: vpmovsxbw {{.*#+}} ymm1 = [22,13,21,1,14,8,5,16,15,17,24,28,15,9,14,25]
; CHECK-NEXT: vptestnmw %ymm0, %ymm0, %k1
-; CHECK-NEXT: vpermi2w 32(%rdi), %ymm2, %ymm1 {%k1} {z}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpermw (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-NEXT: retq
%vec = load <32 x i16>, ptr %vp
%shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> <i32 22, i32 13, i32 21, i32 1, i32 14, i32 8, i32 5, i32 16, i32 15, i32 17, i32 24, i32 28, i32 15, i32 9, i32 14, i32 25>
@@ -686,9 +689,9 @@ define <16 x i16> @test_masked_z_32xi16_to_16xi16_perm_mem_mask2(ptr %vp, <16 x
define <16 x i16> @test_32xi16_to_16xi16_perm_mem_mask3(ptr %vp) {
; CHECK-LABEL: test_32xi16_to_16xi16_perm_mem_mask3:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa (%rdi), %ymm1
; CHECK-NEXT: vpmovsxbw {{.*#+}} ymm0 = [3,3,20,27,8,31,3,27,12,2,8,14,25,27,4,16]
-; CHECK-NEXT: vpermi2w 32(%rdi), %ymm1, %ymm0
+; CHECK-NEXT: vpermw (%rdi), %zmm0, %zmm0
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-NEXT: retq
%vec = load <32 x i16>, ptr %vp
%res = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> <i32 3, i32 3, i32 20, i32 27, i32 8, i32 31, i32 3, i32 27, i32 12, i32 2, i32 8, i32 14, i32 25, i32 27, i32 4, i32 16>
@@ -697,11 +700,11 @@ define <16 x i16> @test_32xi16_to_16xi16_perm_mem_mask3(ptr %vp) {
define <16 x i16> @test_masked_32xi16_to_16xi16_perm_mem_mask3(ptr %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_to_16xi16_perm_mem_mask3:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa (%rdi), %ymm2
-; CHECK-NEXT: vpmovsxbw {{.*#+}} ymm3 = [3,3,20,27,8,31,3,27,12,2,8,14,25,27,4,16]
-; CHECK-NEXT: vpermi2w 32(%rdi), %ymm2, %ymm3
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; CHECK-NEXT: vpmovsxbw {{.*#+}} ymm2 = [3,3,20,27,8,31,3,27,12,2,8,14,25,27,4,16]
; CHECK-NEXT: vptestnmw %ymm1, %ymm1, %k1
-; CHECK-NEXT: vmovdqu16 %ymm3, %ymm0 {%k1}
+; CHECK-NEXT: vpermw (%rdi), %zmm2, %zmm0 {%k1}
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-NEXT: retq
%vec = load <32 x i16>, ptr %vp
%shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> <i32 3, i32 3, i32 20, i32 27, i32 8, i32 31, i32 3, i32 27, i32 12, i32 2, i32 8, i32 14, i32 25, i32 27, i32 4, i32 16>
@@ -713,11 +716,10 @@ define <16 x i16> @test_masked_32xi16_to_16xi16_perm_mem_mask3(ptr %vp, <16 x i1
define <16 x i16> @test_masked_z_32xi16_to_16xi16_perm_mem_mask3(ptr %vp, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_to_16xi16_perm_mem_mask3:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa (%rdi), %ymm2
; CHECK-NEXT: vpmovsxbw {{.*#+}} ymm1 = [3,3,20,27,8,31,3,27,12,2,8,14,25,27,4,16]
; CHECK-NEXT: vptestnmw %ymm0, %ymm0, %k1
-; CHECK-NEXT: vpermi2w 32(%rdi), %ymm2, %ymm1 {%k1} {z}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpermw (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-NEXT: retq
%vec = load <32 x i16>, ptr %vp
%shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> <i32 3, i32 3, i32 20, i32 27, i32 8, i32 31, i32 3, i32 27, i32 12, i32 2, i32 8, i32 14, i32 25, i32 27, i32 4, i32 16>
@@ -810,11 +812,11 @@ define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mem_mask1(ptr %vp, <8 x i16
define <8 x i16> @test_masked_32xi16_to_8xi16_perm_mem_mask2(ptr %vp, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_to_8xi16_perm_mem_mask2:
; CHECK: # %bb.0:
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; CHECK-NEXT: vpmovsxbw {{.*#+}} xmm2 = [6,18,0,4,10,25,22,10]
-; CHECK-NEXT: vmovdqa (%rdi), %ymm3
-; CHECK-NEXT: vpermt2w 32(%rdi), %ymm2, %ymm3
; CHECK-NEXT: vptestnmw %xmm1, %xmm1, %k1
-; CHECK-NEXT: vmovdqu16 %xmm3, %xmm0 {%k1}
+; CHECK-NEXT: vpermw (%rdi), %zmm2, %zmm0 {%k1}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <32 x i16>, ptr %vp
@@ -827,11 +829,10 @@ define <8 x i16> @test_masked_32xi16_to_8xi16_perm_mem_mask2(ptr %vp, <8 x i16>
define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mem_mask2(ptr %vp, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_to_8xi16_perm_mem_mask2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpmovsxbw {{.*#+}} xmm2 = [6,18,0,4,10,25,22,10]
-; CHECK-NEXT: vmovdqa (%rdi), %ymm1
+; CHECK-NEXT: vpmovsxbw {{.*#+}} xmm1 = [6,18,0,4,10,25,22,10]
; CHECK-NEXT: vptestnmw %xmm0, %xmm0, %k1
-; CHECK-NEXT: vpermt2w 32(%rdi), %ymm2, %ymm1 {%k1} {z}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpermw (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <32 x i16>, ptr %vp
@@ -844,10 +845,9 @@ define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mem_mask2(ptr %vp, <8 x i16
define <8 x i16> @test_32xi16_to_8xi16_perm_mem_mask3(ptr %vp) {
; CHECK-LABEL: test_32xi16_to_8xi16_perm_mem_mask3:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpmovsxbw {{.*#+}} xmm1 = [19,1,5,31,9,12,17,9]
-; CHECK-NEXT: vmovdqa (%rdi), %ymm0
-; CHECK-NEXT: vpermt2w 32(%rdi), %ymm1, %ymm0
-; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; CHECK-NEXT: vpmovsxbw {{.*#+}} xmm0 = [19,1,5,31,9,12,17,9]
+; CHECK-NEXT: vpermw (%rdi), %zmm0, %zmm0
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <32 x i16>, ptr %vp
@@ -857,11 +857,11 @@ define <8 x i16> @test_32xi16_to_8xi16_perm_mem_mask3(ptr %vp) {
define <8 x i16> @test_masked_32xi16_to_8xi16_perm_mem_mask3(ptr %vp, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_to_8xi16_perm_mem_mask3:
; CHECK: # %bb.0:
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; CHECK-NEXT: vpmovsxbw {{.*#+}} xmm2 = [19,1,5,31,9,12,17,9]
-; CHECK-NEXT: vmovdqa (%rdi), %ymm3
-; CHECK-NEXT: vpermt2w 32(%rdi), %ymm2, %ymm3
; CHECK-NEXT: vptestnmw %xmm1, %xmm1, %k1
-; CHECK-NEXT: vmovdqu16 %xmm3, %xmm0 {%k1}
+; CHECK-NEXT: vpermw (%rdi), %zmm2, %zmm0 {%k1}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <32 x i16>, ptr %vp
@@ -874,11 +874,10 @@ define <8 x i16> @test_masked_32xi16_to_8xi16_perm_mem_mask3(ptr %vp, <8 x i16>
define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mem_mask3(ptr %vp, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_to_8xi16_perm_mem_mask3:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpmovsxbw {{.*#+}} xmm2 = [19,1,5,31,9,12,17,9]
-; CHECK-NEXT: vmovdqa (%rdi), %ymm1
+; CHECK-NEXT: vpmovsxbw {{.*#+}} xmm1 = [19,1,5,31,9,12,17,9]
; CHECK-NEXT: vptestnmw %xmm0, %xmm0, %k1
-; CHECK-NEXT: vpermt2w 32(%rdi), %ymm2, %ymm1 {%k1} {z}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpermw (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <32 x i16>, ptr %vp
@@ -1082,11 +1081,12 @@ define <4 x i32> @test_masked_z_8xi32_to_4xi32_perm_mem_mask0(ptr %vp, <4 x i32>
define <4 x i32> @test_masked_8xi32_to_4xi32_perm_mem_mask1(ptr %vp, <4 x i32> %vec2, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_8xi32_to_4xi32_perm_mem_mask1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa (%rdi), %xmm2
-; CHECK-NEXT: vpmovsxbd {{.*#+}} xmm3 = [5,0,0,3]
-; CHECK-NEXT: vpermi2d 16(%rdi), %xmm2, %xmm3
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; CHECK-NEXT: vpmovsxbd {{.*#+}} xmm2 = [5,0,0,3]
; CHECK-NEXT: vptestnmd %xmm1, %xmm1, %k1
-; CHECK-NEXT: vmovdqa32 %xmm3, %xmm0 {%k1}
+; CHECK-NEXT: vpermd (%rdi), %ymm2, %ymm0 {%k1}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <8 x i32>, ptr %vp
%shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <4 x i32> <i32 5, i32 0, i32 0, i32 3>
@@ -1098,11 +1098,11 @@ define <4 x i32> @test_masked_8xi32_to_4xi32_perm_mem_mask1(ptr %vp, <4 x i32> %
define <4 x i32> @test_masked_z_8xi32_to_4xi32_perm_mem_mask1(ptr %vp, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_8xi32_to_4xi32_perm_mem_mask1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa (%rdi), %xmm2
; CHECK-NEXT: vpmovsxbd {{.*#+}} xmm1 = [5,0,0,3]
; CHECK-NEXT: vptestnmd %xmm0, %xmm0, %k1
-; CHECK-NEXT: vpermi2d 16(%rdi), %xmm2, %xmm1 {%k1} {z}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpermd (%rdi), %ymm1, %ymm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <8 x i32>, ptr %vp
%shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <4 x i32> <i32 5, i32 0, i32 0, i32 3>
@@ -1567,9 +1567,9 @@ define <8 x i32> @test_masked_z_16xi32_to_8xi32_perm_mem_mask2(ptr %vp, <8 x i32
define <8 x i32> @test_16xi32_to_8xi32_perm_mem_mask3(ptr %vp) {
; CHECK-LABEL: test_16xi32_to_8xi32_perm_mem_mask3:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa (%rdi), %ymm1
; CHECK-NEXT: vpmovsxbd {{.*#+}} ymm0 = [8,4,1,13,15,4,6,12]
-; CHECK-NEXT: vpermi2d 32(%rdi), %ymm1, %ymm0
+; CHECK-NEXT: vpermps (%rdi), %zmm0, %zmm0
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-NEXT: retq
%vec = load <16 x i32>, ptr %vp
%res = shufflevector <16 x i32> %vec, <16 x i32> undef, <8 x i32> <i32 8, i32 4, i32 1, i32 13, i32 15, i32 4, i32 6, i32 12>
@@ -1578,11 +1578,11 @@ define <8 x i32> @test_16xi32_to_8xi32_perm_mem_mask3(ptr %vp) {
define <8 x i32> @test_masked_16xi32_to_8xi32_perm_mem_mask3(ptr %vp, <8 x i32> %vec2, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_to_8xi32_perm_mem_mask3:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa (%rdi), %ymm2
-; CHECK-NEXT: vpmovsxbd {{.*#+}} ymm3 = [8,4,1,13,15,4,6,12]
-; CHECK-NEXT: vpermi2d 32(%rdi), %ymm2, %ymm3
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; CHECK-NEXT: vpmovsxbd {{.*#+}} ymm2 = [8,4,1,13,15,4,6,12]
; CHECK-NEXT: vptestnmd %ymm1, %ymm1, %k1
-; CHECK-NEXT: vmovdqa32 %ymm3, %ymm0 {%k1}
+; CHECK-NEXT: vpermd (%rdi), %zmm2, %zmm0 {%k1}
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-NEXT: retq
%vec = load <16 x i32>, ptr %vp
%shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <8 x i32> <i32 8, i32 4, i32 1, i32 13, i32 15, i32 4, i32 6, i32 12>
@@ -1594,11 +1594,10 @@ define <8 x i32> @test_masked_16xi32_to_8xi32_perm_mem_mask3(ptr %vp, <8 x i32>
define <8 x i32> @test_masked_z_16xi32_to_8xi32_perm_mem_mask3(ptr %vp, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_to_8xi32_perm_mem_mask3:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa (%rdi), %ymm2
; CHECK-NEXT: vpmovsxbd {{.*#+}} ymm1 = [8,4,1,13,15,4,6,12]
; CHECK-NEXT: vptestnmd %ymm0, %ymm0, %k1
-; CHECK-NEXT: vpermi2d 32(%rdi), %ymm2, %ymm1 {%k1} {z}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpermd (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-NEXT: retq
%vec = load <16 x i32>, ptr %vp
%shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <8 x i32> <i32 8, i32 4, i32 1, i32 13, i32 15, i32 4, i32 6, i32 12>
@@ -1610,10 +1609,9 @@ define <8 x i32> @test_masked_z_16xi32_to_8xi32_perm_mem_mask3(ptr %vp, <8 x i32
define <4 x i32> @test_16xi32_to_4xi32_perm_mem_mask0(ptr %vp) {
; CHECK-LABEL: test_16xi32_to_4xi32_perm_mem_mask0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpmovsxbd {{.*#+}} xmm1 = [13,0,0,6]
-; CHECK-NEXT: vmovdqa (%rdi), %ymm0
-; CHECK-NEXT: vpermt2d 32(%rdi), %ymm1, %ymm0
-; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; CHECK-NEXT: vpmovsxbd {{.*#+}} xmm0 = [13,0,0,6]
+; CHECK-NEXT: vpermps (%rdi), %zmm0, %zmm0
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x i32>, ptr %vp
@@ -1623,11 +1621,11 @@ define <4 x i32> @test_16xi32_to_4xi32_perm_mem_mask0(ptr %vp) {
define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mem_mask0(ptr %vp, <4 x i32> %vec2, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_to_4xi32_perm_mem_mask0:
; CHECK: # %bb.0:
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; CHECK-NEXT: vpmovsxbd {{.*#+}} xmm2 = [13,0,0,6]
-; CHECK-NEXT: vmovdqa (%rdi), %ymm3
-; CHECK-NEXT: vpermt2d 32(%rdi), %ymm2, %ymm3
; CHECK-NEXT: vptestnmd %xmm1, %xmm1, %k1
-; CHECK-NEXT: vmovdqa32 %xmm3, %xmm0 {%k1}
+; CHECK-NEXT: vpermd (%rdi), %zmm2, %zmm0 {%k1}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x i32>, ptr %vp
@@ -1640,11 +1638,10 @@ define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mem_mask0(ptr %vp, <4 x i32>
define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mem_mask0(ptr %vp, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_to_4xi32_perm_mem_mask0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpmovsxbd {{.*#+}} xmm2 = [13,0,0,6]
-; CHECK-NEXT: vmovdqa (%rdi), %ymm1
+; CHECK-NEXT: vpmovsxbd {{.*#+}} xmm1 = [13,0,0,6]
; CHECK-NEXT: vptestnmd %xmm0, %xmm0, %k1
-; CHECK-NEXT: vpermt2d 32(%rdi), %ymm2, %ymm1 {%k1} {z}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpermd (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x i32>, ptr %vp
@@ -1691,11 +1688,11 @@ define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mem_mask1(ptr %vp, <4 x i32
define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mem_mask2(ptr %vp, <4 x i32> %vec2, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_to_4xi32_perm_mem_mask2:
; CHECK: # %bb.0:
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; CHECK-NEXT: vpmovsxbd {{.*#+}} xmm2 = [2,15,6,9]
-; CHECK-NEXT: vmovdqa (%rdi), %ymm3
-; CHECK-NEXT: vpermt2d 32(%rdi), %ymm2, %ymm3
; CHECK-NEXT: vptestnmd %xmm1, %xmm1, %k1
-; CHECK-NEXT: vmovdqa32 %xmm3, %xmm0 {%k1}
+; CHECK-NEXT: vpermd (%rdi), %zmm2, %zmm0 {%k1}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x i32>, ptr %vp
@@ -1708,11 +1705,10 @@ define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mem_mask2(ptr %vp, <4 x i32>
define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mem_mask2(ptr %vp, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_to_4xi32_perm_mem_mask2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpmovsxbd {{.*#+}} xmm2 = [2,15,6,9]
-; CHECK-NEXT: vmovdqa (%rdi), %ymm1
+; CHECK-NEXT: vpmovsxbd {{.*#+}} xmm1 = [2,15,6,9]
; CHECK-NEXT: vptestnmd %xmm0, %xmm0, %k1
-; CHECK-NEXT: vpermt2d 32(%rdi), %ymm2, %ymm1 {%k1} {z}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpermd (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x i32>, ptr %vp
@@ -2474,9 +2470,9 @@ define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask2(ptr %vp, <4 x i64>
define <4 x i64> @test_8xi64_to_4xi64_perm_mem_mask3(ptr %vp) {
; CHECK-FAST-LABEL: test_8xi64_to_4xi64_perm_mem_mask3:
; CHECK-FAST: # %bb.0:
-; CHECK-FAST-NEXT: vmovdqa (%rdi), %ymm1
; CHECK-FAST-NEXT: vpmovsxbq {{.*#+}} ymm0 = [7,0,0,2]
-; CHECK-FAST-NEXT: vpermi2q 32(%rdi), %ymm1, %ymm0
+; CHECK-FAST-NEXT: vpermpd (%rdi), %zmm0, %zmm0
+; CHECK-FAST-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-FAST-NEXT: retq
;
; CHECK-FAST-PERLANE-LABEL: test_8xi64_to_4xi64_perm_mem_mask3:
@@ -2492,11 +2488,11 @@ define <4 x i64> @test_8xi64_to_4xi64_perm_mem_mask3(ptr %vp) {
define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mem_mask3(ptr %vp, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-FAST-LABEL: test_masked_8xi64_to_4xi64_perm_mem_mask3:
; CHECK-FAST: # %bb.0:
-; CHECK-FAST-NEXT: vmovdqa (%rdi), %ymm2
-; CHECK-FAST-NEXT: vpmovsxbq {{.*#+}} ymm3 = [7,0,0,2]
-; CHECK-FAST-NEXT: vpermi2q 32(%rdi), %ymm2, %ymm3
+; CHECK-FAST-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; CHECK-FAST-NEXT: vpmovsxbq {{.*#+}} ymm2 = [7,0,0,2]
; CHECK-FAST-NEXT: vptestnmq %ymm1, %ymm1, %k1
-; CHECK-FAST-NEXT: vmovdqa64 %ymm3, %ymm0 {%k1}
+; CHECK-FAST-NEXT: vpermq (%rdi), %zmm2, %zmm0 {%k1}
+; CHECK-FAST-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-FAST-NEXT: retq
;
; CHECK-FAST-PERLANE-LABEL: test_masked_8xi64_to_4xi64_perm_mem_mask3:
@@ -2516,11 +2512,10 @@ define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mem_mask3(ptr %vp, <4 x i64> %
define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask3(ptr %vp, <4 x i64> %mask) {
; CHECK-FAST-LABEL: test_masked_z_8xi64_to_4xi64_perm_mem_mask3:
; CHECK-FAST: # %bb.0:
-; CHECK-FAST-NEXT: vmovdqa (%rdi), %ymm2
; CHECK-FAST-NEXT: vpmovsxbq {{.*#+}} ymm1 = [7,0,0,2]
; CHECK-FAST-NEXT: vptestnmq %ymm0, %ymm0, %k1
-; CHECK-FAST-NEXT: vpermi2q 32(%rdi), %ymm2, %ymm1 {%k1} {z}
-; CHECK-FAST-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-FAST-NEXT: vpermq (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-FAST-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-FAST-NEXT: retq
;
; CHECK-FAST-PERLANE-LABEL: test_masked_z_8xi64_to_4xi64_perm_mem_mask3:
@@ -2572,11 +2567,11 @@ define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask4(ptr %vp, <4 x i64>
define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mem_mask5(ptr %vp, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-FAST-LABEL: test_masked_8xi64_to_4xi64_perm_mem_mask5:
; CHECK-FAST: # %bb.0:
-; CHECK-FAST-NEXT: vmovdqa (%rdi), %ymm2
-; CHECK-FAST-NEXT: vpmovsxbq {{.*#+}} ymm3 = [0,2,7,1]
-; CHECK-FAST-NEXT: vpermi2q 32(%rdi), %ymm2, %ymm3
+; CHECK-FAST-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; CHECK-FAST-NEXT: vpmovsxbq {{.*#+}} ymm2 = [0,2,7,1]
; CHECK-FAST-NEXT: vptestnmq %ymm1, %ymm1, %k1
-; CHECK-FAST-NEXT: vmovdqa64 %ymm3, %ymm0 {%k1}
+; CHECK-FAST-NEXT: vpermq (%rdi), %zmm2, %zmm0 {%k1}
+; CHECK-FAST-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-FAST-NEXT: retq
;
; CHECK-FAST-PERLANE-LABEL: test_masked_8xi64_to_4xi64_perm_mem_mask5:
@@ -2596,11 +2591,10 @@ define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mem_mask5(ptr %vp, <4 x i64> %
define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask5(ptr %vp, <4 x i64> %mask) {
; CHECK-FAST-LABEL: test_masked_z_8xi64_to_4xi64_perm_mem_mask5:
; CHECK-FAST: # %bb.0:
-; CHECK-FAST-NEXT: vmovdqa (%rdi), %ymm2
; CHECK-FAST-NEXT: vpmovsxbq {{.*#+}} ymm1 = [0,2,7,1]
; CHECK-FAST-NEXT: vptestnmq %ymm0, %ymm0, %k1
-; CHECK-FAST-NEXT: vpermi2q 32(%rdi), %ymm2, %ymm1 {%k1} {z}
-; CHECK-FAST-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-FAST-NEXT: vpermq (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-FAST-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-FAST-NEXT: retq
;
; CHECK-FAST-PERLANE-LABEL: test_masked_z_8xi64_to_4xi64_perm_mem_mask5:
@@ -2620,9 +2614,9 @@ define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask5(ptr %vp, <4 x i64>
define <4 x i64> @test_8xi64_to_4xi64_perm_mem_mask6(ptr %vp) {
; CHECK-LABEL: test_8xi64_to_4xi64_perm_mem_mask6:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa (%rdi), %ymm1
; CHECK-NEXT: vpmovsxbq {{.*#+}} ymm0 = [7,2,3,2]
-; CHECK-NEXT: vpermi2q 32(%rdi), %ymm1, %ymm0
+; CHECK-NEXT: vpermpd (%rdi), %zmm0, %zmm0
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-NEXT: retq
%vec = load <8 x i64>, ptr %vp
%res = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> <i32 7, i32 2, i32 3, i32 2>
@@ -2631,11 +2625,11 @@ define <4 x i64> @test_8xi64_to_4xi64_perm_mem_mask6(ptr %vp) {
define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mem_mask6(ptr %vp, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mem_mask6:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa (%rdi), %ymm2
-; CHECK-NEXT: vpmovsxbq {{.*#+}} ymm3 = [7,2,3,2]
-; CHECK-NEXT: vpermi2q 32(%rdi), %ymm2, %ymm3
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; CHECK-NEXT: vpmovsxbq {{.*#+}} ymm2 = [7,2,3,2]
; CHECK-NEXT: vptestnmq %ymm1, %ymm1, %k1
-; CHECK-NEXT: vmovdqa64 %ymm3, %ymm0 {%k1}
+; CHECK-NEXT: vpermq (%rdi), %zmm2, %zmm0 {%k1}
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-NEXT: retq
%vec = load <8 x i64>, ptr %vp
%shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> <i32 7, i32 2, i32 3, i32 2>
@@ -2647,11 +2641,10 @@ define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mem_mask6(ptr %vp, <4 x i64> %
define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask6(ptr %vp, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mem_mask6:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa (%rdi), %ymm2
; CHECK-NEXT: vpmovsxbq {{.*#+}} ymm1 = [7,2,3,2]
; CHECK-NEXT: vptestnmq %ymm0, %ymm0, %k1
-; CHECK-NEXT: vpermi2q 32(%rdi), %ymm2, %ymm1 {%k1} {z}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpermq (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-NEXT: retq
%vec = load <8 x i64>, ptr %vp
%shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> <i32 7, i32 2, i32 3, i32 2>
@@ -3032,12 +3025,13 @@ define <4 x float> @test_masked_z_8xfloat_to_4xfloat_perm_mem_mask1(ptr %vp, <4
define <4 x float> @test_masked_8xfloat_to_4xfloat_perm_mem_mask2(ptr %vp, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_to_4xfloat_perm_mem_mask2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovaps (%rdi), %xmm2
-; CHECK-NEXT: vpmovsxbd {{.*#+}} xmm3 = [3,1,3,7]
-; CHECK-NEXT: vpermi2ps 16(%rdi), %xmm2, %xmm3
-; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
-; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
-; CHECK-NEXT: vmovaps %xmm3, %xmm0 {%k1}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; CHECK-NEXT: vpmovsxbd {{.*#+}} xmm2 = [3,1,3,7]
+; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
+; CHECK-NEXT: vcmpeqps %xmm3, %xmm1, %k1
+; CHECK-NEXT: vpermps (%rdi), %ymm2, %ymm0 {%k1}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <8 x float>, ptr %vp
%shuf = shufflevector <8 x float> %vec, <8 x float> undef, <4 x i32> <i32 3, i32 1, i32 3, i32 7>
@@ -3049,12 +3043,12 @@ define <4 x float> @test_masked_8xfloat_to_4xfloat_perm_mem_mask2(ptr %vp, <4 x
define <4 x float> @test_masked_z_8xfloat_to_4xfloat_perm_mem_mask2(ptr %vp, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_to_4xfloat_perm_mem_mask2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovaps (%rdi), %xmm2
; CHECK-NEXT: vpmovsxbd {{.*#+}} xmm1 = [3,1,3,7]
-; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
-; CHECK-NEXT: vcmpeqps %xmm3, %xmm0, %k1
-; CHECK-NEXT: vpermi2ps 16(%rdi), %xmm2, %xmm1 {%k1} {z}
-; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: vcmpeqps %xmm2, %xmm0, %k1
+; CHECK-NEXT: vpermps (%rdi), %ymm1, %ymm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <8 x float>, ptr %vp
%shuf = shufflevector <8 x float> %vec, <8 x float> undef, <4 x i32> <i32 3, i32 1, i32 3, i32 7>
@@ -3066,9 +3060,10 @@ define <4 x float> @test_masked_z_8xfloat_to_4xfloat_perm_mem_mask2(ptr %vp, <4
define <4 x float> @test_8xfloat_to_4xfloat_perm_mem_mask3(ptr %vp) {
; CHECK-LABEL: test_8xfloat_to_4xfloat_perm_mem_mask3:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovaps (%rdi), %xmm1
; CHECK-NEXT: vpmovsxbd {{.*#+}} xmm0 = [1,3,5,3]
-; CHECK-NEXT: vpermi2ps 16(%rdi), %xmm1, %xmm0
+; CHECK-NEXT: vpermps (%rdi), %ymm0, %ymm0
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <8 x float>, ptr %vp
%res = shufflevector <8 x float> %vec, <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 3>
@@ -3077,12 +3072,13 @@ define <4 x float> @test_8xfloat_to_4xfloat_perm_mem_mask3(ptr %vp) {
define <4 x float> @test_masked_8xfloat_to_4xfloat_perm_mem_mask3(ptr %vp, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_to_4xfloat_perm_mem_mask3:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovaps (%rdi), %xmm2
-; CHECK-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,3,5,3]
-; CHECK-NEXT: vpermi2ps 16(%rdi), %xmm2, %xmm3
-; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
-; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
-; CHECK-NEXT: vmovaps %xmm3, %xmm0 {%k1}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; CHECK-NEXT: vpmovsxbd {{.*#+}} xmm2 = [1,3,5,3]
+; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
+; CHECK-NEXT: vcmpeqps %xmm3, %xmm1, %k1
+; CHECK-NEXT: vpermps (%rdi), %ymm2, %ymm0 {%k1}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <8 x float>, ptr %vp
%shuf = shufflevector <8 x float> %vec, <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 3>
@@ -3094,12 +3090,12 @@ define <4 x float> @test_masked_8xfloat_to_4xfloat_perm_mem_mask3(ptr %vp, <4 x
define <4 x float> @test_masked_z_8xfloat_to_4xfloat_perm_mem_mask3(ptr %vp, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_to_4xfloat_perm_mem_mask3:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovaps (%rdi), %xmm2
; CHECK-NEXT: vpmovsxbd {{.*#+}} xmm1 = [1,3,5,3]
-; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
-; CHECK-NEXT: vcmpeqps %xmm3, %xmm0, %k1
-; CHECK-NEXT: vpermi2ps 16(%rdi), %xmm2, %xmm1 {%k1} {z}
-; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: vcmpeqps %xmm2, %xmm0, %k1
+; CHECK-NEXT: vpermps (%rdi), %ymm1, %ymm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <8 x float>, ptr %vp
%shuf = shufflevector <8 x float> %vec, <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 3>
@@ -3424,9 +3420,9 @@ define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mask3(<16 x float> %v
define <8 x float> @test_16xfloat_to_8xfloat_perm_mem_mask0(ptr %vp) {
; CHECK-LABEL: test_16xfloat_to_8xfloat_perm_mem_mask0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovaps (%rdi), %ymm1
; CHECK-NEXT: vpmovsxbd {{.*#+}} ymm0 = [7,6,7,11,5,10,0,4]
-; CHECK-NEXT: vpermi2ps 32(%rdi), %ymm1, %ymm0
+; CHECK-NEXT: vpermps (%rdi), %zmm0, %zmm0
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-NEXT: retq
%vec = load <16 x float>, ptr %vp
%res = shufflevector <16 x float> %vec, <16 x float> undef, <8 x i32> <i32 7, i32 6, i32 7, i32 11, i32 5, i32 10, i32 0, i32 4>
@@ -3435,12 +3431,12 @@ define <8 x float> @test_16xfloat_to_8xfloat_perm_mem_mask0(ptr %vp) {
define <8 x float> @test_masked_16xfloat_to_8xfloat_perm_mem_mask0(ptr %vp, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_to_8xfloat_perm_mem_mask0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovaps (%rdi), %ymm2
-; CHECK-NEXT: vpmovsxbd {{.*#+}} ymm3 = [7,6,7,11,5,10,0,4]
-; CHECK-NEXT: vpermi2ps 32(%rdi), %ymm2, %ymm3
-; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
-; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
-; CHECK-NEXT: vmovaps %ymm3, %ymm0 {%k1}
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; CHECK-NEXT: vpmovsxbd {{.*#+}} ymm2 = [7,6,7,11,5,10,0,4]
+; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
+; CHECK-NEXT: vcmpeqps %ymm3, %ymm1, %k1
+; CHECK-NEXT: vpermps (%rdi), %zmm2, %zmm0 {%k1}
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-NEXT: retq
%vec = load <16 x float>, ptr %vp
%shuf = shufflevector <16 x float> %vec, <16 x float> undef, <8 x i32> <i32 7, i32 6, i32 7, i32 11, i32 5, i32 10, i32 0, i32 4>
@@ -3452,12 +3448,11 @@ define <8 x float> @test_masked_16xfloat_to_8xfloat_perm_mem_mask0(ptr %vp, <8 x
define <8 x float> @test_masked_z_16xfloat_to_8xfloat_perm_mem_mask0(ptr %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_to_8xfloat_perm_mem_mask0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovaps (%rdi), %ymm2
; CHECK-NEXT: vpmovsxbd {{.*#+}} ymm1 = [7,6,7,11,5,10,0,4]
-; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
-; CHECK-NEXT: vcmpeqps %ymm3, %ymm0, %k1
-; CHECK-NEXT: vpermi2ps 32(%rdi), %ymm2, %ymm1 {%k1} {z}
-; CHECK-NEXT: vmovaps %ymm1, %ymm0
+; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: vcmpeqps %ymm2, %ymm0, %k1
+; CHECK-NEXT: vpermps (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-NEXT: retq
%vec = load <16 x float>, ptr %vp
%shuf = shufflevector <16 x float> %vec, <16 x float> undef, <8 x i32> <i32 7, i32 6, i32 7, i32 11, i32 5, i32 10, i32 0, i32 4>
@@ -3469,12 +3464,12 @@ define <8 x float> @test_masked_z_16xfloat_to_8xfloat_perm_mem_mask0(ptr %vp, <8
define <8 x float> @test_masked_16xfloat_to_8xfloat_perm_mem_mask1(ptr %vp, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_to_8xfloat_perm_mem_mask1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovaps (%rdi), %ymm2
-; CHECK-NEXT: vpmovsxbd {{.*#+}} ymm3 = [11,0,9,0,7,14,0,8]
-; CHECK-NEXT: vpermi2ps 32(%rdi), %ymm2, %ymm3
-; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
-; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
-; CHECK-NEXT: vmovaps %ymm3, %ymm0 {%k1}
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; CHECK-NEXT: vpmovsxbd {{.*#+}} ymm2 = [11,0,9,0,7,14,0,8]
+; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
+; CHECK-NEXT: vcmpeqps %ymm3, %ymm1, %k1
+; CHECK-NEXT: vpermps (%rdi), %zmm2, %zmm0 {%k1}
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-NEXT: retq
%vec = load <16 x float>, ptr %vp
%shuf = shufflevector <16 x float> %vec, <16 x float> undef, <8 x i32> <i32 11, i32 0, i32 9, i32 0, i32 7, i32 14, i32 0, i32 8>
@@ -3486,12 +3481,11 @@ define <8 x float> @test_masked_16xfloat_to_8xfloat_perm_mem_mask1(ptr %vp, <8 x
define <8 x float> @test_masked_z_16xfloat_to_8xfloat_perm_mem_mask1(ptr %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_to_8xfloat_perm_mem_mask1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovaps (%rdi), %ymm2
; CHECK-NEXT: vpmovsxbd {{.*#+}} ymm1 = [11,0,9,0,7,14,0,8]
-; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
-; CHECK-NEXT: vcmpeqps %ymm3, %ymm0, %k1
-; CHECK-NEXT: vpermi2ps 32(%rdi), %ymm2, %ymm1 {%k1} {z}
-; CHECK-NEXT: vmovaps %ymm1, %ymm0
+; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: vcmpeqps %ymm2, %ymm0, %k1
+; CHECK-NEXT: vpermps (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-NEXT: retq
%vec = load <16 x float>, ptr %vp
%shuf = shufflevector <16 x float> %vec, <16 x float> undef, <8 x i32> <i32 11, i32 0, i32 9, i32 0, i32 7, i32 14, i32 0, i32 8>
@@ -3724,10 +3718,9 @@ define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mem_mask2(ptr %vp, <4
define <4 x float> @test_16xfloat_to_4xfloat_perm_mem_mask3(ptr %vp) {
; CHECK-LABEL: test_16xfloat_to_4xfloat_perm_mem_mask3:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpmovsxbd {{.*#+}} xmm1 = [3,3,15,9]
-; CHECK-NEXT: vmovaps (%rdi), %ymm0
-; CHECK-NEXT: vpermt2ps 32(%rdi), %ymm1, %ymm0
-; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; CHECK-NEXT: vpmovsxbd {{.*#+}} xmm0 = [3,3,15,9]
+; CHECK-NEXT: vpermps (%rdi), %zmm0, %zmm0
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x float>, ptr %vp
@@ -3737,12 +3730,12 @@ define <4 x float> @test_16xfloat_to_4xfloat_perm_mem_mask3(ptr %vp) {
define <4 x float> @test_masked_16xfloat_to_4xfloat_perm_mem_mask3(ptr %vp, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_to_4xfloat_perm_mem_mask3:
; CHECK: # %bb.0:
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; CHECK-NEXT: vpmovsxbd {{.*#+}} xmm2 = [3,3,15,9]
-; CHECK-NEXT: vmovaps (%rdi), %ymm3
-; CHECK-NEXT: vpermt2ps 32(%rdi), %ymm2, %ymm3
-; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
-; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
-; CHECK-NEXT: vmovaps %xmm3, %xmm0 {%k1}
+; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
+; CHECK-NEXT: vcmpeqps %xmm3, %xmm1, %k1
+; CHECK-NEXT: vpermps (%rdi), %zmm2, %zmm0 {%k1}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x float>, ptr %vp
@@ -3755,12 +3748,11 @@ define <4 x float> @test_masked_16xfloat_to_4xfloat_perm_mem_mask3(ptr %vp, <4 x
define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mem_mask3(ptr %vp, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_to_4xfloat_perm_mem_mask3:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpmovsxbd {{.*#+}} xmm2 = [3,3,15,9]
-; CHECK-NEXT: vmovaps (%rdi), %ymm1
-; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
-; CHECK-NEXT: vcmpeqps %xmm3, %xmm0, %k1
-; CHECK-NEXT: vpermt2ps 32(%rdi), %ymm2, %ymm1 {%k1} {z}
-; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: vpmovsxbd {{.*#+}} xmm1 = [3,3,15,9]
+; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: vcmpeqps %xmm2, %xmm0, %k1
+; CHECK-NEXT: vpermps (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x float>, ptr %vp
@@ -4346,9 +4338,9 @@ define <2 x double> @test_masked_z_8xdouble_to_2xdouble_perm_mask1(<8 x double>
define <4 x double> @test_8xdouble_to_4xdouble_perm_mem_mask0(ptr %vp) {
; CHECK-LABEL: test_8xdouble_to_4xdouble_perm_mem_mask0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovapd (%rdi), %ymm1
; CHECK-NEXT: vpmovsxbq {{.*#+}} ymm0 = [1,6,7,2]
-; CHECK-NEXT: vpermi2pd 32(%rdi), %ymm1, %ymm0
+; CHECK-NEXT: vpermpd (%rdi), %zmm0, %zmm0
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-NEXT: retq
%vec = load <8 x double>, ptr %vp
%res = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> <i32 1, i32 6, i32 7, i32 2>
@@ -4357,12 +4349,12 @@ define <4 x double> @test_8xdouble_to_4xdouble_perm_mem_mask0(ptr %vp) {
define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask0(ptr %vp, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mem_mask0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovapd (%rdi), %ymm2
-; CHECK-NEXT: vpmovsxbq {{.*#+}} ymm3 = [1,6,7,2]
-; CHECK-NEXT: vpermi2pd 32(%rdi), %ymm2, %ymm3
-; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2
-; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
-; CHECK-NEXT: vmovapd %ymm3, %ymm0 {%k1}
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; CHECK-NEXT: vpmovsxbq {{.*#+}} ymm2 = [1,6,7,2]
+; CHECK-NEXT: vxorpd %xmm3, %xmm3, %xmm3
+; CHECK-NEXT: vcmpeqpd %ymm3, %ymm1, %k1
+; CHECK-NEXT: vpermpd (%rdi), %zmm2, %zmm0 {%k1}
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-NEXT: retq
%vec = load <8 x double>, ptr %vp
%shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> <i32 1, i32 6, i32 7, i32 2>
@@ -4374,12 +4366,11 @@ define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask0(ptr %vp, <4
define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask0(ptr %vp, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mem_mask0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovapd (%rdi), %ymm2
; CHECK-NEXT: vpmovsxbq {{.*#+}} ymm1 = [1,6,7,2]
-; CHECK-NEXT: vxorpd %xmm3, %xmm3, %xmm3
-; CHECK-NEXT: vcmpeqpd %ymm3, %ymm0, %k1
-; CHECK-NEXT: vpermi2pd 32(%rdi), %ymm2, %ymm1 {%k1} {z}
-; CHECK-NEXT: vmovapd %ymm1, %ymm0
+; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: vcmpeqpd %ymm2, %ymm0, %k1
+; CHECK-NEXT: vpermpd (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-NEXT: retq
%vec = load <8 x double>, ptr %vp
%shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> <i32 1, i32 6, i32 7, i32 2>
@@ -4441,12 +4432,12 @@ define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask1(ptr %vp,
define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask2(ptr %vp, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-FAST-LABEL: test_masked_8xdouble_to_4xdouble_perm_mem_mask2:
; CHECK-FAST: # %bb.0:
-; CHECK-FAST-NEXT: vmovapd (%rdi), %ymm2
-; CHECK-FAST-NEXT: vpmovsxbq {{.*#+}} ymm3 = [1,2,3,4]
-; CHECK-FAST-NEXT: vpermi2pd 32(%rdi), %ymm2, %ymm3
-; CHECK-FAST-NEXT: vxorpd %xmm2, %xmm2, %xmm2
-; CHECK-FAST-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
-; CHECK-FAST-NEXT: vmovapd %ymm3, %ymm0 {%k1}
+; CHECK-FAST-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; CHECK-FAST-NEXT: vpmovsxbq {{.*#+}} ymm2 = [1,2,3,4]
+; CHECK-FAST-NEXT: vxorpd %xmm3, %xmm3, %xmm3
+; CHECK-FAST-NEXT: vcmpeqpd %ymm3, %ymm1, %k1
+; CHECK-FAST-NEXT: vpermpd (%rdi), %zmm2, %zmm0 {%k1}
+; CHECK-FAST-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-FAST-NEXT: retq
;
; CHECK-FAST-PERLANE-LABEL: test_masked_8xdouble_to_4xdouble_perm_mem_mask2:
@@ -4467,12 +4458,11 @@ define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask2(ptr %vp, <4
define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask2(ptr %vp, <4 x double> %mask) {
; CHECK-FAST-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mem_mask2:
; CHECK-FAST: # %bb.0:
-; CHECK-FAST-NEXT: vmovapd (%rdi), %ymm2
; CHECK-FAST-NEXT: vpmovsxbq {{.*#+}} ymm1 = [1,2,3,4]
-; CHECK-FAST-NEXT: vxorpd %xmm3, %xmm3, %xmm3
-; CHECK-FAST-NEXT: vcmpeqpd %ymm3, %ymm0, %k1
-; CHECK-FAST-NEXT: vpermi2pd 32(%rdi), %ymm2, %ymm1 {%k1} {z}
-; CHECK-FAST-NEXT: vmovapd %ymm1, %ymm0
+; CHECK-FAST-NEXT: vxorpd %xmm2, %xmm2, %xmm2
+; CHECK-FAST-NEXT: vcmpeqpd %ymm2, %ymm0, %k1
+; CHECK-FAST-NEXT: vpermpd (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-FAST-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-FAST-NEXT: retq
;
; CHECK-FAST-PERLANE-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mem_mask2:
@@ -4493,9 +4483,9 @@ define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask2(ptr %vp,
define <4 x double> @test_8xdouble_to_4xdouble_perm_mem_mask3(ptr %vp) {
; CHECK-LABEL: test_8xdouble_to_4xdouble_perm_mem_mask3:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovapd (%rdi), %ymm1
; CHECK-NEXT: vpmovsxbq {{.*#+}} ymm0 = [4,2,1,0]
-; CHECK-NEXT: vpermi2pd 32(%rdi), %ymm1, %ymm0
+; CHECK-NEXT: vpermpd (%rdi), %zmm0, %zmm0
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-NEXT: retq
%vec = load <8 x double>, ptr %vp
%res = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> <i32 4, i32 2, i32 1, i32 0>
@@ -4504,12 +4494,12 @@ define <4 x double> @test_8xdouble_to_4xdouble_perm_mem_mask3(ptr %vp) {
define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask3(ptr %vp, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mem_mask3:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovapd (%rdi), %ymm2
-; CHECK-NEXT: vpmovsxbq {{.*#+}} ymm3 = [4,2,1,0]
-; CHECK-NEXT: vpermi2pd 32(%rdi), %ymm2, %ymm3
-; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2
-; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
-; CHECK-NEXT: vmovapd %ymm3, %ymm0 {%k1}
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; CHECK-NEXT: vpmovsxbq {{.*#+}} ymm2 = [4,2,1,0]
+; CHECK-NEXT: vxorpd %xmm3, %xmm3, %xmm3
+; CHECK-NEXT: vcmpeqpd %ymm3, %ymm1, %k1
+; CHECK-NEXT: vpermpd (%rdi), %zmm2, %zmm0 {%k1}
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-NEXT: retq
%vec = load <8 x double>, ptr %vp
%shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> <i32 4, i32 2, i32 1, i32 0>
@@ -4521,12 +4511,11 @@ define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask3(ptr %vp, <4
define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask3(ptr %vp, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mem_mask3:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovapd (%rdi), %ymm2
; CHECK-NEXT: vpmovsxbq {{.*#+}} ymm1 = [4,2,1,0]
-; CHECK-NEXT: vxorpd %xmm3, %xmm3, %xmm3
-; CHECK-NEXT: vcmpeqpd %ymm3, %ymm0, %k1
-; CHECK-NEXT: vpermi2pd 32(%rdi), %ymm2, %ymm1 {%k1} {z}
-; CHECK-NEXT: vmovapd %ymm1, %ymm0
+; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: vcmpeqpd %ymm2, %ymm0, %k1
+; CHECK-NEXT: vpermpd (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-NEXT: retq
%vec = load <8 x double>, ptr %vp
%shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> <i32 4, i32 2, i32 1, i32 0>
diff --git a/llvm/test/CodeGen/X86/pr97968.ll b/llvm/test/CodeGen/X86/pr97968.ll
index ca5c63cdc1c2ec..a539a33e9a2817 100644
--- a/llvm/test/CodeGen/X86/pr97968.ll
+++ b/llvm/test/CodeGen/X86/pr97968.ll
@@ -5,8 +5,8 @@ define <2 x i32> @PR97968(<16 x i32> %a0) {
; CHECK-LABEL: PR97968:
; CHECK: # %bb.0:
; CHECK-NEXT: vpmovsxbd {{.*#+}} xmm1 = [2,7,2,7]
-; CHECK-NEXT: vpermps %zmm0, %zmm1, %zmm0
-; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; CHECK-NEXT: vpermps %ymm0, %ymm1, %ymm0
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%sub0 = shufflevector <16 x i32> %a0, <16 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
diff --git a/llvm/test/CodeGen/X86/shuffle-strided-with-offset-512.ll b/llvm/test/CodeGen/X86/shuffle-strided-with-offset-512.ll
index 45842d4148a8b8..82c460fc559389 100644
--- a/llvm/test/CodeGen/X86/shuffle-strided-with-offset-512.ll
+++ b/llvm/test/CodeGen/X86/shuffle-strided-with-offset-512.ll
@@ -65,10 +65,9 @@ define void @shuffle_v16i32_to_v8i32_1(ptr %L, ptr %S) nounwind {
;
; AVX512BWVL-FAST-ALL-LABEL: shuffle_v16i32_to_v8i32_1:
; AVX512BWVL-FAST-ALL: # %bb.0:
-; AVX512BWVL-FAST-ALL-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512BWVL-FAST-ALL-NEXT: vpmovsxbd {{.*#+}} ymm1 = [1,3,5,7,9,11,13,15]
-; AVX512BWVL-FAST-ALL-NEXT: vpermi2d 32(%rdi), %ymm0, %ymm1
-; AVX512BWVL-FAST-ALL-NEXT: vmovdqa %ymm1, (%rsi)
+; AVX512BWVL-FAST-ALL-NEXT: vpmovsxbd {{.*#+}} ymm0 = [1,3,5,7,9,11,13,15]
+; AVX512BWVL-FAST-ALL-NEXT: vpermps (%rdi), %zmm0, %zmm0
+; AVX512BWVL-FAST-ALL-NEXT: vmovaps %ymm0, (%rsi)
; AVX512BWVL-FAST-ALL-NEXT: vzeroupper
; AVX512BWVL-FAST-ALL-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/shuffle-vs-trunc-512.ll b/llvm/test/CodeGen/X86/shuffle-vs-trunc-512.ll
index e7557134b14864..1d82d57e5552fe 100644
--- a/llvm/test/CodeGen/X86/shuffle-vs-trunc-512.ll
+++ b/llvm/test/CodeGen/X86/shuffle-vs-trunc-512.ll
@@ -453,9 +453,8 @@ define <4 x double> @PR34175(ptr %p) {
; AVX512BWVL-LABEL: PR34175:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovq {{.*#+}} xmm0 = [0,8,16,24,0,0,0,0]
-; AVX512BWVL-NEXT: vmovdqu (%rdi), %ymm1
-; AVX512BWVL-NEXT: vpermt2w 32(%rdi), %ymm0, %ymm1
-; AVX512BWVL-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX512BWVL-NEXT: vpermw (%rdi), %zmm0, %zmm0
+; AVX512BWVL-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX512BWVL-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX512BWVL-NEXT: retq
;
@@ -472,9 +471,8 @@ define <4 x double> @PR34175(ptr %p) {
; AVX512VBMIVL-LABEL: PR34175:
; AVX512VBMIVL: # %bb.0:
; AVX512VBMIVL-NEXT: vmovq {{.*#+}} xmm0 = [0,8,16,24,0,0,0,0]
-; AVX512VBMIVL-NEXT: vmovdqu (%rdi), %ymm1
-; AVX512VBMIVL-NEXT: vpermt2w 32(%rdi), %ymm0, %ymm1
-; AVX512VBMIVL-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX512VBMIVL-NEXT: vpermw (%rdi), %zmm0, %zmm0
+; AVX512VBMIVL-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX512VBMIVL-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX512VBMIVL-NEXT: retq
%v = load <32 x i16>, ptr %p, align 2
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
index 0cefc1c32d71bf..a39bc6b6686692 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
@@ -345,66 +345,66 @@ define void @load_i16_stride3_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
;
; AVX512BW-LABEL: load_i16_stride3_vf4:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vmovq {{.*#+}} xmm0 = [0,3,6,9,0,0,0,0]
-; AVX512BW-NEXT: vmovdqa (%rdi), %xmm1
-; AVX512BW-NEXT: vmovdqa 16(%rdi), %xmm2
-; AVX512BW-NEXT: vpermi2w %xmm2, %xmm1, %xmm0
-; AVX512BW-NEXT: vmovq {{.*#+}} xmm3 = [1,4,7,10,0,0,0,0]
-; AVX512BW-NEXT: vpermi2w %xmm2, %xmm1, %xmm3
-; AVX512BW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,3,2,3,4,5,6,7]
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
-; AVX512BW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7]
-; AVX512BW-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} xmm0 = [0,3,6,9,6,3,6,7]
+; AVX512BW-NEXT: vmovdqa (%rdi), %ymm1
+; AVX512BW-NEXT: vpermw %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} xmm2 = [1,4,7,10,4,7,6,7]
+; AVX512BW-NEXT: vpermw %ymm1, %ymm2, %ymm1
+; AVX512BW-NEXT: vpshuflw {{.*#+}} xmm2 = mem[0,3,2,3,4,5,6,7]
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm3 = mem[2,1,2,3]
+; AVX512BW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[2,1,2,3,4,5,6,7]
+; AVX512BW-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; AVX512BW-NEXT: vmovq %xmm0, (%rsi)
-; AVX512BW-NEXT: vmovq %xmm3, (%rdx)
-; AVX512BW-NEXT: vmovq %xmm1, (%rcx)
+; AVX512BW-NEXT: vmovq %xmm1, (%rdx)
+; AVX512BW-NEXT: vmovq %xmm2, (%rcx)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BW-FCP-LABEL: load_i16_stride3_vf4:
; AVX512BW-FCP: # %bb.0:
-; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm0 = [0,3,6,9,0,0,0,0]
-; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %xmm1
-; AVX512BW-FCP-NEXT: vmovdqa 16(%rdi), %xmm2
-; AVX512BW-FCP-NEXT: vpermi2w %xmm2, %xmm1, %xmm0
-; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm3 = [1,4,7,10,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2w %xmm2, %xmm1, %xmm3
-; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm4 = [2,5,8,11,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2w %xmm2, %xmm1, %xmm4
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} xmm0 = [0,3,6,9,6,3,6,7]
+; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm1
+; AVX512BW-FCP-NEXT: vpermw %ymm1, %ymm0, %ymm0
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} xmm2 = [1,4,7,10,4,7,6,7]
+; AVX512BW-FCP-NEXT: vpermw %ymm1, %ymm2, %ymm2
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} xmm3 = [2,5,8,11,2,3,10,11]
+; AVX512BW-FCP-NEXT: vpermw %ymm1, %ymm3, %ymm1
; AVX512BW-FCP-NEXT: vmovq %xmm0, (%rsi)
-; AVX512BW-FCP-NEXT: vmovq %xmm3, (%rdx)
-; AVX512BW-FCP-NEXT: vmovq %xmm4, (%rcx)
+; AVX512BW-FCP-NEXT: vmovq %xmm2, (%rdx)
+; AVX512BW-FCP-NEXT: vmovq %xmm1, (%rcx)
+; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
; AVX512DQ-BW-LABEL: load_i16_stride3_vf4:
; AVX512DQ-BW: # %bb.0:
-; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm0 = [0,3,6,9,0,0,0,0]
-; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %xmm1
-; AVX512DQ-BW-NEXT: vmovdqa 16(%rdi), %xmm2
-; AVX512DQ-BW-NEXT: vpermi2w %xmm2, %xmm1, %xmm0
-; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm3 = [1,4,7,10,0,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2w %xmm2, %xmm1, %xmm3
-; AVX512DQ-BW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,3,2,3,4,5,6,7]
-; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
-; AVX512DQ-BW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7]
-; AVX512DQ-BW-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} xmm0 = [0,3,6,9,6,3,6,7]
+; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm1
+; AVX512DQ-BW-NEXT: vpermw %ymm1, %ymm0, %ymm0
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} xmm2 = [1,4,7,10,4,7,6,7]
+; AVX512DQ-BW-NEXT: vpermw %ymm1, %ymm2, %ymm1
+; AVX512DQ-BW-NEXT: vpshuflw {{.*#+}} xmm2 = mem[0,3,2,3,4,5,6,7]
+; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} xmm3 = mem[2,1,2,3]
+; AVX512DQ-BW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[2,1,2,3,4,5,6,7]
+; AVX512DQ-BW-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; AVX512DQ-BW-NEXT: vmovq %xmm0, (%rsi)
-; AVX512DQ-BW-NEXT: vmovq %xmm3, (%rdx)
-; AVX512DQ-BW-NEXT: vmovq %xmm1, (%rcx)
+; AVX512DQ-BW-NEXT: vmovq %xmm1, (%rdx)
+; AVX512DQ-BW-NEXT: vmovq %xmm2, (%rcx)
+; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
; AVX512DQ-BW-FCP-LABEL: load_i16_stride3_vf4:
; AVX512DQ-BW-FCP: # %bb.0:
-; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm0 = [0,3,6,9,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %xmm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 16(%rdi), %xmm2
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %xmm2, %xmm1, %xmm0
-; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm3 = [1,4,7,10,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %xmm2, %xmm1, %xmm3
-; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm4 = [2,5,8,11,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %xmm2, %xmm1, %xmm4
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} xmm0 = [0,3,6,9,6,3,6,7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm1
+; AVX512DQ-BW-FCP-NEXT: vpermw %ymm1, %ymm0, %ymm0
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} xmm2 = [1,4,7,10,4,7,6,7]
+; AVX512DQ-BW-FCP-NEXT: vpermw %ymm1, %ymm2, %ymm2
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} xmm3 = [2,5,8,11,2,3,10,11]
+; AVX512DQ-BW-FCP-NEXT: vpermw %ymm1, %ymm3, %ymm1
; AVX512DQ-BW-FCP-NEXT: vmovq %xmm0, (%rsi)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm3, (%rdx)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm4, (%rcx)
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm2, (%rdx)
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm1, (%rcx)
+; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%wide.vec = load <12 x i16>, ptr %in.vec, align 64
%strided.vec0 = shufflevector <12 x i16> %wide.vec, <12 x i16> poison, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
@@ -629,64 +629,60 @@ define void @load_i16_stride3_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-LABEL: load_i16_stride3_vf8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovsxbw {{.*#+}} xmm0 = [0,3,6,9,12,15,18,21]
-; AVX512BW-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512BW-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm0
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} xmm3 = [1,4,7,10,13,16,19,22]
-; AVX512BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm3
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} xmm4 = [2,5,8,11,14,17,20,23]
-; AVX512BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm4
+; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm1
+; AVX512BW-NEXT: vpermw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} xmm2 = [1,4,7,10,13,16,19,22]
+; AVX512BW-NEXT: vpermw %zmm1, %zmm2, %zmm2
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} xmm3 = [2,5,8,11,14,17,20,23]
+; AVX512BW-NEXT: vpermw %zmm1, %zmm3, %zmm1
; AVX512BW-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512BW-NEXT: vmovdqa %xmm3, (%rdx)
-; AVX512BW-NEXT: vmovdqa %xmm4, (%rcx)
+; AVX512BW-NEXT: vmovdqa %xmm2, (%rdx)
+; AVX512BW-NEXT: vmovdqa %xmm1, (%rcx)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BW-FCP-LABEL: load_i16_stride3_vf8:
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} xmm0 = [0,3,6,9,12,15,18,21]
-; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm0
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} xmm3 = [1,4,7,10,13,16,19,22]
-; AVX512BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm3
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} xmm4 = [2,5,8,11,14,17,20,23]
-; AVX512BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm4
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm1
+; AVX512BW-FCP-NEXT: vpermw %zmm1, %zmm0, %zmm0
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} xmm2 = [1,4,7,10,13,16,19,22]
+; AVX512BW-FCP-NEXT: vpermw %zmm1, %zmm2, %zmm2
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} xmm3 = [2,5,8,11,14,17,20,23]
+; AVX512BW-FCP-NEXT: vpermw %zmm1, %zmm3, %zmm1
; AVX512BW-FCP-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512BW-FCP-NEXT: vmovdqa %xmm3, (%rdx)
-; AVX512BW-FCP-NEXT: vmovdqa %xmm4, (%rcx)
+; AVX512BW-FCP-NEXT: vmovdqa %xmm2, (%rdx)
+; AVX512BW-FCP-NEXT: vmovdqa %xmm1, (%rcx)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
; AVX512DQ-BW-LABEL: load_i16_stride3_vf8:
; AVX512DQ-BW: # %bb.0:
; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} xmm0 = [0,3,6,9,12,15,18,21]
-; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512DQ-BW-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512DQ-BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm0
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} xmm3 = [1,4,7,10,13,16,19,22]
-; AVX512DQ-BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm3
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} xmm4 = [2,5,8,11,14,17,20,23]
-; AVX512DQ-BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm4
+; AVX512DQ-BW-NEXT: vmovdqa64 (%rdi), %zmm1
+; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm0, %zmm0
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} xmm2 = [1,4,7,10,13,16,19,22]
+; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm2, %zmm2
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} xmm3 = [2,5,8,11,14,17,20,23]
+; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm3, %zmm1
; AVX512DQ-BW-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512DQ-BW-NEXT: vmovdqa %xmm3, (%rdx)
-; AVX512DQ-BW-NEXT: vmovdqa %xmm4, (%rcx)
+; AVX512DQ-BW-NEXT: vmovdqa %xmm2, (%rdx)
+; AVX512DQ-BW-NEXT: vmovdqa %xmm1, (%rcx)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
; AVX512DQ-BW-FCP-LABEL: load_i16_stride3_vf8:
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} xmm0 = [0,3,6,9,12,15,18,21]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm0
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} xmm3 = [1,4,7,10,13,16,19,22]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} xmm4 = [2,5,8,11,14,17,20,23]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm1
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm1, %zmm0, %zmm0
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} xmm2 = [1,4,7,10,13,16,19,22]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm1, %zmm2, %zmm2
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} xmm3 = [2,5,8,11,14,17,20,23]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm1, %zmm3, %zmm1
; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm3, (%rdx)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm4, (%rcx)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm2, (%rdx)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm1, (%rcx)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%wide.vec = load <24 x i16>, ptr %in.vec, align 64
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
index 68e92d7cf773f8..739e6e2369e366 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
@@ -596,24 +596,22 @@ define void @load_i16_stride5_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm1
; AVX512BW-NEXT: vpermw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vmovq {{.*#+}} xmm2 = [0,5,10,0,0,0,0,0]
-; AVX512BW-NEXT: vpermw %zmm1, %zmm2, %zmm1
-; AVX512BW-NEXT: vmovdqa 16(%rdi), %xmm2
-; AVX512BW-NEXT: vpextrw $7, %xmm2, %eax
-; AVX512BW-NEXT: vpinsrw $3, %eax, %xmm1, %xmm1
+; AVX512BW-NEXT: vpermw %zmm1, %zmm2, %zmm2
+; AVX512BW-NEXT: vmovdqa 16(%rdi), %xmm3
+; AVX512BW-NEXT: vpextrw $7, %xmm3, %eax
+; AVX512BW-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
; AVX512BW-NEXT: vpinsrw $3, 32(%rdi), %xmm0, %xmm0
-; AVX512BW-NEXT: vmovq {{.*#+}} xmm2 = [2,7,12,17,0,0,0,0]
-; AVX512BW-NEXT: vmovdqa 32(%rdi), %ymm3
-; AVX512BW-NEXT: vmovdqa (%rdi), %ymm4
-; AVX512BW-NEXT: vpermi2w %ymm3, %ymm4, %ymm2
-; AVX512BW-NEXT: vmovq {{.*#+}} xmm5 = [3,8,13,18,0,0,0,0]
-; AVX512BW-NEXT: vpermi2w %ymm3, %ymm4, %ymm5
-; AVX512BW-NEXT: vmovq {{.*#+}} xmm6 = [4,9,14,19,0,0,0,0]
-; AVX512BW-NEXT: vpermi2w %ymm3, %ymm4, %ymm6
-; AVX512BW-NEXT: vmovq %xmm1, (%rsi)
+; AVX512BW-NEXT: vmovq {{.*#+}} xmm3 = [2,7,12,17,0,0,0,0]
+; AVX512BW-NEXT: vpermw %zmm1, %zmm3, %zmm3
+; AVX512BW-NEXT: vmovq {{.*#+}} xmm4 = [3,8,13,18,0,0,0,0]
+; AVX512BW-NEXT: vpermw %zmm1, %zmm4, %zmm4
+; AVX512BW-NEXT: vmovq {{.*#+}} xmm5 = [4,9,14,19,0,0,0,0]
+; AVX512BW-NEXT: vpermw %zmm1, %zmm5, %zmm1
+; AVX512BW-NEXT: vmovq %xmm2, (%rsi)
; AVX512BW-NEXT: vmovq %xmm0, (%rdx)
-; AVX512BW-NEXT: vmovq %xmm2, (%rcx)
-; AVX512BW-NEXT: vmovq %xmm5, (%r8)
-; AVX512BW-NEXT: vmovq %xmm6, (%r9)
+; AVX512BW-NEXT: vmovq %xmm3, (%rcx)
+; AVX512BW-NEXT: vmovq %xmm4, (%r8)
+; AVX512BW-NEXT: vmovq %xmm1, (%r9)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -623,24 +621,22 @@ define void @load_i16_stride5_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm1
; AVX512BW-FCP-NEXT: vpermw %zmm1, %zmm0, %zmm0
; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm2 = [0,5,10,0,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermw %zmm1, %zmm2, %zmm1
-; AVX512BW-FCP-NEXT: vmovdqa 16(%rdi), %xmm2
-; AVX512BW-FCP-NEXT: vpextrw $7, %xmm2, %eax
-; AVX512BW-FCP-NEXT: vpinsrw $3, %eax, %xmm1, %xmm1
+; AVX512BW-FCP-NEXT: vpermw %zmm1, %zmm2, %zmm2
+; AVX512BW-FCP-NEXT: vmovdqa 16(%rdi), %xmm3
+; AVX512BW-FCP-NEXT: vpextrw $7, %xmm3, %eax
+; AVX512BW-FCP-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
; AVX512BW-FCP-NEXT: vpinsrw $3, 32(%rdi), %xmm0, %xmm0
-; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm2 = [2,7,12,17,0,0,0,0]
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm3
-; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm4
-; AVX512BW-FCP-NEXT: vpermi2w %ymm3, %ymm4, %ymm2
-; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm5 = [3,8,13,18,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2w %ymm3, %ymm4, %ymm5
-; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm6 = [4,9,14,19,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2w %ymm3, %ymm4, %ymm6
-; AVX512BW-FCP-NEXT: vmovq %xmm1, (%rsi)
+; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm3 = [2,7,12,17,0,0,0,0]
+; AVX512BW-FCP-NEXT: vpermw %zmm1, %zmm3, %zmm3
+; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm4 = [3,8,13,18,0,0,0,0]
+; AVX512BW-FCP-NEXT: vpermw %zmm1, %zmm4, %zmm4
+; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm5 = [4,9,14,19,0,0,0,0]
+; AVX512BW-FCP-NEXT: vpermw %zmm1, %zmm5, %zmm1
+; AVX512BW-FCP-NEXT: vmovq %xmm2, (%rsi)
; AVX512BW-FCP-NEXT: vmovq %xmm0, (%rdx)
-; AVX512BW-FCP-NEXT: vmovq %xmm2, (%rcx)
-; AVX512BW-FCP-NEXT: vmovq %xmm5, (%r8)
-; AVX512BW-FCP-NEXT: vmovq %xmm6, (%r9)
+; AVX512BW-FCP-NEXT: vmovq %xmm3, (%rcx)
+; AVX512BW-FCP-NEXT: vmovq %xmm4, (%r8)
+; AVX512BW-FCP-NEXT: vmovq %xmm1, (%r9)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
@@ -650,24 +646,22 @@ define void @load_i16_stride5_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-NEXT: vmovdqa64 (%rdi), %zmm1
; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm0, %zmm0
; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm2 = [0,5,10,0,0,0,0,0]
-; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm2, %zmm1
-; AVX512DQ-BW-NEXT: vmovdqa 16(%rdi), %xmm2
-; AVX512DQ-BW-NEXT: vpextrw $7, %xmm2, %eax
-; AVX512DQ-BW-NEXT: vpinsrw $3, %eax, %xmm1, %xmm1
+; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm2, %zmm2
+; AVX512DQ-BW-NEXT: vmovdqa 16(%rdi), %xmm3
+; AVX512DQ-BW-NEXT: vpextrw $7, %xmm3, %eax
+; AVX512DQ-BW-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
; AVX512DQ-BW-NEXT: vpinsrw $3, 32(%rdi), %xmm0, %xmm0
-; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm2 = [2,7,12,17,0,0,0,0]
-; AVX512DQ-BW-NEXT: vmovdqa 32(%rdi), %ymm3
-; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm4
-; AVX512DQ-BW-NEXT: vpermi2w %ymm3, %ymm4, %ymm2
-; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm5 = [3,8,13,18,0,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2w %ymm3, %ymm4, %ymm5
-; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm6 = [4,9,14,19,0,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2w %ymm3, %ymm4, %ymm6
-; AVX512DQ-BW-NEXT: vmovq %xmm1, (%rsi)
+; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm3 = [2,7,12,17,0,0,0,0]
+; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm3, %zmm3
+; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm4 = [3,8,13,18,0,0,0,0]
+; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm4, %zmm4
+; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm5 = [4,9,14,19,0,0,0,0]
+; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm5, %zmm1
+; AVX512DQ-BW-NEXT: vmovq %xmm2, (%rsi)
; AVX512DQ-BW-NEXT: vmovq %xmm0, (%rdx)
-; AVX512DQ-BW-NEXT: vmovq %xmm2, (%rcx)
-; AVX512DQ-BW-NEXT: vmovq %xmm5, (%r8)
-; AVX512DQ-BW-NEXT: vmovq %xmm6, (%r9)
+; AVX512DQ-BW-NEXT: vmovq %xmm3, (%rcx)
+; AVX512DQ-BW-NEXT: vmovq %xmm4, (%r8)
+; AVX512DQ-BW-NEXT: vmovq %xmm1, (%r9)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
@@ -677,24 +671,22 @@ define void @load_i16_stride5_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm1
; AVX512DQ-BW-FCP-NEXT: vpermw %zmm1, %zmm0, %zmm0
; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm2 = [0,5,10,0,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermw %zmm1, %zmm2, %zmm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 16(%rdi), %xmm2
-; AVX512DQ-BW-FCP-NEXT: vpextrw $7, %xmm2, %eax
-; AVX512DQ-BW-FCP-NEXT: vpinsrw $3, %eax, %xmm1, %xmm1
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm1, %zmm2, %zmm2
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 16(%rdi), %xmm3
+; AVX512DQ-BW-FCP-NEXT: vpextrw $7, %xmm3, %eax
+; AVX512DQ-BW-FCP-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
; AVX512DQ-BW-FCP-NEXT: vpinsrw $3, 32(%rdi), %xmm0, %xmm0
-; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm2 = [2,7,12,17,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm3
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm4
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm3, %ymm4, %ymm2
-; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm5 = [3,8,13,18,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm3, %ymm4, %ymm5
-; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm6 = [4,9,14,19,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm3, %ymm4, %ymm6
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm1, (%rsi)
+; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm3 = [2,7,12,17,0,0,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm1, %zmm3, %zmm3
+; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm4 = [3,8,13,18,0,0,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm1, %zmm4, %zmm4
+; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm5 = [4,9,14,19,0,0,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm1, %zmm5, %zmm1
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm2, (%rsi)
; AVX512DQ-BW-FCP-NEXT: vmovq %xmm0, (%rdx)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm2, (%rcx)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm5, (%r8)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm6, (%r9)
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm3, (%rcx)
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm4, (%r8)
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm1, (%r9)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%wide.vec = load <20 x i16>, ptr %in.vec, align 64
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
index 751412c77a59a6..c3b53211978ae4 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
@@ -293,8 +293,8 @@ define void @load_i16_stride6_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[1,3,2,3,4,5,6,7]
; AVX512BW-FCP-NEXT: vpbroadcastw 4(%rdi), %xmm4
; AVX512BW-FCP-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
-; AVX512BW-FCP-NEXT: vmovd {{.*#+}} xmm5 = [3,9,0,0,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2w %xmm1, %xmm0, %xmm5
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} xmm5 = [3,9,1,9,2,10,3,11]
+; AVX512BW-FCP-NEXT: vpermw (%rdi), %ymm5, %ymm5
; AVX512BW-FCP-NEXT: vpbroadcastw 20(%rdi), %xmm6
; AVX512BW-FCP-NEXT: vpbroadcastw 8(%rdi), %xmm7
; AVX512BW-FCP-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
@@ -307,6 +307,7 @@ define void @load_i16_stride6_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-FCP-NEXT: vmovd %xmm5, (%r8)
; AVX512BW-FCP-NEXT: vmovd %xmm6, (%r9)
; AVX512BW-FCP-NEXT: vmovd %xmm0, (%rax)
+; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
; AVX512DQ-BW-LABEL: load_i16_stride6_vf2:
@@ -346,8 +347,8 @@ define void @load_i16_stride6_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[1,3,2,3,4,5,6,7]
; AVX512DQ-BW-FCP-NEXT: vpbroadcastw 4(%rdi), %xmm4
; AVX512DQ-BW-FCP-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
-; AVX512DQ-BW-FCP-NEXT: vmovd {{.*#+}} xmm5 = [3,9,0,0,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %xmm1, %xmm0, %xmm5
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} xmm5 = [3,9,1,9,2,10,3,11]
+; AVX512DQ-BW-FCP-NEXT: vpermw (%rdi), %ymm5, %ymm5
; AVX512DQ-BW-FCP-NEXT: vpbroadcastw 20(%rdi), %xmm6
; AVX512DQ-BW-FCP-NEXT: vpbroadcastw 8(%rdi), %xmm7
; AVX512DQ-BW-FCP-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
@@ -360,6 +361,7 @@ define void @load_i16_stride6_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-FCP-NEXT: vmovd %xmm5, (%r8)
; AVX512DQ-BW-FCP-NEXT: vmovd %xmm6, (%r9)
; AVX512DQ-BW-FCP-NEXT: vmovd %xmm0, (%rax)
+; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%wide.vec = load <12 x i16>, ptr %in.vec, align 64
%strided.vec0 = shufflevector <12 x i16> %wide.vec, <12 x i16> poison, <2 x i32> <i32 0, i32 6>
@@ -580,21 +582,20 @@ define void @load_i16_stride6_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3],xmm0[4,5,6,7]
; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm1 = [4,1,10,7]
-; AVX512-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512-NEXT: vmovdqa (%rdi), %ymm4
-; AVX512-NEXT: vpermi2d %ymm2, %ymm4, %ymm1
-; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = xmm1[4,5,0,1,12,13,8,9,u,u,u,u,u,u,u,u]
+; AVX512-NEXT: vpermd (%rdi), %zmm1, %zmm1
+; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm1[4,5,0,1,12,13,8,9,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,7,2,3,14,15,10,11,u,u,u,u,u,u,u,u]
-; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm6 = [0,13,10,3]
-; AVX512-NEXT: vpermi2d %ymm4, %ymm2, %ymm6
-; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm6[8,9,4,5,0,1,12,13,u,u,u,u,u,u,u,u]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm4 = xmm6[10,11,6,7,2,3,14,15,u,u,u,u,u,u,u,u]
+; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm4 = [0,13,10,3]
+; AVX512-NEXT: vmovdqa 32(%rdi), %ymm5
+; AVX512-NEXT: vpermt2d (%rdi), %ymm4, %ymm5
+; AVX512-NEXT: vpshufb {{.*#+}} xmm4 = xmm5[8,9,4,5,0,1,12,13,u,u,u,u,u,u,u,u]
+; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[10,11,6,7,2,3,14,15,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vmovq %xmm3, (%rsi)
; AVX512-NEXT: vmovq %xmm0, (%rdx)
-; AVX512-NEXT: vmovq %xmm5, (%rcx)
+; AVX512-NEXT: vmovq %xmm2, (%rcx)
; AVX512-NEXT: vmovq %xmm1, (%r8)
-; AVX512-NEXT: vmovq %xmm2, (%r9)
-; AVX512-NEXT: vmovq %xmm4, (%rax)
+; AVX512-NEXT: vmovq %xmm4, (%r9)
+; AVX512-NEXT: vmovq %xmm5, (%rax)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
@@ -612,21 +613,20 @@ define void @load_i16_stride6_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3],xmm0[4,5,6,7]
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm1 = [4,1,10,7]
-; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm4
-; AVX512-FCP-NEXT: vpermi2d %ymm2, %ymm4, %ymm1
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm1[4,5,0,1,12,13,8,9,u,u,u,u,u,u,u,u]
+; AVX512-FCP-NEXT: vpermd (%rdi), %zmm1, %zmm1
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm1[4,5,0,1,12,13,8,9,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,7,2,3,14,15,10,11,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [0,13,10,3]
-; AVX512-FCP-NEXT: vpermi2d %ymm4, %ymm2, %ymm6
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm6[8,9,4,5,0,1,12,13,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm6[10,11,6,7,2,3,14,15,u,u,u,u,u,u,u,u]
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [0,13,10,3]
+; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm5
+; AVX512-FCP-NEXT: vpermt2d (%rdi), %ymm4, %ymm5
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm5[8,9,4,5,0,1,12,13,u,u,u,u,u,u,u,u]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[10,11,6,7,2,3,14,15,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vmovq %xmm3, (%rsi)
; AVX512-FCP-NEXT: vmovq %xmm0, (%rdx)
-; AVX512-FCP-NEXT: vmovq %xmm5, (%rcx)
+; AVX512-FCP-NEXT: vmovq %xmm2, (%rcx)
; AVX512-FCP-NEXT: vmovq %xmm1, (%r8)
-; AVX512-FCP-NEXT: vmovq %xmm2, (%r9)
-; AVX512-FCP-NEXT: vmovq %xmm4, (%rax)
+; AVX512-FCP-NEXT: vmovq %xmm4, (%r9)
+; AVX512-FCP-NEXT: vmovq %xmm5, (%rax)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
@@ -645,21 +645,20 @@ define void @load_i16_stride6_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3],xmm0[4,5,6,7]
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm1 = [4,1,10,7]
-; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm4
-; AVX512DQ-NEXT: vpermi2d %ymm2, %ymm4, %ymm1
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm5 = xmm1[4,5,0,1,12,13,8,9,u,u,u,u,u,u,u,u]
+; AVX512DQ-NEXT: vpermd (%rdi), %zmm1, %zmm1
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm1[4,5,0,1,12,13,8,9,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,7,2,3,14,15,10,11,u,u,u,u,u,u,u,u]
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm6 = [0,13,10,3]
-; AVX512DQ-NEXT: vpermi2d %ymm4, %ymm2, %ymm6
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm6[8,9,4,5,0,1,12,13,u,u,u,u,u,u,u,u]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm4 = xmm6[10,11,6,7,2,3,14,15,u,u,u,u,u,u,u,u]
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm4 = [0,13,10,3]
+; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm5
+; AVX512DQ-NEXT: vpermt2d (%rdi), %ymm4, %ymm5
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm4 = xmm5[8,9,4,5,0,1,12,13,u,u,u,u,u,u,u,u]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[10,11,6,7,2,3,14,15,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vmovq %xmm3, (%rsi)
; AVX512DQ-NEXT: vmovq %xmm0, (%rdx)
-; AVX512DQ-NEXT: vmovq %xmm5, (%rcx)
+; AVX512DQ-NEXT: vmovq %xmm2, (%rcx)
; AVX512DQ-NEXT: vmovq %xmm1, (%r8)
-; AVX512DQ-NEXT: vmovq %xmm2, (%r9)
-; AVX512DQ-NEXT: vmovq %xmm4, (%rax)
+; AVX512DQ-NEXT: vmovq %xmm4, (%r9)
+; AVX512DQ-NEXT: vmovq %xmm5, (%rax)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -677,21 +676,20 @@ define void @load_i16_stride6_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3],xmm0[4,5,6,7]
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm1 = [4,1,10,7]
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm4
-; AVX512DQ-FCP-NEXT: vpermi2d %ymm2, %ymm4, %ymm1
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm1[4,5,0,1,12,13,8,9,u,u,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpermd (%rdi), %zmm1, %zmm1
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm1[4,5,0,1,12,13,8,9,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,7,2,3,14,15,10,11,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [0,13,10,3]
-; AVX512DQ-FCP-NEXT: vpermi2d %ymm4, %ymm2, %ymm6
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm6[8,9,4,5,0,1,12,13,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm6[10,11,6,7,2,3,14,15,u,u,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [0,13,10,3]
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm5
+; AVX512DQ-FCP-NEXT: vpermt2d (%rdi), %ymm4, %ymm5
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm5[8,9,4,5,0,1,12,13,u,u,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[10,11,6,7,2,3,14,15,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vmovq %xmm3, (%rsi)
; AVX512DQ-FCP-NEXT: vmovq %xmm0, (%rdx)
-; AVX512DQ-FCP-NEXT: vmovq %xmm5, (%rcx)
+; AVX512DQ-FCP-NEXT: vmovq %xmm2, (%rcx)
; AVX512DQ-FCP-NEXT: vmovq %xmm1, (%r8)
-; AVX512DQ-FCP-NEXT: vmovq %xmm2, (%r9)
-; AVX512DQ-FCP-NEXT: vmovq %xmm4, (%rax)
+; AVX512DQ-FCP-NEXT: vmovq %xmm4, (%r9)
+; AVX512DQ-FCP-NEXT: vmovq %xmm5, (%rax)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
@@ -699,25 +697,24 @@ define void @load_i16_stride6_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512BW-NEXT: vmovq {{.*#+}} xmm0 = [0,6,12,18,0,0,0,0]
-; AVX512BW-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512BW-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm0
-; AVX512BW-NEXT: vmovq {{.*#+}} xmm3 = [1,7,13,19,0,0,0,0]
-; AVX512BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm3
-; AVX512BW-NEXT: vmovq {{.*#+}} xmm4 = [2,8,14,20,0,0,0,0]
-; AVX512BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm4
-; AVX512BW-NEXT: vmovq {{.*#+}} xmm5 = [3,9,15,21,0,0,0,0]
-; AVX512BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm5
-; AVX512BW-NEXT: vmovq {{.*#+}} xmm6 = [4,10,16,22,0,0,0,0]
-; AVX512BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm6
-; AVX512BW-NEXT: vmovq {{.*#+}} xmm7 = [5,11,17,23,0,0,0,0]
-; AVX512BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm7
+; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm1
+; AVX512BW-NEXT: vpermw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vmovq {{.*#+}} xmm2 = [1,7,13,19,0,0,0,0]
+; AVX512BW-NEXT: vpermw %zmm1, %zmm2, %zmm2
+; AVX512BW-NEXT: vmovq {{.*#+}} xmm3 = [2,8,14,20,0,0,0,0]
+; AVX512BW-NEXT: vpermw %zmm1, %zmm3, %zmm3
+; AVX512BW-NEXT: vmovq {{.*#+}} xmm4 = [3,9,15,21,0,0,0,0]
+; AVX512BW-NEXT: vpermw %zmm1, %zmm4, %zmm4
+; AVX512BW-NEXT: vmovq {{.*#+}} xmm5 = [4,10,16,22,0,0,0,0]
+; AVX512BW-NEXT: vpermw %zmm1, %zmm5, %zmm5
+; AVX512BW-NEXT: vmovq {{.*#+}} xmm6 = [5,11,17,23,0,0,0,0]
+; AVX512BW-NEXT: vpermw %zmm1, %zmm6, %zmm1
; AVX512BW-NEXT: vmovq %xmm0, (%rsi)
-; AVX512BW-NEXT: vmovq %xmm3, (%rdx)
-; AVX512BW-NEXT: vmovq %xmm4, (%rcx)
-; AVX512BW-NEXT: vmovq %xmm5, (%r8)
-; AVX512BW-NEXT: vmovq %xmm6, (%r9)
-; AVX512BW-NEXT: vmovq %xmm7, (%rax)
+; AVX512BW-NEXT: vmovq %xmm2, (%rdx)
+; AVX512BW-NEXT: vmovq %xmm3, (%rcx)
+; AVX512BW-NEXT: vmovq %xmm4, (%r8)
+; AVX512BW-NEXT: vmovq %xmm5, (%r9)
+; AVX512BW-NEXT: vmovq %xmm1, (%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -725,25 +722,24 @@ define void @load_i16_stride6_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm0 = [0,6,12,18,0,0,0,0]
-; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm0
-; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm3 = [1,7,13,19,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm3
-; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm4 = [2,8,14,20,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm4
-; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm5 = [3,9,15,21,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm5
-; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm6 = [4,10,16,22,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm6
-; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm7 = [5,11,17,23,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm7
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm1
+; AVX512BW-FCP-NEXT: vpermw %zmm1, %zmm0, %zmm0
+; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm2 = [1,7,13,19,0,0,0,0]
+; AVX512BW-FCP-NEXT: vpermw %zmm1, %zmm2, %zmm2
+; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm3 = [2,8,14,20,0,0,0,0]
+; AVX512BW-FCP-NEXT: vpermw %zmm1, %zmm3, %zmm3
+; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm4 = [3,9,15,21,0,0,0,0]
+; AVX512BW-FCP-NEXT: vpermw %zmm1, %zmm4, %zmm4
+; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm5 = [4,10,16,22,0,0,0,0]
+; AVX512BW-FCP-NEXT: vpermw %zmm1, %zmm5, %zmm5
+; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm6 = [5,11,17,23,0,0,0,0]
+; AVX512BW-FCP-NEXT: vpermw %zmm1, %zmm6, %zmm1
; AVX512BW-FCP-NEXT: vmovq %xmm0, (%rsi)
-; AVX512BW-FCP-NEXT: vmovq %xmm3, (%rdx)
-; AVX512BW-FCP-NEXT: vmovq %xmm4, (%rcx)
-; AVX512BW-FCP-NEXT: vmovq %xmm5, (%r8)
-; AVX512BW-FCP-NEXT: vmovq %xmm6, (%r9)
-; AVX512BW-FCP-NEXT: vmovq %xmm7, (%rax)
+; AVX512BW-FCP-NEXT: vmovq %xmm2, (%rdx)
+; AVX512BW-FCP-NEXT: vmovq %xmm3, (%rcx)
+; AVX512BW-FCP-NEXT: vmovq %xmm4, (%r8)
+; AVX512BW-FCP-NEXT: vmovq %xmm5, (%r9)
+; AVX512BW-FCP-NEXT: vmovq %xmm1, (%rax)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
@@ -751,25 +747,24 @@ define void @load_i16_stride6_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW: # %bb.0:
; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm0 = [0,6,12,18,0,0,0,0]
-; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512DQ-BW-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512DQ-BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm0
-; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm3 = [1,7,13,19,0,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm3
-; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm4 = [2,8,14,20,0,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm4
-; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm5 = [3,9,15,21,0,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm5
-; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm6 = [4,10,16,22,0,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm6
-; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm7 = [5,11,17,23,0,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm7
+; AVX512DQ-BW-NEXT: vmovdqa64 (%rdi), %zmm1
+; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm0, %zmm0
+; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm2 = [1,7,13,19,0,0,0,0]
+; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm2, %zmm2
+; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm3 = [2,8,14,20,0,0,0,0]
+; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm3, %zmm3
+; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm4 = [3,9,15,21,0,0,0,0]
+; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm4, %zmm4
+; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm5 = [4,10,16,22,0,0,0,0]
+; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm5, %zmm5
+; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm6 = [5,11,17,23,0,0,0,0]
+; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm6, %zmm1
; AVX512DQ-BW-NEXT: vmovq %xmm0, (%rsi)
-; AVX512DQ-BW-NEXT: vmovq %xmm3, (%rdx)
-; AVX512DQ-BW-NEXT: vmovq %xmm4, (%rcx)
-; AVX512DQ-BW-NEXT: vmovq %xmm5, (%r8)
-; AVX512DQ-BW-NEXT: vmovq %xmm6, (%r9)
-; AVX512DQ-BW-NEXT: vmovq %xmm7, (%rax)
+; AVX512DQ-BW-NEXT: vmovq %xmm2, (%rdx)
+; AVX512DQ-BW-NEXT: vmovq %xmm3, (%rcx)
+; AVX512DQ-BW-NEXT: vmovq %xmm4, (%r8)
+; AVX512DQ-BW-NEXT: vmovq %xmm5, (%r9)
+; AVX512DQ-BW-NEXT: vmovq %xmm1, (%rax)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
@@ -777,25 +772,24 @@ define void @load_i16_stride6_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm0 = [0,6,12,18,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm0
-; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm3 = [1,7,13,19,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm4 = [2,8,14,20,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm4
-; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm5 = [3,9,15,21,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm5
-; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm6 = [4,10,16,22,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm6
-; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm7 = [5,11,17,23,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm7
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm1
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm1, %zmm0, %zmm0
+; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm2 = [1,7,13,19,0,0,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm1, %zmm2, %zmm2
+; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm3 = [2,8,14,20,0,0,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm1, %zmm3, %zmm3
+; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm4 = [3,9,15,21,0,0,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm1, %zmm4, %zmm4
+; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm5 = [4,10,16,22,0,0,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm1, %zmm5, %zmm5
+; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm6 = [5,11,17,23,0,0,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm1, %zmm6, %zmm1
; AVX512DQ-BW-FCP-NEXT: vmovq %xmm0, (%rsi)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm3, (%rdx)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm4, (%rcx)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm5, (%r8)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm6, (%r9)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm7, (%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm2, (%rdx)
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm3, (%rcx)
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm4, (%r8)
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm5, (%r9)
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm1, (%rax)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%wide.vec = load <24 x i16>, ptr %in.vec, align 64
@@ -2865,224 +2859,228 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512BW-LABEL: load_i16_stride6_vf16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm2
-; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm3
; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [0,0,0,2,8,14,20,26,0,0,0,2,8,14,20,26]
; AVX512BW-NEXT: # ymm0 = mem[0,1,0,1]
-; AVX512BW-NEXT: vmovdqa 160(%rdi), %ymm4
-; AVX512BW-NEXT: vmovdqa 128(%rdi), %ymm5
-; AVX512BW-NEXT: vpermi2w %ymm4, %ymm5, %ymm0
+; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm3
+; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm4
+; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm5
+; AVX512BW-NEXT: vpermw %zmm5, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm1 = [0,6,12,18,24,30,36,42,48,54,60,0,0,0,0,0]
-; AVX512BW-NEXT: vpermi2w %zmm3, %zmm2, %zmm1
+; AVX512BW-NEXT: vpermi2w %zmm4, %zmm3, %zmm1
; AVX512BW-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
; AVX512BW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,0,0,3,9,15,21,27,0,0,0,3,9,15,21,27]
; AVX512BW-NEXT: # ymm1 = mem[0,1,0,1]
-; AVX512BW-NEXT: vpermi2w %ymm4, %ymm5, %ymm1
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm6 = [1,7,13,19,25,31,37,43,49,55,61,0,0,0,0,0]
-; AVX512BW-NEXT: vpermi2w %zmm3, %zmm2, %zmm6
-; AVX512BW-NEXT: vpblendw {{.*#+}} ymm1 = ymm6[0,1,2],ymm1[3,4,5,6,7],ymm6[8,9,10],ymm1[11,12,13,14,15]
-; AVX512BW-NEXT: vpblendd {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [0,0,0,20,26,0,6,12,0,0,0,20,26,0,6,12]
-; AVX512BW-NEXT: # ymm6 = mem[0,1,0,1]
-; AVX512BW-NEXT: vpermi2w %ymm5, %ymm4, %ymm6
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm7 = [34,40,46,52,58,0,6,12,18,24,30,0,0,0,0,0]
-; AVX512BW-NEXT: vpermi2w %zmm2, %zmm3, %zmm7
-; AVX512BW-NEXT: vpblendw {{.*#+}} ymm6 = ymm7[0,1,2],ymm6[3,4,5,6,7],ymm7[8,9,10],ymm6[11,12,13,14,15]
-; AVX512BW-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [0,0,0,21,27,1,7,13,0,0,0,21,27,1,7,13]
+; AVX512BW-NEXT: vpermw %zmm5, %zmm1, %zmm1
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm2 = [1,7,13,19,25,31,37,43,49,55,61,0,0,0,0,0]
+; AVX512BW-NEXT: vpermi2w %zmm4, %zmm3, %zmm2
+; AVX512BW-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
+; AVX512BW-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [0,0,0,20,26,0,6,12,0,0,0,20,26,0,6,12]
+; AVX512BW-NEXT: # ymm2 = mem[0,1,0,1]
+; AVX512BW-NEXT: vmovdqa 128(%rdi), %ymm6
+; AVX512BW-NEXT: vmovdqa 160(%rdi), %ymm7
+; AVX512BW-NEXT: vpermi2w %ymm6, %ymm7, %ymm2
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm8 = [34,40,46,52,58,0,6,12,18,24,30,0,0,0,0,0]
+; AVX512BW-NEXT: vpermi2w %zmm3, %zmm4, %zmm8
+; AVX512BW-NEXT: vpblendw {{.*#+}} ymm2 = ymm8[0,1,2],ymm2[3,4,5,6,7],ymm8[8,9,10],ymm2[11,12,13,14,15]
+; AVX512BW-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1,2,3],ymm2[4,5,6,7]
+; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [0,0,0,21,27,1,7,13,0,0,0,21,27,1,7,13]
+; AVX512BW-NEXT: # ymm8 = mem[0,1,0,1]
+; AVX512BW-NEXT: vpermi2w %ymm6, %ymm7, %ymm8
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm6 = [35,41,47,53,59,1,7,13,19,25,31,0,0,0,0,0]
+; AVX512BW-NEXT: vpermi2w %zmm3, %zmm4, %zmm6
+; AVX512BW-NEXT: vpblendw {{.*#+}} ymm7 = ymm6[0,1,2],ymm8[3,4,5,6,7],ymm6[8,9,10],ymm8[11,12,13,14,15]
+; AVX512BW-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
+; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [0,0,0,6,12,18,24,30,0,0,0,6,12,18,24,30]
; AVX512BW-NEXT: # ymm7 = mem[0,1,0,1]
-; AVX512BW-NEXT: vpermi2w %ymm5, %ymm4, %ymm7
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm8 = [35,41,47,53,59,1,7,13,19,25,31,0,0,0,0,0]
-; AVX512BW-NEXT: vpermi2w %zmm2, %zmm3, %zmm8
-; AVX512BW-NEXT: vpblendw {{.*#+}} ymm7 = ymm8[0,1,2],ymm7[3,4,5,6,7],ymm8[8,9,10],ymm7[11,12,13,14,15]
-; AVX512BW-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
-; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [0,0,0,6,12,18,24,30,0,0,0,6,12,18,24,30]
+; AVX512BW-NEXT: vpermw %zmm5, %zmm7, %zmm7
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm8 = [4,10,16,22,28,34,40,46,52,58,0,0,0,0,0,0]
+; AVX512BW-NEXT: vpermi2w %zmm4, %zmm3, %zmm8
+; AVX512BW-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3,4],ymm7[5,6,7]
+; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [0,0,1,7,13,19,25,31,0,0,1,7,13,19,25,31]
; AVX512BW-NEXT: # ymm8 = mem[0,1,0,1]
-; AVX512BW-NEXT: vpermi2w %ymm4, %ymm5, %ymm8
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm9 = [4,10,16,22,28,34,40,46,52,58,0,0,0,0,0,0]
-; AVX512BW-NEXT: vpermi2w %zmm3, %zmm2, %zmm9
-; AVX512BW-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3,4],ymm8[5,6,7]
-; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [0,0,1,7,13,19,25,31,0,0,1,7,13,19,25,31]
-; AVX512BW-NEXT: # ymm9 = mem[0,1,0,1]
-; AVX512BW-NEXT: vpermi2w %ymm4, %ymm5, %ymm9
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm4 = [5,11,17,23,29,35,41,47,53,59,0,0,0,0,0,0]
-; AVX512BW-NEXT: vpermi2w %zmm3, %zmm2, %zmm4
-; AVX512BW-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3,4],ymm9[5,6,7]
+; AVX512BW-NEXT: vpermw %zmm5, %zmm8, %zmm5
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm8 = [5,11,17,23,29,35,41,47,53,59,0,0,0,0,0,0]
+; AVX512BW-NEXT: vpermi2w %zmm4, %zmm3, %zmm8
+; AVX512BW-NEXT: vpblendd {{.*#+}} ymm3 = ymm8[0,1,2,3,4],ymm5[5,6,7]
; AVX512BW-NEXT: vmovdqa %ymm0, (%rsi)
; AVX512BW-NEXT: vmovdqa %ymm1, (%rdx)
-; AVX512BW-NEXT: vmovdqa %ymm6, (%rcx)
-; AVX512BW-NEXT: vmovdqa %ymm7, (%r8)
-; AVX512BW-NEXT: vmovdqa %ymm8, (%r9)
-; AVX512BW-NEXT: vmovdqa %ymm2, (%rax)
+; AVX512BW-NEXT: vmovdqa %ymm2, (%rcx)
+; AVX512BW-NEXT: vmovdqa %ymm6, (%r8)
+; AVX512BW-NEXT: vmovdqa %ymm7, (%r9)
+; AVX512BW-NEXT: vmovdqa %ymm3, (%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BW-FCP-LABEL: load_i16_stride6_vf16:
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm2
-; AVX512BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm3
; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [0,0,0,2,8,14,20,26,0,0,0,2,8,14,20,26]
; AVX512BW-FCP-NEXT: # ymm0 = mem[0,1,0,1]
-; AVX512BW-FCP-NEXT: vmovdqa 160(%rdi), %ymm4
-; AVX512BW-FCP-NEXT: vmovdqa 128(%rdi), %ymm5
-; AVX512BW-FCP-NEXT: vpermi2w %ymm4, %ymm5, %ymm0
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm3
+; AVX512BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm4
+; AVX512BW-FCP-NEXT: vmovdqa64 128(%rdi), %zmm5
+; AVX512BW-FCP-NEXT: vpermw %zmm5, %zmm0, %zmm0
; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm1 = [0,6,12,18,24,30,36,42,48,54,60,0,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2w %zmm3, %zmm2, %zmm1
+; AVX512BW-FCP-NEXT: vpermi2w %zmm4, %zmm3, %zmm1
; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,0,0,3,9,15,21,27,0,0,0,3,9,15,21,27]
; AVX512BW-FCP-NEXT: # ymm1 = mem[0,1,0,1]
-; AVX512BW-FCP-NEXT: vpermi2w %ymm4, %ymm5, %ymm1
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm6 = [1,7,13,19,25,31,37,43,49,55,61,0,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2w %zmm3, %zmm2, %zmm6
-; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm6[0,1,2],ymm1[3,4,5,6,7],ymm6[8,9,10],ymm1[11,12,13,14,15]
-; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [0,0,0,20,26,0,6,12,0,0,0,20,26,0,6,12]
-; AVX512BW-FCP-NEXT: # ymm6 = mem[0,1,0,1]
-; AVX512BW-FCP-NEXT: vpermi2w %ymm5, %ymm4, %ymm6
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm7 = [34,40,46,52,58,0,6,12,18,24,30,0,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2w %zmm2, %zmm3, %zmm7
-; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm7[0,1,2],ymm6[3,4,5,6,7],ymm7[8,9,10],ymm6[11,12,13,14,15]
-; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [0,0,0,21,27,1,7,13,0,0,0,21,27,1,7,13]
+; AVX512BW-FCP-NEXT: vpermw %zmm5, %zmm1, %zmm1
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm2 = [1,7,13,19,25,31,37,43,49,55,61,0,0,0,0,0]
+; AVX512BW-FCP-NEXT: vpermi2w %zmm4, %zmm3, %zmm2
+; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
+; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [0,0,0,20,26,0,6,12,0,0,0,20,26,0,6,12]
+; AVX512BW-FCP-NEXT: # ymm2 = mem[0,1,0,1]
+; AVX512BW-FCP-NEXT: vmovdqa 128(%rdi), %ymm6
+; AVX512BW-FCP-NEXT: vmovdqa 160(%rdi), %ymm7
+; AVX512BW-FCP-NEXT: vpermi2w %ymm6, %ymm7, %ymm2
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm8 = [34,40,46,52,58,0,6,12,18,24,30,0,0,0,0,0]
+; AVX512BW-FCP-NEXT: vpermi2w %zmm3, %zmm4, %zmm8
+; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm8[0,1,2],ymm2[3,4,5,6,7],ymm8[8,9,10],ymm2[11,12,13,14,15]
+; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1,2,3],ymm2[4,5,6,7]
+; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [0,0,0,21,27,1,7,13,0,0,0,21,27,1,7,13]
+; AVX512BW-FCP-NEXT: # ymm8 = mem[0,1,0,1]
+; AVX512BW-FCP-NEXT: vpermi2w %ymm6, %ymm7, %ymm8
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm6 = [35,41,47,53,59,1,7,13,19,25,31,0,0,0,0,0]
+; AVX512BW-FCP-NEXT: vpermi2w %zmm3, %zmm4, %zmm6
+; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm6[0,1,2],ymm8[3,4,5,6,7],ymm6[8,9,10],ymm8[11,12,13,14,15]
+; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
+; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [0,0,0,6,12,18,24,30,0,0,0,6,12,18,24,30]
; AVX512BW-FCP-NEXT: # ymm7 = mem[0,1,0,1]
-; AVX512BW-FCP-NEXT: vpermi2w %ymm5, %ymm4, %ymm7
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm8 = [35,41,47,53,59,1,7,13,19,25,31,0,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2w %zmm2, %zmm3, %zmm8
-; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm8[0,1,2],ymm7[3,4,5,6,7],ymm8[8,9,10],ymm7[11,12,13,14,15]
-; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
-; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [0,0,0,6,12,18,24,30,0,0,0,6,12,18,24,30]
+; AVX512BW-FCP-NEXT: vpermw %zmm5, %zmm7, %zmm7
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm8 = [4,10,16,22,28,34,40,46,52,58,0,0,0,0,0,0]
+; AVX512BW-FCP-NEXT: vpermi2w %zmm4, %zmm3, %zmm8
+; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3,4],ymm7[5,6,7]
+; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [0,0,1,7,13,19,25,31,0,0,1,7,13,19,25,31]
; AVX512BW-FCP-NEXT: # ymm8 = mem[0,1,0,1]
-; AVX512BW-FCP-NEXT: vpermi2w %ymm4, %ymm5, %ymm8
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm9 = [4,10,16,22,28,34,40,46,52,58,0,0,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2w %zmm3, %zmm2, %zmm9
-; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3,4],ymm8[5,6,7]
-; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [0,0,1,7,13,19,25,31,0,0,1,7,13,19,25,31]
-; AVX512BW-FCP-NEXT: # ymm9 = mem[0,1,0,1]
-; AVX512BW-FCP-NEXT: vpermi2w %ymm4, %ymm5, %ymm9
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm4 = [5,11,17,23,29,35,41,47,53,59,0,0,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2w %zmm3, %zmm2, %zmm4
-; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3,4],ymm9[5,6,7]
+; AVX512BW-FCP-NEXT: vpermw %zmm5, %zmm8, %zmm5
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm8 = [5,11,17,23,29,35,41,47,53,59,0,0,0,0,0,0]
+; AVX512BW-FCP-NEXT: vpermi2w %zmm4, %zmm3, %zmm8
+; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm8[0,1,2,3,4],ymm5[5,6,7]
; AVX512BW-FCP-NEXT: vmovdqa %ymm0, (%rsi)
; AVX512BW-FCP-NEXT: vmovdqa %ymm1, (%rdx)
-; AVX512BW-FCP-NEXT: vmovdqa %ymm6, (%rcx)
-; AVX512BW-FCP-NEXT: vmovdqa %ymm7, (%r8)
-; AVX512BW-FCP-NEXT: vmovdqa %ymm8, (%r9)
-; AVX512BW-FCP-NEXT: vmovdqa %ymm2, (%rax)
+; AVX512BW-FCP-NEXT: vmovdqa %ymm2, (%rcx)
+; AVX512BW-FCP-NEXT: vmovdqa %ymm6, (%r8)
+; AVX512BW-FCP-NEXT: vmovdqa %ymm7, (%r9)
+; AVX512BW-FCP-NEXT: vmovdqa %ymm3, (%rax)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
; AVX512DQ-BW-LABEL: load_i16_stride6_vf16:
; AVX512DQ-BW: # %bb.0:
; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-BW-NEXT: vmovdqa64 (%rdi), %zmm2
-; AVX512DQ-BW-NEXT: vmovdqa64 64(%rdi), %zmm3
; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [0,0,0,2,8,14,20,26,0,0,0,2,8,14,20,26]
; AVX512DQ-BW-NEXT: # ymm0 = mem[0,1,0,1]
-; AVX512DQ-BW-NEXT: vmovdqa 160(%rdi), %ymm4
-; AVX512DQ-BW-NEXT: vmovdqa 128(%rdi), %ymm5
-; AVX512DQ-BW-NEXT: vpermi2w %ymm4, %ymm5, %ymm0
+; AVX512DQ-BW-NEXT: vmovdqa64 (%rdi), %zmm3
+; AVX512DQ-BW-NEXT: vmovdqa64 64(%rdi), %zmm4
+; AVX512DQ-BW-NEXT: vmovdqa64 128(%rdi), %zmm5
+; AVX512DQ-BW-NEXT: vpermw %zmm5, %zmm0, %zmm0
; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm1 = [0,6,12,18,24,30,36,42,48,54,60,0,0,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2w %zmm3, %zmm2, %zmm1
+; AVX512DQ-BW-NEXT: vpermi2w %zmm4, %zmm3, %zmm1
; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,0,0,3,9,15,21,27,0,0,0,3,9,15,21,27]
; AVX512DQ-BW-NEXT: # ymm1 = mem[0,1,0,1]
-; AVX512DQ-BW-NEXT: vpermi2w %ymm4, %ymm5, %ymm1
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm6 = [1,7,13,19,25,31,37,43,49,55,61,0,0,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2w %zmm3, %zmm2, %zmm6
-; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} ymm1 = ymm6[0,1,2],ymm1[3,4,5,6,7],ymm6[8,9,10],ymm1[11,12,13,14,15]
-; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [0,0,0,20,26,0,6,12,0,0,0,20,26,0,6,12]
-; AVX512DQ-BW-NEXT: # ymm6 = mem[0,1,0,1]
-; AVX512DQ-BW-NEXT: vpermi2w %ymm5, %ymm4, %ymm6
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm7 = [34,40,46,52,58,0,6,12,18,24,30,0,0,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2w %zmm2, %zmm3, %zmm7
-; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} ymm6 = ymm7[0,1,2],ymm6[3,4,5,6,7],ymm7[8,9,10],ymm6[11,12,13,14,15]
-; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
-; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [0,0,0,21,27,1,7,13,0,0,0,21,27,1,7,13]
+; AVX512DQ-BW-NEXT: vpermw %zmm5, %zmm1, %zmm1
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm2 = [1,7,13,19,25,31,37,43,49,55,61,0,0,0,0,0]
+; AVX512DQ-BW-NEXT: vpermi2w %zmm4, %zmm3, %zmm2
+; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
+; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [0,0,0,20,26,0,6,12,0,0,0,20,26,0,6,12]
+; AVX512DQ-BW-NEXT: # ymm2 = mem[0,1,0,1]
+; AVX512DQ-BW-NEXT: vmovdqa 128(%rdi), %ymm6
+; AVX512DQ-BW-NEXT: vmovdqa 160(%rdi), %ymm7
+; AVX512DQ-BW-NEXT: vpermi2w %ymm6, %ymm7, %ymm2
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm8 = [34,40,46,52,58,0,6,12,18,24,30,0,0,0,0,0]
+; AVX512DQ-BW-NEXT: vpermi2w %zmm3, %zmm4, %zmm8
+; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} ymm2 = ymm8[0,1,2],ymm2[3,4,5,6,7],ymm8[8,9,10],ymm2[11,12,13,14,15]
+; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1,2,3],ymm2[4,5,6,7]
+; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [0,0,0,21,27,1,7,13,0,0,0,21,27,1,7,13]
+; AVX512DQ-BW-NEXT: # ymm8 = mem[0,1,0,1]
+; AVX512DQ-BW-NEXT: vpermi2w %ymm6, %ymm7, %ymm8
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm6 = [35,41,47,53,59,1,7,13,19,25,31,0,0,0,0,0]
+; AVX512DQ-BW-NEXT: vpermi2w %zmm3, %zmm4, %zmm6
+; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} ymm7 = ymm6[0,1,2],ymm8[3,4,5,6,7],ymm6[8,9,10],ymm8[11,12,13,14,15]
+; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
+; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [0,0,0,6,12,18,24,30,0,0,0,6,12,18,24,30]
; AVX512DQ-BW-NEXT: # ymm7 = mem[0,1,0,1]
-; AVX512DQ-BW-NEXT: vpermi2w %ymm5, %ymm4, %ymm7
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm8 = [35,41,47,53,59,1,7,13,19,25,31,0,0,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2w %zmm2, %zmm3, %zmm8
-; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} ymm7 = ymm8[0,1,2],ymm7[3,4,5,6,7],ymm8[8,9,10],ymm7[11,12,13,14,15]
-; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
-; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [0,0,0,6,12,18,24,30,0,0,0,6,12,18,24,30]
+; AVX512DQ-BW-NEXT: vpermw %zmm5, %zmm7, %zmm7
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm8 = [4,10,16,22,28,34,40,46,52,58,0,0,0,0,0,0]
+; AVX512DQ-BW-NEXT: vpermi2w %zmm4, %zmm3, %zmm8
+; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3,4],ymm7[5,6,7]
+; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [0,0,1,7,13,19,25,31,0,0,1,7,13,19,25,31]
; AVX512DQ-BW-NEXT: # ymm8 = mem[0,1,0,1]
-; AVX512DQ-BW-NEXT: vpermi2w %ymm4, %ymm5, %ymm8
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm9 = [4,10,16,22,28,34,40,46,52,58,0,0,0,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2w %zmm3, %zmm2, %zmm9
-; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3,4],ymm8[5,6,7]
-; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [0,0,1,7,13,19,25,31,0,0,1,7,13,19,25,31]
-; AVX512DQ-BW-NEXT: # ymm9 = mem[0,1,0,1]
-; AVX512DQ-BW-NEXT: vpermi2w %ymm4, %ymm5, %ymm9
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm4 = [5,11,17,23,29,35,41,47,53,59,0,0,0,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2w %zmm3, %zmm2, %zmm4
-; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3,4],ymm9[5,6,7]
+; AVX512DQ-BW-NEXT: vpermw %zmm5, %zmm8, %zmm5
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm8 = [5,11,17,23,29,35,41,47,53,59,0,0,0,0,0,0]
+; AVX512DQ-BW-NEXT: vpermi2w %zmm4, %zmm3, %zmm8
+; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm3 = ymm8[0,1,2,3,4],ymm5[5,6,7]
; AVX512DQ-BW-NEXT: vmovdqa %ymm0, (%rsi)
; AVX512DQ-BW-NEXT: vmovdqa %ymm1, (%rdx)
-; AVX512DQ-BW-NEXT: vmovdqa %ymm6, (%rcx)
-; AVX512DQ-BW-NEXT: vmovdqa %ymm7, (%r8)
-; AVX512DQ-BW-NEXT: vmovdqa %ymm8, (%r9)
-; AVX512DQ-BW-NEXT: vmovdqa %ymm2, (%rax)
+; AVX512DQ-BW-NEXT: vmovdqa %ymm2, (%rcx)
+; AVX512DQ-BW-NEXT: vmovdqa %ymm6, (%r8)
+; AVX512DQ-BW-NEXT: vmovdqa %ymm7, (%r9)
+; AVX512DQ-BW-NEXT: vmovdqa %ymm3, (%rax)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
; AVX512DQ-BW-FCP-LABEL: load_i16_stride6_vf16:
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm2
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm3
; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [0,0,0,2,8,14,20,26,0,0,0,2,8,14,20,26]
; AVX512DQ-BW-FCP-NEXT: # ymm0 = mem[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 160(%rdi), %ymm4
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 128(%rdi), %ymm5
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm4, %ymm5, %ymm0
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm3
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 128(%rdi), %zmm5
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm5, %zmm0, %zmm0
; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm1 = [0,6,12,18,24,30,36,42,48,54,60,0,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %zmm3, %zmm2, %zmm1
+; AVX512DQ-BW-FCP-NEXT: vpermi2w %zmm4, %zmm3, %zmm1
; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,0,0,3,9,15,21,27,0,0,0,3,9,15,21,27]
; AVX512DQ-BW-FCP-NEXT: # ymm1 = mem[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm4, %ymm5, %ymm1
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm6 = [1,7,13,19,25,31,37,43,49,55,61,0,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %zmm3, %zmm2, %zmm6
-; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm6[0,1,2],ymm1[3,4,5,6,7],ymm6[8,9,10],ymm1[11,12,13,14,15]
-; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [0,0,0,20,26,0,6,12,0,0,0,20,26,0,6,12]
-; AVX512DQ-BW-FCP-NEXT: # ymm6 = mem[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm5, %ymm4, %ymm6
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm7 = [34,40,46,52,58,0,6,12,18,24,30,0,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %zmm2, %zmm3, %zmm7
-; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm7[0,1,2],ymm6[3,4,5,6,7],ymm7[8,9,10],ymm6[11,12,13,14,15]
-; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [0,0,0,21,27,1,7,13,0,0,0,21,27,1,7,13]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm5, %zmm1, %zmm1
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm2 = [1,7,13,19,25,31,37,43,49,55,61,0,0,0,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermi2w %zmm4, %zmm3, %zmm2
+; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
+; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [0,0,0,20,26,0,6,12,0,0,0,20,26,0,6,12]
+; AVX512DQ-BW-FCP-NEXT: # ymm2 = mem[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 128(%rdi), %ymm6
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 160(%rdi), %ymm7
+; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm6, %ymm7, %ymm2
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm8 = [34,40,46,52,58,0,6,12,18,24,30,0,0,0,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermi2w %zmm3, %zmm4, %zmm8
+; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm8[0,1,2],ymm2[3,4,5,6,7],ymm8[8,9,10],ymm2[11,12,13,14,15]
+; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1,2,3],ymm2[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [0,0,0,21,27,1,7,13,0,0,0,21,27,1,7,13]
+; AVX512DQ-BW-FCP-NEXT: # ymm8 = mem[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm6, %ymm7, %ymm8
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm6 = [35,41,47,53,59,1,7,13,19,25,31,0,0,0,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermi2w %zmm3, %zmm4, %zmm6
+; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm6[0,1,2],ymm8[3,4,5,6,7],ymm6[8,9,10],ymm8[11,12,13,14,15]
+; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [0,0,0,6,12,18,24,30,0,0,0,6,12,18,24,30]
; AVX512DQ-BW-FCP-NEXT: # ymm7 = mem[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm5, %ymm4, %ymm7
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm8 = [35,41,47,53,59,1,7,13,19,25,31,0,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %zmm2, %zmm3, %zmm8
-; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm8[0,1,2],ymm7[3,4,5,6,7],ymm8[8,9,10],ymm7[11,12,13,14,15]
-; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [0,0,0,6,12,18,24,30,0,0,0,6,12,18,24,30]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm5, %zmm7, %zmm7
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm8 = [4,10,16,22,28,34,40,46,52,58,0,0,0,0,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermi2w %zmm4, %zmm3, %zmm8
+; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3,4],ymm7[5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [0,0,1,7,13,19,25,31,0,0,1,7,13,19,25,31]
; AVX512DQ-BW-FCP-NEXT: # ymm8 = mem[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm4, %ymm5, %ymm8
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm9 = [4,10,16,22,28,34,40,46,52,58,0,0,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %zmm3, %zmm2, %zmm9
-; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3,4],ymm8[5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [0,0,1,7,13,19,25,31,0,0,1,7,13,19,25,31]
-; AVX512DQ-BW-FCP-NEXT: # ymm9 = mem[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm4, %ymm5, %ymm9
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm4 = [5,11,17,23,29,35,41,47,53,59,0,0,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %zmm3, %zmm2, %zmm4
-; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3,4],ymm9[5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm5, %zmm8, %zmm5
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm8 = [5,11,17,23,29,35,41,47,53,59,0,0,0,0,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermi2w %zmm4, %zmm3, %zmm8
+; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm8[0,1,2,3,4],ymm5[5,6,7]
; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm0, (%rsi)
; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm1, (%rdx)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm6, (%rcx)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm7, (%r8)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm8, (%r9)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm2, (%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm2, (%rcx)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm6, (%r8)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm7, (%r9)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm3, (%rax)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%wide.vec = load <96 x i16>, ptr %in.vec, align 64
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
index 713bd757a7b99e..95b5ffde485640 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
@@ -321,22 +321,23 @@ define void @load_i16_stride7_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[0,1,14,15,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512BW-FCP-NEXT: vpsrld $16, %xmm0, %xmm3
; AVX512BW-FCP-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; AVX512BW-FCP-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[8,9,6,7,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[12,13,10,11,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FCP-NEXT: vpbroadcastw 8(%rdi), %xmm7
-; AVX512BW-FCP-NEXT: vpsrlq $48, %xmm1, %xmm8
-; AVX512BW-FCP-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
-; AVX512BW-FCP-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; AVX512BW-FCP-NEXT: vmovd {{.*#+}} xmm8 = [6,13,0,0,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2w %xmm1, %xmm0, %xmm8
+; AVX512BW-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[8,9,6,7,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[12,13,10,11,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-FCP-NEXT: vpbroadcastw 8(%rdi), %xmm6
+; AVX512BW-FCP-NEXT: vpsrlq $48, %xmm1, %xmm7
+; AVX512BW-FCP-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; AVX512BW-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} xmm3 = [6,13,5,13,6,14,7,15]
+; AVX512BW-FCP-NEXT: vpermw (%rdi), %ymm3, %ymm3
; AVX512BW-FCP-NEXT: vmovd %xmm2, (%rsi)
; AVX512BW-FCP-NEXT: vmovd %xmm4, (%rdx)
-; AVX512BW-FCP-NEXT: vmovd %xmm6, (%rcx)
-; AVX512BW-FCP-NEXT: vmovd %xmm5, (%r8)
-; AVX512BW-FCP-NEXT: vmovd %xmm7, (%r9)
-; AVX512BW-FCP-NEXT: vmovd %xmm3, (%r10)
-; AVX512BW-FCP-NEXT: vmovd %xmm8, (%rax)
+; AVX512BW-FCP-NEXT: vmovd %xmm5, (%rcx)
+; AVX512BW-FCP-NEXT: vmovd %xmm0, (%r8)
+; AVX512BW-FCP-NEXT: vmovd %xmm6, (%r9)
+; AVX512BW-FCP-NEXT: vmovd %xmm1, (%r10)
+; AVX512BW-FCP-NEXT: vmovd %xmm3, (%rax)
+; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
; AVX512DQ-BW-LABEL: load_i16_stride7_vf2:
@@ -378,22 +379,23 @@ define void @load_i16_stride7_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[0,1,14,15,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-BW-FCP-NEXT: vpsrld $16, %xmm0, %xmm3
; AVX512DQ-BW-FCP-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; AVX512DQ-BW-FCP-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[8,9,6,7,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[12,13,10,11,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-BW-FCP-NEXT: vpbroadcastw 8(%rdi), %xmm7
-; AVX512DQ-BW-FCP-NEXT: vpsrlq $48, %xmm1, %xmm8
-; AVX512DQ-BW-FCP-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
-; AVX512DQ-BW-FCP-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; AVX512DQ-BW-FCP-NEXT: vmovd {{.*#+}} xmm8 = [6,13,0,0,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %xmm1, %xmm0, %xmm8
+; AVX512DQ-BW-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[8,9,6,7,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[12,13,10,11,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-BW-FCP-NEXT: vpbroadcastw 8(%rdi), %xmm6
+; AVX512DQ-BW-FCP-NEXT: vpsrlq $48, %xmm1, %xmm7
+; AVX512DQ-BW-FCP-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; AVX512DQ-BW-FCP-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} xmm3 = [6,13,5,13,6,14,7,15]
+; AVX512DQ-BW-FCP-NEXT: vpermw (%rdi), %ymm3, %ymm3
; AVX512DQ-BW-FCP-NEXT: vmovd %xmm2, (%rsi)
; AVX512DQ-BW-FCP-NEXT: vmovd %xmm4, (%rdx)
-; AVX512DQ-BW-FCP-NEXT: vmovd %xmm6, (%rcx)
-; AVX512DQ-BW-FCP-NEXT: vmovd %xmm5, (%r8)
-; AVX512DQ-BW-FCP-NEXT: vmovd %xmm7, (%r9)
-; AVX512DQ-BW-FCP-NEXT: vmovd %xmm3, (%r10)
-; AVX512DQ-BW-FCP-NEXT: vmovd %xmm8, (%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovd %xmm5, (%rcx)
+; AVX512DQ-BW-FCP-NEXT: vmovd %xmm0, (%r8)
+; AVX512DQ-BW-FCP-NEXT: vmovd %xmm6, (%r9)
+; AVX512DQ-BW-FCP-NEXT: vmovd %xmm1, (%r10)
+; AVX512DQ-BW-FCP-NEXT: vmovd %xmm3, (%rax)
+; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%wide.vec = load <14 x i16>, ptr %in.vec, align 64
%strided.vec0 = shufflevector <14 x i16> %wide.vec, <14 x i16> poison, <2 x i32> <i32 0, i32 7>
@@ -906,28 +908,27 @@ define void @load_i16_stride7_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %r10
; AVX512BW-NEXT: vmovq {{.*#+}} xmm0 = [0,7,14,21,0,0,0,0]
-; AVX512BW-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512BW-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm0
-; AVX512BW-NEXT: vmovq {{.*#+}} xmm3 = [1,8,15,22,0,0,0,0]
-; AVX512BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm3
-; AVX512BW-NEXT: vmovq {{.*#+}} xmm4 = [2,9,16,23,0,0,0,0]
-; AVX512BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm4
-; AVX512BW-NEXT: vmovq {{.*#+}} xmm5 = [3,10,17,24,0,0,0,0]
-; AVX512BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm5
-; AVX512BW-NEXT: vmovq {{.*#+}} xmm6 = [4,11,18,25,0,0,0,0]
-; AVX512BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm6
-; AVX512BW-NEXT: vmovq {{.*#+}} xmm7 = [5,12,19,26,0,0,0,0]
-; AVX512BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm7
-; AVX512BW-NEXT: vmovq {{.*#+}} xmm8 = [6,13,20,27,0,0,0,0]
-; AVX512BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm8
+; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm1
+; AVX512BW-NEXT: vpermw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vmovq {{.*#+}} xmm2 = [1,8,15,22,0,0,0,0]
+; AVX512BW-NEXT: vpermw %zmm1, %zmm2, %zmm2
+; AVX512BW-NEXT: vmovq {{.*#+}} xmm3 = [2,9,16,23,0,0,0,0]
+; AVX512BW-NEXT: vpermw %zmm1, %zmm3, %zmm3
+; AVX512BW-NEXT: vmovq {{.*#+}} xmm4 = [3,10,17,24,0,0,0,0]
+; AVX512BW-NEXT: vpermw %zmm1, %zmm4, %zmm4
+; AVX512BW-NEXT: vmovq {{.*#+}} xmm5 = [4,11,18,25,0,0,0,0]
+; AVX512BW-NEXT: vpermw %zmm1, %zmm5, %zmm5
+; AVX512BW-NEXT: vmovq {{.*#+}} xmm6 = [5,12,19,26,0,0,0,0]
+; AVX512BW-NEXT: vpermw %zmm1, %zmm6, %zmm6
+; AVX512BW-NEXT: vmovq {{.*#+}} xmm7 = [6,13,20,27,0,0,0,0]
+; AVX512BW-NEXT: vpermw %zmm1, %zmm7, %zmm1
; AVX512BW-NEXT: vmovq %xmm0, (%rsi)
-; AVX512BW-NEXT: vmovq %xmm3, (%rdx)
-; AVX512BW-NEXT: vmovq %xmm4, (%rcx)
-; AVX512BW-NEXT: vmovq %xmm5, (%r8)
-; AVX512BW-NEXT: vmovq %xmm6, (%r9)
-; AVX512BW-NEXT: vmovq %xmm7, (%r10)
-; AVX512BW-NEXT: vmovq %xmm8, (%rax)
+; AVX512BW-NEXT: vmovq %xmm2, (%rdx)
+; AVX512BW-NEXT: vmovq %xmm3, (%rcx)
+; AVX512BW-NEXT: vmovq %xmm4, (%r8)
+; AVX512BW-NEXT: vmovq %xmm5, (%r9)
+; AVX512BW-NEXT: vmovq %xmm6, (%r10)
+; AVX512BW-NEXT: vmovq %xmm1, (%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -936,28 +937,27 @@ define void @load_i16_stride7_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm0 = [0,7,14,21,0,0,0,0]
-; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm0
-; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm3 = [1,8,15,22,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm3
-; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm4 = [2,9,16,23,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm4
-; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm5 = [3,10,17,24,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm5
-; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm6 = [4,11,18,25,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm6
-; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm7 = [5,12,19,26,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm7
-; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm8 = [6,13,20,27,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm8
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm1
+; AVX512BW-FCP-NEXT: vpermw %zmm1, %zmm0, %zmm0
+; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm2 = [1,8,15,22,0,0,0,0]
+; AVX512BW-FCP-NEXT: vpermw %zmm1, %zmm2, %zmm2
+; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm3 = [2,9,16,23,0,0,0,0]
+; AVX512BW-FCP-NEXT: vpermw %zmm1, %zmm3, %zmm3
+; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm4 = [3,10,17,24,0,0,0,0]
+; AVX512BW-FCP-NEXT: vpermw %zmm1, %zmm4, %zmm4
+; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm5 = [4,11,18,25,0,0,0,0]
+; AVX512BW-FCP-NEXT: vpermw %zmm1, %zmm5, %zmm5
+; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm6 = [5,12,19,26,0,0,0,0]
+; AVX512BW-FCP-NEXT: vpermw %zmm1, %zmm6, %zmm6
+; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm7 = [6,13,20,27,0,0,0,0]
+; AVX512BW-FCP-NEXT: vpermw %zmm1, %zmm7, %zmm1
; AVX512BW-FCP-NEXT: vmovq %xmm0, (%rsi)
-; AVX512BW-FCP-NEXT: vmovq %xmm3, (%rdx)
-; AVX512BW-FCP-NEXT: vmovq %xmm4, (%rcx)
-; AVX512BW-FCP-NEXT: vmovq %xmm5, (%r8)
-; AVX512BW-FCP-NEXT: vmovq %xmm6, (%r9)
-; AVX512BW-FCP-NEXT: vmovq %xmm7, (%r10)
-; AVX512BW-FCP-NEXT: vmovq %xmm8, (%rax)
+; AVX512BW-FCP-NEXT: vmovq %xmm2, (%rdx)
+; AVX512BW-FCP-NEXT: vmovq %xmm3, (%rcx)
+; AVX512BW-FCP-NEXT: vmovq %xmm4, (%r8)
+; AVX512BW-FCP-NEXT: vmovq %xmm5, (%r9)
+; AVX512BW-FCP-NEXT: vmovq %xmm6, (%r10)
+; AVX512BW-FCP-NEXT: vmovq %xmm1, (%rax)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
@@ -966,28 +966,27 @@ define void @load_i16_stride7_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %r10
; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm0 = [0,7,14,21,0,0,0,0]
-; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512DQ-BW-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512DQ-BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm0
-; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm3 = [1,8,15,22,0,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm3
-; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm4 = [2,9,16,23,0,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm4
-; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm5 = [3,10,17,24,0,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm5
-; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm6 = [4,11,18,25,0,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm6
-; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm7 = [5,12,19,26,0,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm7
-; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm8 = [6,13,20,27,0,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm8
+; AVX512DQ-BW-NEXT: vmovdqa64 (%rdi), %zmm1
+; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm0, %zmm0
+; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm2 = [1,8,15,22,0,0,0,0]
+; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm2, %zmm2
+; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm3 = [2,9,16,23,0,0,0,0]
+; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm3, %zmm3
+; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm4 = [3,10,17,24,0,0,0,0]
+; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm4, %zmm4
+; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm5 = [4,11,18,25,0,0,0,0]
+; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm5, %zmm5
+; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm6 = [5,12,19,26,0,0,0,0]
+; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm6, %zmm6
+; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm7 = [6,13,20,27,0,0,0,0]
+; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm7, %zmm1
; AVX512DQ-BW-NEXT: vmovq %xmm0, (%rsi)
-; AVX512DQ-BW-NEXT: vmovq %xmm3, (%rdx)
-; AVX512DQ-BW-NEXT: vmovq %xmm4, (%rcx)
-; AVX512DQ-BW-NEXT: vmovq %xmm5, (%r8)
-; AVX512DQ-BW-NEXT: vmovq %xmm6, (%r9)
-; AVX512DQ-BW-NEXT: vmovq %xmm7, (%r10)
-; AVX512DQ-BW-NEXT: vmovq %xmm8, (%rax)
+; AVX512DQ-BW-NEXT: vmovq %xmm2, (%rdx)
+; AVX512DQ-BW-NEXT: vmovq %xmm3, (%rcx)
+; AVX512DQ-BW-NEXT: vmovq %xmm4, (%r8)
+; AVX512DQ-BW-NEXT: vmovq %xmm5, (%r9)
+; AVX512DQ-BW-NEXT: vmovq %xmm6, (%r10)
+; AVX512DQ-BW-NEXT: vmovq %xmm1, (%rax)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
@@ -996,28 +995,27 @@ define void @load_i16_stride7_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm0 = [0,7,14,21,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm0
-; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm3 = [1,8,15,22,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm4 = [2,9,16,23,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm4
-; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm5 = [3,10,17,24,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm5
-; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm6 = [4,11,18,25,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm6
-; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm7 = [5,12,19,26,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm7
-; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm8 = [6,13,20,27,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm8
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm1
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm1, %zmm0, %zmm0
+; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm2 = [1,8,15,22,0,0,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm1, %zmm2, %zmm2
+; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm3 = [2,9,16,23,0,0,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm1, %zmm3, %zmm3
+; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm4 = [3,10,17,24,0,0,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm1, %zmm4, %zmm4
+; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm5 = [4,11,18,25,0,0,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm1, %zmm5, %zmm5
+; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm6 = [5,12,19,26,0,0,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm1, %zmm6, %zmm6
+; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm7 = [6,13,20,27,0,0,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm1, %zmm7, %zmm1
; AVX512DQ-BW-FCP-NEXT: vmovq %xmm0, (%rsi)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm3, (%rdx)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm4, (%rcx)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm5, (%r8)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm6, (%r9)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm7, (%r10)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm8, (%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm2, (%rdx)
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm3, (%rcx)
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm4, (%r8)
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm5, (%r9)
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm6, (%r10)
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm1, (%rax)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%wide.vec = load <28 x i16>, ptr %in.vec, align 64
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-8.ll
index 051b4e300b8275..fff21f9aad1bbb 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-8.ll
@@ -623,31 +623,30 @@ define void @load_i16_stride8_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %r10
; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %r11
; AVX512BW-NEXT: vmovq {{.*#+}} xmm0 = [0,8,16,24,0,0,0,0]
-; AVX512BW-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512BW-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm0
-; AVX512BW-NEXT: vmovq {{.*#+}} xmm3 = [1,9,17,25,0,0,0,0]
-; AVX512BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm3
-; AVX512BW-NEXT: vmovq {{.*#+}} xmm4 = [2,10,18,26,0,0,0,0]
-; AVX512BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm4
-; AVX512BW-NEXT: vmovq {{.*#+}} xmm5 = [3,11,19,27,0,0,0,0]
-; AVX512BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm5
-; AVX512BW-NEXT: vmovq {{.*#+}} xmm6 = [4,12,20,28,0,0,0,0]
-; AVX512BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm6
-; AVX512BW-NEXT: vmovq {{.*#+}} xmm7 = [5,13,21,29,0,0,0,0]
-; AVX512BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm7
-; AVX512BW-NEXT: vmovq {{.*#+}} xmm8 = [6,14,22,30,0,0,0,0]
-; AVX512BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm8
-; AVX512BW-NEXT: vmovq {{.*#+}} xmm9 = [7,15,23,31,0,0,0,0]
-; AVX512BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm9
+; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm1
+; AVX512BW-NEXT: vpermw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vmovq {{.*#+}} xmm2 = [1,9,17,25,0,0,0,0]
+; AVX512BW-NEXT: vpermw %zmm1, %zmm2, %zmm2
+; AVX512BW-NEXT: vmovq {{.*#+}} xmm3 = [2,10,18,26,0,0,0,0]
+; AVX512BW-NEXT: vpermw %zmm1, %zmm3, %zmm3
+; AVX512BW-NEXT: vmovq {{.*#+}} xmm4 = [3,11,19,27,0,0,0,0]
+; AVX512BW-NEXT: vpermw %zmm1, %zmm4, %zmm4
+; AVX512BW-NEXT: vmovq {{.*#+}} xmm5 = [4,12,20,28,0,0,0,0]
+; AVX512BW-NEXT: vpermw %zmm1, %zmm5, %zmm5
+; AVX512BW-NEXT: vmovq {{.*#+}} xmm6 = [5,13,21,29,0,0,0,0]
+; AVX512BW-NEXT: vpermw %zmm1, %zmm6, %zmm6
+; AVX512BW-NEXT: vmovq {{.*#+}} xmm7 = [6,14,22,30,0,0,0,0]
+; AVX512BW-NEXT: vpermw %zmm1, %zmm7, %zmm7
+; AVX512BW-NEXT: vmovq {{.*#+}} xmm8 = [7,15,23,31,0,0,0,0]
+; AVX512BW-NEXT: vpermw %zmm1, %zmm8, %zmm1
; AVX512BW-NEXT: vmovq %xmm0, (%rsi)
-; AVX512BW-NEXT: vmovq %xmm3, (%rdx)
-; AVX512BW-NEXT: vmovq %xmm4, (%rcx)
-; AVX512BW-NEXT: vmovq %xmm5, (%r8)
-; AVX512BW-NEXT: vmovq %xmm6, (%r9)
-; AVX512BW-NEXT: vmovq %xmm7, (%r11)
-; AVX512BW-NEXT: vmovq %xmm8, (%r10)
-; AVX512BW-NEXT: vmovq %xmm9, (%rax)
+; AVX512BW-NEXT: vmovq %xmm2, (%rdx)
+; AVX512BW-NEXT: vmovq %xmm3, (%rcx)
+; AVX512BW-NEXT: vmovq %xmm4, (%r8)
+; AVX512BW-NEXT: vmovq %xmm5, (%r9)
+; AVX512BW-NEXT: vmovq %xmm6, (%r11)
+; AVX512BW-NEXT: vmovq %xmm7, (%r10)
+; AVX512BW-NEXT: vmovq %xmm1, (%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -657,31 +656,30 @@ define void @load_i16_stride8_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r11
; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm0 = [0,8,16,24,0,0,0,0]
-; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm0
-; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm3 = [1,9,17,25,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm3
-; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm4 = [2,10,18,26,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm4
-; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm5 = [3,11,19,27,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm5
-; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm6 = [4,12,20,28,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm6
-; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm7 = [5,13,21,29,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm7
-; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm8 = [6,14,22,30,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm8
-; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm9 = [7,15,23,31,0,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm9
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm1
+; AVX512BW-FCP-NEXT: vpermw %zmm1, %zmm0, %zmm0
+; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm2 = [1,9,17,25,0,0,0,0]
+; AVX512BW-FCP-NEXT: vpermw %zmm1, %zmm2, %zmm2
+; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm3 = [2,10,18,26,0,0,0,0]
+; AVX512BW-FCP-NEXT: vpermw %zmm1, %zmm3, %zmm3
+; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm4 = [3,11,19,27,0,0,0,0]
+; AVX512BW-FCP-NEXT: vpermw %zmm1, %zmm4, %zmm4
+; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm5 = [4,12,20,28,0,0,0,0]
+; AVX512BW-FCP-NEXT: vpermw %zmm1, %zmm5, %zmm5
+; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm6 = [5,13,21,29,0,0,0,0]
+; AVX512BW-FCP-NEXT: vpermw %zmm1, %zmm6, %zmm6
+; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm7 = [6,14,22,30,0,0,0,0]
+; AVX512BW-FCP-NEXT: vpermw %zmm1, %zmm7, %zmm7
+; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm8 = [7,15,23,31,0,0,0,0]
+; AVX512BW-FCP-NEXT: vpermw %zmm1, %zmm8, %zmm1
; AVX512BW-FCP-NEXT: vmovq %xmm0, (%rsi)
-; AVX512BW-FCP-NEXT: vmovq %xmm3, (%rdx)
-; AVX512BW-FCP-NEXT: vmovq %xmm4, (%rcx)
-; AVX512BW-FCP-NEXT: vmovq %xmm5, (%r8)
-; AVX512BW-FCP-NEXT: vmovq %xmm6, (%r9)
-; AVX512BW-FCP-NEXT: vmovq %xmm7, (%r11)
-; AVX512BW-FCP-NEXT: vmovq %xmm8, (%r10)
-; AVX512BW-FCP-NEXT: vmovq %xmm9, (%rax)
+; AVX512BW-FCP-NEXT: vmovq %xmm2, (%rdx)
+; AVX512BW-FCP-NEXT: vmovq %xmm3, (%rcx)
+; AVX512BW-FCP-NEXT: vmovq %xmm4, (%r8)
+; AVX512BW-FCP-NEXT: vmovq %xmm5, (%r9)
+; AVX512BW-FCP-NEXT: vmovq %xmm6, (%r11)
+; AVX512BW-FCP-NEXT: vmovq %xmm7, (%r10)
+; AVX512BW-FCP-NEXT: vmovq %xmm1, (%rax)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
@@ -691,31 +689,30 @@ define void @load_i16_stride8_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %r10
; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %r11
; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm0 = [0,8,16,24,0,0,0,0]
-; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512DQ-BW-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512DQ-BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm0
-; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm3 = [1,9,17,25,0,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm3
-; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm4 = [2,10,18,26,0,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm4
-; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm5 = [3,11,19,27,0,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm5
-; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm6 = [4,12,20,28,0,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm6
-; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm7 = [5,13,21,29,0,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm7
-; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm8 = [6,14,22,30,0,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm8
-; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm9 = [7,15,23,31,0,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm9
+; AVX512DQ-BW-NEXT: vmovdqa64 (%rdi), %zmm1
+; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm0, %zmm0
+; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm2 = [1,9,17,25,0,0,0,0]
+; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm2, %zmm2
+; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm3 = [2,10,18,26,0,0,0,0]
+; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm3, %zmm3
+; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm4 = [3,11,19,27,0,0,0,0]
+; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm4, %zmm4
+; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm5 = [4,12,20,28,0,0,0,0]
+; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm5, %zmm5
+; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm6 = [5,13,21,29,0,0,0,0]
+; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm6, %zmm6
+; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm7 = [6,14,22,30,0,0,0,0]
+; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm7, %zmm7
+; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm8 = [7,15,23,31,0,0,0,0]
+; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm8, %zmm1
; AVX512DQ-BW-NEXT: vmovq %xmm0, (%rsi)
-; AVX512DQ-BW-NEXT: vmovq %xmm3, (%rdx)
-; AVX512DQ-BW-NEXT: vmovq %xmm4, (%rcx)
-; AVX512DQ-BW-NEXT: vmovq %xmm5, (%r8)
-; AVX512DQ-BW-NEXT: vmovq %xmm6, (%r9)
-; AVX512DQ-BW-NEXT: vmovq %xmm7, (%r11)
-; AVX512DQ-BW-NEXT: vmovq %xmm8, (%r10)
-; AVX512DQ-BW-NEXT: vmovq %xmm9, (%rax)
+; AVX512DQ-BW-NEXT: vmovq %xmm2, (%rdx)
+; AVX512DQ-BW-NEXT: vmovq %xmm3, (%rcx)
+; AVX512DQ-BW-NEXT: vmovq %xmm4, (%r8)
+; AVX512DQ-BW-NEXT: vmovq %xmm5, (%r9)
+; AVX512DQ-BW-NEXT: vmovq %xmm6, (%r11)
+; AVX512DQ-BW-NEXT: vmovq %xmm7, (%r10)
+; AVX512DQ-BW-NEXT: vmovq %xmm1, (%rax)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
@@ -725,31 +722,30 @@ define void @load_i16_stride8_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r11
; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm0 = [0,8,16,24,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm0
-; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm3 = [1,9,17,25,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm4 = [2,10,18,26,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm4
-; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm5 = [3,11,19,27,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm5
-; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm6 = [4,12,20,28,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm6
-; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm7 = [5,13,21,29,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm7
-; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm8 = [6,14,22,30,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm8
-; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm9 = [7,15,23,31,0,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2w %ymm2, %ymm1, %ymm9
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm1
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm1, %zmm0, %zmm0
+; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm2 = [1,9,17,25,0,0,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm1, %zmm2, %zmm2
+; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm3 = [2,10,18,26,0,0,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm1, %zmm3, %zmm3
+; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm4 = [3,11,19,27,0,0,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm1, %zmm4, %zmm4
+; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm5 = [4,12,20,28,0,0,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm1, %zmm5, %zmm5
+; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm6 = [5,13,21,29,0,0,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm1, %zmm6, %zmm6
+; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm7 = [6,14,22,30,0,0,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm1, %zmm7, %zmm7
+; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm8 = [7,15,23,31,0,0,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm1, %zmm8, %zmm1
; AVX512DQ-BW-FCP-NEXT: vmovq %xmm0, (%rsi)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm3, (%rdx)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm4, (%rcx)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm5, (%r8)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm6, (%r9)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm7, (%r11)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm8, (%r10)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm9, (%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm2, (%rdx)
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm3, (%rcx)
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm4, (%r8)
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm5, (%r9)
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm6, (%r11)
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm7, (%r10)
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm1, (%rax)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%wide.vec = load <32 x i16>, ptr %in.vec, align 64
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-2.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-2.ll
index 7cb46b79f7f361..f2c5a91d2cca32 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-2.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-2.ll
@@ -363,11 +363,10 @@ define void @load_i32_stride2_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nou
; AVX512-FCP-LABEL: load_i32_stride2_vf8:
; AVX512-FCP: # %bb.0:
; AVX512-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
-; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [1,3,5,7,9,11,13,15]
-; AVX512-FCP-NEXT: vpermi2d 32(%rdi), %ymm1, %ymm2
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [1,3,5,7,9,11,13,15]
+; AVX512-FCP-NEXT: vpermps (%rdi), %zmm1, %zmm1
; AVX512-FCP-NEXT: vpmovqd %zmm0, (%rsi)
-; AVX512-FCP-NEXT: vmovdqa %ymm2, (%rdx)
+; AVX512-FCP-NEXT: vmovaps %ymm1, (%rdx)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
@@ -385,11 +384,10 @@ define void @load_i32_stride2_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nou
; AVX512DQ-FCP-LABEL: load_i32_stride2_vf8:
; AVX512DQ-FCP: # %bb.0:
; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [1,3,5,7,9,11,13,15]
-; AVX512DQ-FCP-NEXT: vpermi2d 32(%rdi), %ymm1, %ymm2
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [1,3,5,7,9,11,13,15]
+; AVX512DQ-FCP-NEXT: vpermps (%rdi), %zmm1, %zmm1
; AVX512DQ-FCP-NEXT: vpmovqd %zmm0, (%rsi)
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm2, (%rdx)
+; AVX512DQ-FCP-NEXT: vmovaps %ymm1, (%rdx)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
@@ -407,11 +405,10 @@ define void @load_i32_stride2_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nou
; AVX512BW-FCP-LABEL: load_i32_stride2_vf8:
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
-; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [1,3,5,7,9,11,13,15]
-; AVX512BW-FCP-NEXT: vpermi2d 32(%rdi), %ymm1, %ymm2
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [1,3,5,7,9,11,13,15]
+; AVX512BW-FCP-NEXT: vpermps (%rdi), %zmm1, %zmm1
; AVX512BW-FCP-NEXT: vpmovqd %zmm0, (%rsi)
-; AVX512BW-FCP-NEXT: vmovdqa %ymm2, (%rdx)
+; AVX512BW-FCP-NEXT: vmovaps %ymm1, (%rdx)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
@@ -429,11 +426,10 @@ define void @load_i32_stride2_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nou
; AVX512DQ-BW-FCP-LABEL: load_i32_stride2_vf8:
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [1,3,5,7,9,11,13,15]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d 32(%rdi), %ymm1, %ymm2
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [1,3,5,7,9,11,13,15]
+; AVX512DQ-BW-FCP-NEXT: vpermps (%rdi), %zmm1, %zmm1
; AVX512DQ-BW-FCP-NEXT: vpmovqd %zmm0, (%rsi)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm2, (%rdx)
+; AVX512DQ-BW-FCP-NEXT: vmovaps %ymm1, (%rdx)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%wide.vec = load <16 x i32>, ptr %in.vec, align 64
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-3.ll
index 213c5febfca233..d9383f524f1d18 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-3.ll
@@ -310,128 +310,120 @@ define void @load_i32_stride3_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-LABEL: load_i32_stride3_vf4:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,3,6,9]
-; AVX512-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512-NEXT: vpermi2d %ymm2, %ymm1, %ymm0
-; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,4,7,10]
-; AVX512-NEXT: vpermi2d %ymm2, %ymm1, %ymm3
-; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,5,8,11]
-; AVX512-NEXT: vpermi2d %ymm2, %ymm1, %ymm4
-; AVX512-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512-NEXT: vmovdqa %xmm3, (%rdx)
-; AVX512-NEXT: vmovdqa %xmm4, (%rcx)
+; AVX512-NEXT: vmovaps (%rdi), %zmm1
+; AVX512-NEXT: vpermps %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm2 = [1,4,7,10]
+; AVX512-NEXT: vpermps %zmm1, %zmm2, %zmm2
+; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm3 = [2,5,8,11]
+; AVX512-NEXT: vpermps %zmm1, %zmm3, %zmm1
+; AVX512-NEXT: vmovaps %xmm0, (%rsi)
+; AVX512-NEXT: vmovaps %xmm2, (%rdx)
+; AVX512-NEXT: vmovaps %xmm1, (%rcx)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
; AVX512-FCP-LABEL: load_i32_stride3_vf4:
; AVX512-FCP: # %bb.0:
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,3,6,9]
-; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512-FCP-NEXT: vpermi2d %ymm2, %ymm1, %ymm0
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,4,7,10]
-; AVX512-FCP-NEXT: vpermi2d %ymm2, %ymm1, %ymm3
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,5,8,11]
-; AVX512-FCP-NEXT: vpermi2d %ymm2, %ymm1, %ymm4
-; AVX512-FCP-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512-FCP-NEXT: vmovdqa %xmm3, (%rdx)
-; AVX512-FCP-NEXT: vmovdqa %xmm4, (%rcx)
+; AVX512-FCP-NEXT: vmovaps (%rdi), %zmm1
+; AVX512-FCP-NEXT: vpermps %zmm1, %zmm0, %zmm0
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [1,4,7,10]
+; AVX512-FCP-NEXT: vpermps %zmm1, %zmm2, %zmm2
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [2,5,8,11]
+; AVX512-FCP-NEXT: vpermps %zmm1, %zmm3, %zmm1
+; AVX512-FCP-NEXT: vmovaps %xmm0, (%rsi)
+; AVX512-FCP-NEXT: vmovaps %xmm2, (%rdx)
+; AVX512-FCP-NEXT: vmovaps %xmm1, (%rcx)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
; AVX512DQ-LABEL: load_i32_stride3_vf4:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,3,6,9]
-; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512DQ-NEXT: vpermi2d %ymm2, %ymm1, %ymm0
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,4,7,10]
-; AVX512DQ-NEXT: vpermi2d %ymm2, %ymm1, %ymm3
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,5,8,11]
-; AVX512DQ-NEXT: vpermi2d %ymm2, %ymm1, %ymm4
-; AVX512DQ-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512DQ-NEXT: vmovdqa %xmm3, (%rdx)
-; AVX512DQ-NEXT: vmovdqa %xmm4, (%rcx)
+; AVX512DQ-NEXT: vmovaps (%rdi), %zmm1
+; AVX512DQ-NEXT: vpermps %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm2 = [1,4,7,10]
+; AVX512DQ-NEXT: vpermps %zmm1, %zmm2, %zmm2
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm3 = [2,5,8,11]
+; AVX512DQ-NEXT: vpermps %zmm1, %zmm3, %zmm1
+; AVX512DQ-NEXT: vmovaps %xmm0, (%rsi)
+; AVX512DQ-NEXT: vmovaps %xmm2, (%rdx)
+; AVX512DQ-NEXT: vmovaps %xmm1, (%rcx)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQ-FCP-LABEL: load_i32_stride3_vf4:
; AVX512DQ-FCP: # %bb.0:
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,3,6,9]
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512DQ-FCP-NEXT: vpermi2d %ymm2, %ymm1, %ymm0
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,4,7,10]
-; AVX512DQ-FCP-NEXT: vpermi2d %ymm2, %ymm1, %ymm3
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,5,8,11]
-; AVX512DQ-FCP-NEXT: vpermi2d %ymm2, %ymm1, %ymm4
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm3, (%rdx)
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm4, (%rcx)
+; AVX512DQ-FCP-NEXT: vmovaps (%rdi), %zmm1
+; AVX512DQ-FCP-NEXT: vpermps %zmm1, %zmm0, %zmm0
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [1,4,7,10]
+; AVX512DQ-FCP-NEXT: vpermps %zmm1, %zmm2, %zmm2
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [2,5,8,11]
+; AVX512DQ-FCP-NEXT: vpermps %zmm1, %zmm3, %zmm1
+; AVX512DQ-FCP-NEXT: vmovaps %xmm0, (%rsi)
+; AVX512DQ-FCP-NEXT: vmovaps %xmm2, (%rdx)
+; AVX512DQ-FCP-NEXT: vmovaps %xmm1, (%rcx)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
; AVX512BW-LABEL: load_i32_stride3_vf4:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,3,6,9]
-; AVX512BW-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512BW-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512BW-NEXT: vpermi2d %ymm2, %ymm1, %ymm0
-; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,4,7,10]
-; AVX512BW-NEXT: vpermi2d %ymm2, %ymm1, %ymm3
-; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,5,8,11]
-; AVX512BW-NEXT: vpermi2d %ymm2, %ymm1, %ymm4
-; AVX512BW-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512BW-NEXT: vmovdqa %xmm3, (%rdx)
-; AVX512BW-NEXT: vmovdqa %xmm4, (%rcx)
+; AVX512BW-NEXT: vmovaps (%rdi), %zmm1
+; AVX512BW-NEXT: vpermps %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm2 = [1,4,7,10]
+; AVX512BW-NEXT: vpermps %zmm1, %zmm2, %zmm2
+; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm3 = [2,5,8,11]
+; AVX512BW-NEXT: vpermps %zmm1, %zmm3, %zmm1
+; AVX512BW-NEXT: vmovaps %xmm0, (%rsi)
+; AVX512BW-NEXT: vmovaps %xmm2, (%rdx)
+; AVX512BW-NEXT: vmovaps %xmm1, (%rcx)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BW-FCP-LABEL: load_i32_stride3_vf4:
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,3,6,9]
-; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512BW-FCP-NEXT: vpermi2d %ymm2, %ymm1, %ymm0
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,4,7,10]
-; AVX512BW-FCP-NEXT: vpermi2d %ymm2, %ymm1, %ymm3
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,5,8,11]
-; AVX512BW-FCP-NEXT: vpermi2d %ymm2, %ymm1, %ymm4
-; AVX512BW-FCP-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512BW-FCP-NEXT: vmovdqa %xmm3, (%rdx)
-; AVX512BW-FCP-NEXT: vmovdqa %xmm4, (%rcx)
+; AVX512BW-FCP-NEXT: vmovaps (%rdi), %zmm1
+; AVX512BW-FCP-NEXT: vpermps %zmm1, %zmm0, %zmm0
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [1,4,7,10]
+; AVX512BW-FCP-NEXT: vpermps %zmm1, %zmm2, %zmm2
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [2,5,8,11]
+; AVX512BW-FCP-NEXT: vpermps %zmm1, %zmm3, %zmm1
+; AVX512BW-FCP-NEXT: vmovaps %xmm0, (%rsi)
+; AVX512BW-FCP-NEXT: vmovaps %xmm2, (%rdx)
+; AVX512BW-FCP-NEXT: vmovaps %xmm1, (%rcx)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
; AVX512DQ-BW-LABEL: load_i32_stride3_vf4:
; AVX512DQ-BW: # %bb.0:
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,3,6,9]
-; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512DQ-BW-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512DQ-BW-NEXT: vpermi2d %ymm2, %ymm1, %ymm0
-; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,4,7,10]
-; AVX512DQ-BW-NEXT: vpermi2d %ymm2, %ymm1, %ymm3
-; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,5,8,11]
-; AVX512DQ-BW-NEXT: vpermi2d %ymm2, %ymm1, %ymm4
-; AVX512DQ-BW-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512DQ-BW-NEXT: vmovdqa %xmm3, (%rdx)
-; AVX512DQ-BW-NEXT: vmovdqa %xmm4, (%rcx)
+; AVX512DQ-BW-NEXT: vmovaps (%rdi), %zmm1
+; AVX512DQ-BW-NEXT: vpermps %zmm1, %zmm0, %zmm0
+; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm2 = [1,4,7,10]
+; AVX512DQ-BW-NEXT: vpermps %zmm1, %zmm2, %zmm2
+; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm3 = [2,5,8,11]
+; AVX512DQ-BW-NEXT: vpermps %zmm1, %zmm3, %zmm1
+; AVX512DQ-BW-NEXT: vmovaps %xmm0, (%rsi)
+; AVX512DQ-BW-NEXT: vmovaps %xmm2, (%rdx)
+; AVX512DQ-BW-NEXT: vmovaps %xmm1, (%rcx)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
; AVX512DQ-BW-FCP-LABEL: load_i32_stride3_vf4:
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,3,6,9]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm2, %ymm1, %ymm0
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,4,7,10]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm2, %ymm1, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,5,8,11]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm2, %ymm1, %ymm4
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm3, (%rdx)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm4, (%rcx)
+; AVX512DQ-BW-FCP-NEXT: vmovaps (%rdi), %zmm1
+; AVX512DQ-BW-FCP-NEXT: vpermps %zmm1, %zmm0, %zmm0
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [1,4,7,10]
+; AVX512DQ-BW-FCP-NEXT: vpermps %zmm1, %zmm2, %zmm2
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [2,5,8,11]
+; AVX512DQ-BW-FCP-NEXT: vpermps %zmm1, %zmm3, %zmm1
+; AVX512DQ-BW-FCP-NEXT: vmovaps %xmm0, (%rsi)
+; AVX512DQ-BW-FCP-NEXT: vmovaps %xmm2, (%rdx)
+; AVX512DQ-BW-FCP-NEXT: vmovaps %xmm1, (%rcx)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%wide.vec = load <12 x i32>, ptr %in.vec, align 64
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-4.ll
index 61f91b2bb0c0cf..0bf12607384392 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-4.ll
@@ -106,13 +106,14 @@ define void @load_i32_stride4_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm0
; AVX512-FCP-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX512-FCP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,5,0,0]
-; AVX512-FCP-NEXT: vpermi2d %xmm1, %xmm0, %xmm3
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,5,1,1]
+; AVX512-FCP-NEXT: vpermps (%rdi), %ymm3, %ymm3
; AVX512-FCP-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX512-FCP-NEXT: vmovq %xmm2, (%rsi)
-; AVX512-FCP-NEXT: vmovq %xmm3, (%rdx)
+; AVX512-FCP-NEXT: vmovlps %xmm3, (%rdx)
; AVX512-FCP-NEXT: vmovq %xmm0, (%rcx)
; AVX512-FCP-NEXT: vpextrq $1, %xmm0, (%r8)
+; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
; AVX512DQ-LABEL: load_i32_stride4_vf2:
@@ -134,13 +135,14 @@ define void @load_i32_stride4_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm0
; AVX512DQ-FCP-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX512DQ-FCP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,5,0,0]
-; AVX512DQ-FCP-NEXT: vpermi2d %xmm1, %xmm0, %xmm3
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,5,1,1]
+; AVX512DQ-FCP-NEXT: vpermps (%rdi), %ymm3, %ymm3
; AVX512DQ-FCP-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX512DQ-FCP-NEXT: vmovq %xmm2, (%rsi)
-; AVX512DQ-FCP-NEXT: vmovq %xmm3, (%rdx)
+; AVX512DQ-FCP-NEXT: vmovlps %xmm3, (%rdx)
; AVX512DQ-FCP-NEXT: vmovq %xmm0, (%rcx)
; AVX512DQ-FCP-NEXT: vpextrq $1, %xmm0, (%r8)
+; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
; AVX512BW-LABEL: load_i32_stride4_vf2:
@@ -162,13 +164,14 @@ define void @load_i32_stride4_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-FCP-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX512BW-FCP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,5,0,0]
-; AVX512BW-FCP-NEXT: vpermi2d %xmm1, %xmm0, %xmm3
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,5,1,1]
+; AVX512BW-FCP-NEXT: vpermps (%rdi), %ymm3, %ymm3
; AVX512BW-FCP-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX512BW-FCP-NEXT: vmovq %xmm2, (%rsi)
-; AVX512BW-FCP-NEXT: vmovq %xmm3, (%rdx)
+; AVX512BW-FCP-NEXT: vmovlps %xmm3, (%rdx)
; AVX512BW-FCP-NEXT: vmovq %xmm0, (%rcx)
; AVX512BW-FCP-NEXT: vpextrq $1, %xmm0, (%r8)
+; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
; AVX512DQ-BW-LABEL: load_i32_stride4_vf2:
@@ -190,13 +193,14 @@ define void @load_i32_stride4_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %xmm0
; AVX512DQ-BW-FCP-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX512DQ-BW-FCP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,5,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %xmm1, %xmm0, %xmm3
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,5,1,1]
+; AVX512DQ-BW-FCP-NEXT: vpermps (%rdi), %ymm3, %ymm3
; AVX512DQ-BW-FCP-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX512DQ-BW-FCP-NEXT: vmovq %xmm2, (%rsi)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm3, (%rdx)
+; AVX512DQ-BW-FCP-NEXT: vmovlps %xmm3, (%rdx)
; AVX512DQ-BW-FCP-NEXT: vmovq %xmm0, (%rcx)
; AVX512DQ-BW-FCP-NEXT: vpextrq $1, %xmm0, (%r8)
+; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%wide.vec = load <8 x i32>, ptr %in.vec, align 64
%strided.vec0 = shufflevector <8 x i32> %wide.vec, <8 x i32> poison, <2 x i32> <i32 0, i32 4>
@@ -361,152 +365,144 @@ define void @load_i32_stride4_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-LABEL: load_i32_stride4_vf4:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,4,8,12]
-; AVX512-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512-NEXT: vpermi2d %ymm2, %ymm1, %ymm0
-; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,5,9,13]
-; AVX512-NEXT: vpermi2d %ymm2, %ymm1, %ymm3
-; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,6,10,14]
-; AVX512-NEXT: vpermi2d %ymm2, %ymm1, %ymm4
-; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm5 = [3,7,11,15]
-; AVX512-NEXT: vpermi2d %ymm2, %ymm1, %ymm5
-; AVX512-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512-NEXT: vmovdqa %xmm3, (%rdx)
-; AVX512-NEXT: vmovdqa %xmm4, (%rcx)
-; AVX512-NEXT: vmovdqa %xmm5, (%r8)
+; AVX512-NEXT: vmovaps (%rdi), %zmm1
+; AVX512-NEXT: vpermps %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm2 = [1,5,9,13]
+; AVX512-NEXT: vpermps %zmm1, %zmm2, %zmm2
+; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm3 = [2,6,10,14]
+; AVX512-NEXT: vpermps %zmm1, %zmm3, %zmm3
+; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm4 = [3,7,11,15]
+; AVX512-NEXT: vpermps %zmm1, %zmm4, %zmm1
+; AVX512-NEXT: vmovaps %xmm0, (%rsi)
+; AVX512-NEXT: vmovaps %xmm2, (%rdx)
+; AVX512-NEXT: vmovaps %xmm3, (%rcx)
+; AVX512-NEXT: vmovaps %xmm1, (%r8)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
; AVX512-FCP-LABEL: load_i32_stride4_vf4:
; AVX512-FCP: # %bb.0:
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,4,8,12]
-; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512-FCP-NEXT: vpermi2d %ymm2, %ymm1, %ymm0
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,5,9,13]
-; AVX512-FCP-NEXT: vpermi2d %ymm2, %ymm1, %ymm3
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,6,10,14]
-; AVX512-FCP-NEXT: vpermi2d %ymm2, %ymm1, %ymm4
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [3,7,11,15]
-; AVX512-FCP-NEXT: vpermi2d %ymm2, %ymm1, %ymm5
-; AVX512-FCP-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512-FCP-NEXT: vmovdqa %xmm3, (%rdx)
-; AVX512-FCP-NEXT: vmovdqa %xmm4, (%rcx)
-; AVX512-FCP-NEXT: vmovdqa %xmm5, (%r8)
+; AVX512-FCP-NEXT: vmovaps (%rdi), %zmm1
+; AVX512-FCP-NEXT: vpermps %zmm1, %zmm0, %zmm0
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [1,5,9,13]
+; AVX512-FCP-NEXT: vpermps %zmm1, %zmm2, %zmm2
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [2,6,10,14]
+; AVX512-FCP-NEXT: vpermps %zmm1, %zmm3, %zmm3
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [3,7,11,15]
+; AVX512-FCP-NEXT: vpermps %zmm1, %zmm4, %zmm1
+; AVX512-FCP-NEXT: vmovaps %xmm0, (%rsi)
+; AVX512-FCP-NEXT: vmovaps %xmm2, (%rdx)
+; AVX512-FCP-NEXT: vmovaps %xmm3, (%rcx)
+; AVX512-FCP-NEXT: vmovaps %xmm1, (%r8)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
; AVX512DQ-LABEL: load_i32_stride4_vf4:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,4,8,12]
-; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512DQ-NEXT: vpermi2d %ymm2, %ymm1, %ymm0
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,5,9,13]
-; AVX512DQ-NEXT: vpermi2d %ymm2, %ymm1, %ymm3
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,6,10,14]
-; AVX512DQ-NEXT: vpermi2d %ymm2, %ymm1, %ymm4
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm5 = [3,7,11,15]
-; AVX512DQ-NEXT: vpermi2d %ymm2, %ymm1, %ymm5
-; AVX512DQ-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512DQ-NEXT: vmovdqa %xmm3, (%rdx)
-; AVX512DQ-NEXT: vmovdqa %xmm4, (%rcx)
-; AVX512DQ-NEXT: vmovdqa %xmm5, (%r8)
+; AVX512DQ-NEXT: vmovaps (%rdi), %zmm1
+; AVX512DQ-NEXT: vpermps %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm2 = [1,5,9,13]
+; AVX512DQ-NEXT: vpermps %zmm1, %zmm2, %zmm2
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm3 = [2,6,10,14]
+; AVX512DQ-NEXT: vpermps %zmm1, %zmm3, %zmm3
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm4 = [3,7,11,15]
+; AVX512DQ-NEXT: vpermps %zmm1, %zmm4, %zmm1
+; AVX512DQ-NEXT: vmovaps %xmm0, (%rsi)
+; AVX512DQ-NEXT: vmovaps %xmm2, (%rdx)
+; AVX512DQ-NEXT: vmovaps %xmm3, (%rcx)
+; AVX512DQ-NEXT: vmovaps %xmm1, (%r8)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQ-FCP-LABEL: load_i32_stride4_vf4:
; AVX512DQ-FCP: # %bb.0:
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,4,8,12]
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512DQ-FCP-NEXT: vpermi2d %ymm2, %ymm1, %ymm0
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,5,9,13]
-; AVX512DQ-FCP-NEXT: vpermi2d %ymm2, %ymm1, %ymm3
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,6,10,14]
-; AVX512DQ-FCP-NEXT: vpermi2d %ymm2, %ymm1, %ymm4
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [3,7,11,15]
-; AVX512DQ-FCP-NEXT: vpermi2d %ymm2, %ymm1, %ymm5
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm3, (%rdx)
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm4, (%rcx)
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm5, (%r8)
+; AVX512DQ-FCP-NEXT: vmovaps (%rdi), %zmm1
+; AVX512DQ-FCP-NEXT: vpermps %zmm1, %zmm0, %zmm0
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [1,5,9,13]
+; AVX512DQ-FCP-NEXT: vpermps %zmm1, %zmm2, %zmm2
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [2,6,10,14]
+; AVX512DQ-FCP-NEXT: vpermps %zmm1, %zmm3, %zmm3
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [3,7,11,15]
+; AVX512DQ-FCP-NEXT: vpermps %zmm1, %zmm4, %zmm1
+; AVX512DQ-FCP-NEXT: vmovaps %xmm0, (%rsi)
+; AVX512DQ-FCP-NEXT: vmovaps %xmm2, (%rdx)
+; AVX512DQ-FCP-NEXT: vmovaps %xmm3, (%rcx)
+; AVX512DQ-FCP-NEXT: vmovaps %xmm1, (%r8)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
; AVX512BW-LABEL: load_i32_stride4_vf4:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,4,8,12]
-; AVX512BW-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512BW-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512BW-NEXT: vpermi2d %ymm2, %ymm1, %ymm0
-; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,5,9,13]
-; AVX512BW-NEXT: vpermi2d %ymm2, %ymm1, %ymm3
-; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,6,10,14]
-; AVX512BW-NEXT: vpermi2d %ymm2, %ymm1, %ymm4
-; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm5 = [3,7,11,15]
-; AVX512BW-NEXT: vpermi2d %ymm2, %ymm1, %ymm5
-; AVX512BW-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512BW-NEXT: vmovdqa %xmm3, (%rdx)
-; AVX512BW-NEXT: vmovdqa %xmm4, (%rcx)
-; AVX512BW-NEXT: vmovdqa %xmm5, (%r8)
+; AVX512BW-NEXT: vmovaps (%rdi), %zmm1
+; AVX512BW-NEXT: vpermps %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm2 = [1,5,9,13]
+; AVX512BW-NEXT: vpermps %zmm1, %zmm2, %zmm2
+; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm3 = [2,6,10,14]
+; AVX512BW-NEXT: vpermps %zmm1, %zmm3, %zmm3
+; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm4 = [3,7,11,15]
+; AVX512BW-NEXT: vpermps %zmm1, %zmm4, %zmm1
+; AVX512BW-NEXT: vmovaps %xmm0, (%rsi)
+; AVX512BW-NEXT: vmovaps %xmm2, (%rdx)
+; AVX512BW-NEXT: vmovaps %xmm3, (%rcx)
+; AVX512BW-NEXT: vmovaps %xmm1, (%r8)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BW-FCP-LABEL: load_i32_stride4_vf4:
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,4,8,12]
-; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512BW-FCP-NEXT: vpermi2d %ymm2, %ymm1, %ymm0
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,5,9,13]
-; AVX512BW-FCP-NEXT: vpermi2d %ymm2, %ymm1, %ymm3
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,6,10,14]
-; AVX512BW-FCP-NEXT: vpermi2d %ymm2, %ymm1, %ymm4
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [3,7,11,15]
-; AVX512BW-FCP-NEXT: vpermi2d %ymm2, %ymm1, %ymm5
-; AVX512BW-FCP-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512BW-FCP-NEXT: vmovdqa %xmm3, (%rdx)
-; AVX512BW-FCP-NEXT: vmovdqa %xmm4, (%rcx)
-; AVX512BW-FCP-NEXT: vmovdqa %xmm5, (%r8)
+; AVX512BW-FCP-NEXT: vmovaps (%rdi), %zmm1
+; AVX512BW-FCP-NEXT: vpermps %zmm1, %zmm0, %zmm0
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [1,5,9,13]
+; AVX512BW-FCP-NEXT: vpermps %zmm1, %zmm2, %zmm2
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [2,6,10,14]
+; AVX512BW-FCP-NEXT: vpermps %zmm1, %zmm3, %zmm3
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [3,7,11,15]
+; AVX512BW-FCP-NEXT: vpermps %zmm1, %zmm4, %zmm1
+; AVX512BW-FCP-NEXT: vmovaps %xmm0, (%rsi)
+; AVX512BW-FCP-NEXT: vmovaps %xmm2, (%rdx)
+; AVX512BW-FCP-NEXT: vmovaps %xmm3, (%rcx)
+; AVX512BW-FCP-NEXT: vmovaps %xmm1, (%r8)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
; AVX512DQ-BW-LABEL: load_i32_stride4_vf4:
; AVX512DQ-BW: # %bb.0:
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,4,8,12]
-; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512DQ-BW-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512DQ-BW-NEXT: vpermi2d %ymm2, %ymm1, %ymm0
-; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,5,9,13]
-; AVX512DQ-BW-NEXT: vpermi2d %ymm2, %ymm1, %ymm3
-; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,6,10,14]
-; AVX512DQ-BW-NEXT: vpermi2d %ymm2, %ymm1, %ymm4
-; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm5 = [3,7,11,15]
-; AVX512DQ-BW-NEXT: vpermi2d %ymm2, %ymm1, %ymm5
-; AVX512DQ-BW-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512DQ-BW-NEXT: vmovdqa %xmm3, (%rdx)
-; AVX512DQ-BW-NEXT: vmovdqa %xmm4, (%rcx)
-; AVX512DQ-BW-NEXT: vmovdqa %xmm5, (%r8)
+; AVX512DQ-BW-NEXT: vmovaps (%rdi), %zmm1
+; AVX512DQ-BW-NEXT: vpermps %zmm1, %zmm0, %zmm0
+; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm2 = [1,5,9,13]
+; AVX512DQ-BW-NEXT: vpermps %zmm1, %zmm2, %zmm2
+; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm3 = [2,6,10,14]
+; AVX512DQ-BW-NEXT: vpermps %zmm1, %zmm3, %zmm3
+; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm4 = [3,7,11,15]
+; AVX512DQ-BW-NEXT: vpermps %zmm1, %zmm4, %zmm1
+; AVX512DQ-BW-NEXT: vmovaps %xmm0, (%rsi)
+; AVX512DQ-BW-NEXT: vmovaps %xmm2, (%rdx)
+; AVX512DQ-BW-NEXT: vmovaps %xmm3, (%rcx)
+; AVX512DQ-BW-NEXT: vmovaps %xmm1, (%r8)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
; AVX512DQ-BW-FCP-LABEL: load_i32_stride4_vf4:
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,4,8,12]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm2, %ymm1, %ymm0
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,5,9,13]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm2, %ymm1, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,6,10,14]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm2, %ymm1, %ymm4
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [3,7,11,15]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm2, %ymm1, %ymm5
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm3, (%rdx)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm4, (%rcx)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm5, (%r8)
+; AVX512DQ-BW-FCP-NEXT: vmovaps (%rdi), %zmm1
+; AVX512DQ-BW-FCP-NEXT: vpermps %zmm1, %zmm0, %zmm0
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [1,5,9,13]
+; AVX512DQ-BW-FCP-NEXT: vpermps %zmm1, %zmm2, %zmm2
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [2,6,10,14]
+; AVX512DQ-BW-FCP-NEXT: vpermps %zmm1, %zmm3, %zmm3
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [3,7,11,15]
+; AVX512DQ-BW-FCP-NEXT: vpermps %zmm1, %zmm4, %zmm1
+; AVX512DQ-BW-FCP-NEXT: vmovaps %xmm0, (%rsi)
+; AVX512DQ-BW-FCP-NEXT: vmovaps %xmm2, (%rdx)
+; AVX512DQ-BW-FCP-NEXT: vmovaps %xmm3, (%rcx)
+; AVX512DQ-BW-FCP-NEXT: vmovaps %xmm1, (%r8)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%wide.vec = load <16 x i32>, ptr %in.vec, align 64
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-5.ll
index d8d48b0b8c73d3..c08442f9d9d01a 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-5.ll
@@ -144,19 +144,19 @@ define void @load_i32_stride5_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-LABEL: load_i32_stride5_vf2:
; AVX512-FCP: # %bb.0:
; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm0
-; AVX512-FCP-NEXT: vmovdqa 16(%rdi), %xmm1
-; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %xmm2
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm0[0],xmm1[1],xmm0[2,3]
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} xmm4 = [1,6,1,6]
-; AVX512-FCP-NEXT: vpermi2d %xmm1, %xmm0, %xmm4
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} xmm5 = [2,7,2,7]
-; AVX512-FCP-NEXT: vpermi2d %xmm1, %xmm0, %xmm5
-; AVX512-FCP-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[12,13,14,15],xmm2[0,1,2,3,4,5,6,7,8,9,10,11]
-; AVX512-FCP-NEXT: vpbroadcastd 16(%rdi), %ymm1
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
-; AVX512-FCP-NEXT: vmovq %xmm3, (%rsi)
-; AVX512-FCP-NEXT: vmovq %xmm4, (%rdx)
-; AVX512-FCP-NEXT: vmovq %xmm5, (%rcx)
+; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %xmm1
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm0[0],mem[1],xmm0[2,3]
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,6,0,0]
+; AVX512-FCP-NEXT: vmovaps (%rdi), %ymm4
+; AVX512-FCP-NEXT: vpermps %ymm4, %ymm3, %ymm3
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [2,7,0,0]
+; AVX512-FCP-NEXT: vpermps %ymm4, %ymm5, %ymm4
+; AVX512-FCP-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10,11]
+; AVX512-FCP-NEXT: vpbroadcastd 16(%rdi), %ymm5
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm5[0],xmm1[1],xmm5[2,3]
+; AVX512-FCP-NEXT: vmovq %xmm2, (%rsi)
+; AVX512-FCP-NEXT: vmovlps %xmm3, (%rdx)
+; AVX512-FCP-NEXT: vmovlps %xmm4, (%rcx)
; AVX512-FCP-NEXT: vmovq %xmm0, (%r8)
; AVX512-FCP-NEXT: vmovq %xmm1, (%r9)
; AVX512-FCP-NEXT: vzeroupper
@@ -188,19 +188,19 @@ define void @load_i32_stride5_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-LABEL: load_i32_stride5_vf2:
; AVX512DQ-FCP: # %bb.0:
; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm0
-; AVX512DQ-FCP-NEXT: vmovdqa 16(%rdi), %xmm1
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %xmm2
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm0[0],xmm1[1],xmm0[2,3]
-; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} xmm4 = [1,6,1,6]
-; AVX512DQ-FCP-NEXT: vpermi2d %xmm1, %xmm0, %xmm4
-; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} xmm5 = [2,7,2,7]
-; AVX512DQ-FCP-NEXT: vpermi2d %xmm1, %xmm0, %xmm5
-; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[12,13,14,15],xmm2[0,1,2,3,4,5,6,7,8,9,10,11]
-; AVX512DQ-FCP-NEXT: vpbroadcastd 16(%rdi), %ymm1
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
-; AVX512DQ-FCP-NEXT: vmovq %xmm3, (%rsi)
-; AVX512DQ-FCP-NEXT: vmovq %xmm4, (%rdx)
-; AVX512DQ-FCP-NEXT: vmovq %xmm5, (%rcx)
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %xmm1
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm0[0],mem[1],xmm0[2,3]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,6,0,0]
+; AVX512DQ-FCP-NEXT: vmovaps (%rdi), %ymm4
+; AVX512DQ-FCP-NEXT: vpermps %ymm4, %ymm3, %ymm3
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [2,7,0,0]
+; AVX512DQ-FCP-NEXT: vpermps %ymm4, %ymm5, %ymm4
+; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10,11]
+; AVX512DQ-FCP-NEXT: vpbroadcastd 16(%rdi), %ymm5
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm5[0],xmm1[1],xmm5[2,3]
+; AVX512DQ-FCP-NEXT: vmovq %xmm2, (%rsi)
+; AVX512DQ-FCP-NEXT: vmovlps %xmm3, (%rdx)
+; AVX512DQ-FCP-NEXT: vmovlps %xmm4, (%rcx)
; AVX512DQ-FCP-NEXT: vmovq %xmm0, (%r8)
; AVX512DQ-FCP-NEXT: vmovq %xmm1, (%r9)
; AVX512DQ-FCP-NEXT: vzeroupper
@@ -232,19 +232,19 @@ define void @load_i32_stride5_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-FCP-LABEL: load_i32_stride5_vf2:
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %xmm0
-; AVX512BW-FCP-NEXT: vmovdqa 16(%rdi), %xmm1
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm2
-; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm0[0],xmm1[1],xmm0[2,3]
-; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm4 = [1,6,1,6]
-; AVX512BW-FCP-NEXT: vpermi2d %xmm1, %xmm0, %xmm4
-; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm5 = [2,7,2,7]
-; AVX512BW-FCP-NEXT: vpermi2d %xmm1, %xmm0, %xmm5
-; AVX512BW-FCP-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[12,13,14,15],xmm2[0,1,2,3,4,5,6,7,8,9,10,11]
-; AVX512BW-FCP-NEXT: vpbroadcastd 16(%rdi), %ymm1
-; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
-; AVX512BW-FCP-NEXT: vmovq %xmm3, (%rsi)
-; AVX512BW-FCP-NEXT: vmovq %xmm4, (%rdx)
-; AVX512BW-FCP-NEXT: vmovq %xmm5, (%rcx)
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm1
+; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm0[0],mem[1],xmm0[2,3]
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,6,0,0]
+; AVX512BW-FCP-NEXT: vmovaps (%rdi), %ymm4
+; AVX512BW-FCP-NEXT: vpermps %ymm4, %ymm3, %ymm3
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [2,7,0,0]
+; AVX512BW-FCP-NEXT: vpermps %ymm4, %ymm5, %ymm4
+; AVX512BW-FCP-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10,11]
+; AVX512BW-FCP-NEXT: vpbroadcastd 16(%rdi), %ymm5
+; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm5[0],xmm1[1],xmm5[2,3]
+; AVX512BW-FCP-NEXT: vmovq %xmm2, (%rsi)
+; AVX512BW-FCP-NEXT: vmovlps %xmm3, (%rdx)
+; AVX512BW-FCP-NEXT: vmovlps %xmm4, (%rcx)
; AVX512BW-FCP-NEXT: vmovq %xmm0, (%r8)
; AVX512BW-FCP-NEXT: vmovq %xmm1, (%r9)
; AVX512BW-FCP-NEXT: vzeroupper
@@ -276,19 +276,19 @@ define void @load_i32_stride5_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-FCP-LABEL: load_i32_stride5_vf2:
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %xmm0
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 16(%rdi), %xmm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm2
-; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm0[0],xmm1[1],xmm0[2,3]
-; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm4 = [1,6,1,6]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %xmm1, %xmm0, %xmm4
-; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm5 = [2,7,2,7]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %xmm1, %xmm0, %xmm5
-; AVX512DQ-BW-FCP-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[12,13,14,15],xmm2[0,1,2,3,4,5,6,7,8,9,10,11]
-; AVX512DQ-BW-FCP-NEXT: vpbroadcastd 16(%rdi), %ymm1
-; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm3, (%rsi)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm4, (%rdx)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm5, (%rcx)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm1
+; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm0[0],mem[1],xmm0[2,3]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,6,0,0]
+; AVX512DQ-BW-FCP-NEXT: vmovaps (%rdi), %ymm4
+; AVX512DQ-BW-FCP-NEXT: vpermps %ymm4, %ymm3, %ymm3
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [2,7,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermps %ymm4, %ymm5, %ymm4
+; AVX512DQ-BW-FCP-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10,11]
+; AVX512DQ-BW-FCP-NEXT: vpbroadcastd 16(%rdi), %ymm5
+; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm5[0],xmm1[1],xmm5[2,3]
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm2, (%rsi)
+; AVX512DQ-BW-FCP-NEXT: vmovlps %xmm3, (%rdx)
+; AVX512DQ-BW-FCP-NEXT: vmovlps %xmm4, (%rcx)
; AVX512DQ-BW-FCP-NEXT: vmovq %xmm0, (%r8)
; AVX512DQ-BW-FCP-NEXT: vmovq %xmm1, (%r9)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
@@ -491,18 +491,17 @@ define void @load_i32_stride5_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm2 = [0,5,10,15]
-; AVX512-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512-NEXT: vpermt2d 32(%rdi), %ymm2, %ymm3
-; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm2 = [1,6,11,16]
-; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpermd %zmm0, %zmm2, %zmm2
+; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,6,11,16]
+; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,7,12,17]
; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm4
; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm5 = [3,8,13,18]
; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm6 = [4,9,14,19]
; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
-; AVX512-NEXT: vmovdqa %xmm3, (%rsi)
-; AVX512-NEXT: vmovdqa %xmm2, (%rdx)
+; AVX512-NEXT: vmovdqa %xmm2, (%rsi)
+; AVX512-NEXT: vmovdqa %xmm3, (%rdx)
; AVX512-NEXT: vmovdqa %xmm4, (%rcx)
; AVX512-NEXT: vmovdqa %xmm5, (%r8)
; AVX512-NEXT: vmovdqa %xmm6, (%r9)
@@ -514,18 +513,17 @@ define void @load_i32_stride5_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [0,5,10,15]
-; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512-FCP-NEXT: vpermt2d 32(%rdi), %ymm2, %ymm3
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [1,6,11,16]
-; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; AVX512-FCP-NEXT: vpermd %zmm0, %zmm2, %zmm2
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,6,11,16]
+; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,7,12,17]
; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm4
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [3,8,13,18]
; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [4,9,14,19]
; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
-; AVX512-FCP-NEXT: vmovdqa %xmm3, (%rsi)
-; AVX512-FCP-NEXT: vmovdqa %xmm2, (%rdx)
+; AVX512-FCP-NEXT: vmovdqa %xmm2, (%rsi)
+; AVX512-FCP-NEXT: vmovdqa %xmm3, (%rdx)
; AVX512-FCP-NEXT: vmovdqa %xmm4, (%rcx)
; AVX512-FCP-NEXT: vmovdqa %xmm5, (%r8)
; AVX512-FCP-NEXT: vmovdqa %xmm6, (%r9)
@@ -537,18 +535,17 @@ define void @load_i32_stride5_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512DQ-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm2 = [0,5,10,15]
-; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512DQ-NEXT: vpermt2d 32(%rdi), %ymm2, %ymm3
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm2 = [1,6,11,16]
-; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; AVX512DQ-NEXT: vpermd %zmm0, %zmm2, %zmm2
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,6,11,16]
+; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,7,12,17]
; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm4
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm5 = [3,8,13,18]
; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm6 = [4,9,14,19]
; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
-; AVX512DQ-NEXT: vmovdqa %xmm3, (%rsi)
-; AVX512DQ-NEXT: vmovdqa %xmm2, (%rdx)
+; AVX512DQ-NEXT: vmovdqa %xmm2, (%rsi)
+; AVX512DQ-NEXT: vmovdqa %xmm3, (%rdx)
; AVX512DQ-NEXT: vmovdqa %xmm4, (%rcx)
; AVX512DQ-NEXT: vmovdqa %xmm5, (%r8)
; AVX512DQ-NEXT: vmovdqa %xmm6, (%r9)
@@ -560,18 +557,17 @@ define void @load_i32_stride5_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [0,5,10,15]
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512DQ-FCP-NEXT: vpermt2d 32(%rdi), %ymm2, %ymm3
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [1,6,11,16]
-; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; AVX512DQ-FCP-NEXT: vpermd %zmm0, %zmm2, %zmm2
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,6,11,16]
+; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,7,12,17]
; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm4
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [3,8,13,18]
; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [4,9,14,19]
; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm3, (%rsi)
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm2, (%rdx)
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm2, (%rsi)
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm3, (%rdx)
; AVX512DQ-FCP-NEXT: vmovdqa %xmm4, (%rcx)
; AVX512DQ-FCP-NEXT: vmovdqa %xmm5, (%r8)
; AVX512DQ-FCP-NEXT: vmovdqa %xmm6, (%r9)
@@ -583,18 +579,17 @@ define void @load_i32_stride5_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm2 = [0,5,10,15]
-; AVX512BW-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512BW-NEXT: vpermt2d 32(%rdi), %ymm2, %ymm3
-; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm2 = [1,6,11,16]
-; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; AVX512BW-NEXT: vpermd %zmm0, %zmm2, %zmm2
+; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,6,11,16]
+; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,7,12,17]
; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm4
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm5 = [3,8,13,18]
; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm6 = [4,9,14,19]
; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
-; AVX512BW-NEXT: vmovdqa %xmm3, (%rsi)
-; AVX512BW-NEXT: vmovdqa %xmm2, (%rdx)
+; AVX512BW-NEXT: vmovdqa %xmm2, (%rsi)
+; AVX512BW-NEXT: vmovdqa %xmm3, (%rdx)
; AVX512BW-NEXT: vmovdqa %xmm4, (%rcx)
; AVX512BW-NEXT: vmovdqa %xmm5, (%r8)
; AVX512BW-NEXT: vmovdqa %xmm6, (%r9)
@@ -606,18 +601,17 @@ define void @load_i32_stride5_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [0,5,10,15]
-; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512BW-FCP-NEXT: vpermt2d 32(%rdi), %ymm2, %ymm3
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [1,6,11,16]
-; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; AVX512BW-FCP-NEXT: vpermd %zmm0, %zmm2, %zmm2
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,6,11,16]
+; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,7,12,17]
; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm4
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [3,8,13,18]
; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [4,9,14,19]
; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
-; AVX512BW-FCP-NEXT: vmovdqa %xmm3, (%rsi)
-; AVX512BW-FCP-NEXT: vmovdqa %xmm2, (%rdx)
+; AVX512BW-FCP-NEXT: vmovdqa %xmm2, (%rsi)
+; AVX512BW-FCP-NEXT: vmovdqa %xmm3, (%rdx)
; AVX512BW-FCP-NEXT: vmovdqa %xmm4, (%rcx)
; AVX512BW-FCP-NEXT: vmovdqa %xmm5, (%r8)
; AVX512BW-FCP-NEXT: vmovdqa %xmm6, (%r9)
@@ -629,18 +623,17 @@ define void @load_i32_stride5_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512DQ-BW-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm2 = [0,5,10,15]
-; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512DQ-BW-NEXT: vpermt2d 32(%rdi), %ymm2, %ymm3
-; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm2 = [1,6,11,16]
-; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; AVX512DQ-BW-NEXT: vpermd %zmm0, %zmm2, %zmm2
+; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,6,11,16]
+; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,7,12,17]
; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm4
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm5 = [3,8,13,18]
; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm6 = [4,9,14,19]
; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
-; AVX512DQ-BW-NEXT: vmovdqa %xmm3, (%rsi)
-; AVX512DQ-BW-NEXT: vmovdqa %xmm2, (%rdx)
+; AVX512DQ-BW-NEXT: vmovdqa %xmm2, (%rsi)
+; AVX512DQ-BW-NEXT: vmovdqa %xmm3, (%rdx)
; AVX512DQ-BW-NEXT: vmovdqa %xmm4, (%rcx)
; AVX512DQ-BW-NEXT: vmovdqa %xmm5, (%r8)
; AVX512DQ-BW-NEXT: vmovdqa %xmm6, (%r9)
@@ -652,18 +645,17 @@ define void @load_i32_stride5_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [0,5,10,15]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512DQ-BW-FCP-NEXT: vpermt2d 32(%rdi), %ymm2, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [1,6,11,16]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; AVX512DQ-BW-FCP-NEXT: vpermd %zmm0, %zmm2, %zmm2
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,6,11,16]
+; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,7,12,17]
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm4
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [3,8,13,18]
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [4,9,14,19]
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm3, (%rsi)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm2, (%rdx)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm2, (%rsi)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm3, (%rdx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm4, (%rcx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm5, (%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm6, (%r9)
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
index 3ba41ad07ce836..ae3e5445bf2667 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
@@ -192,29 +192,28 @@ define void @load_i32_stride6_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-LABEL: load_i32_stride6_vf2:
; AVX512-FCP: # %bb.0:
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} xmm0 = [0,6,0,6]
-; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm1
-; AVX512-FCP-NEXT: vmovdqa 16(%rdi), %xmm2
-; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %xmm3
-; AVX512-FCP-NEXT: vpermi2d %xmm2, %xmm1, %xmm0
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} xmm4 = [1,7,1,7]
-; AVX512-FCP-NEXT: vpermi2d %xmm2, %xmm1, %xmm4
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} xmm2 = [2,4,2,4]
-; AVX512-FCP-NEXT: vpermi2d %xmm3, %xmm1, %xmm2
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [3,5,0,0]
-; AVX512-FCP-NEXT: vpermi2d %xmm3, %xmm1, %xmm5
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm1 = [4,2,0,0]
-; AVX512-FCP-NEXT: vmovaps 32(%rdi), %ymm3
-; AVX512-FCP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],mem[4,5,6,7]
-; AVX512-FCP-NEXT: vpermps %ymm3, %ymm1, %ymm1
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [5,3,0,0]
-; AVX512-FCP-NEXT: vpermps %ymm3, %ymm6, %ymm3
-; AVX512-FCP-NEXT: vmovq %xmm0, (%rsi)
-; AVX512-FCP-NEXT: vmovq %xmm4, (%rdx)
-; AVX512-FCP-NEXT: vmovq %xmm2, (%rcx)
-; AVX512-FCP-NEXT: vmovq %xmm5, (%r8)
-; AVX512-FCP-NEXT: vmovlps %xmm1, (%r9)
-; AVX512-FCP-NEXT: vmovlps %xmm3, (%rax)
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,6,0,0]
+; AVX512-FCP-NEXT: vmovaps (%rdi), %ymm1
+; AVX512-FCP-NEXT: vpermps %ymm1, %ymm0, %ymm0
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [1,7,0,0]
+; AVX512-FCP-NEXT: vpermps %ymm1, %ymm2, %ymm2
+; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} xmm3 = [2,4,2,4]
+; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm4
+; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %xmm5
+; AVX512-FCP-NEXT: vpermi2d %xmm5, %xmm4, %xmm3
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [3,5,0,0]
+; AVX512-FCP-NEXT: vpermi2d %xmm5, %xmm4, %xmm6
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [4,2,0,0]
+; AVX512-FCP-NEXT: vblendps {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
+; AVX512-FCP-NEXT: vpermps %ymm1, %ymm4, %ymm4
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [5,3,0,0]
+; AVX512-FCP-NEXT: vpermps %ymm1, %ymm5, %ymm1
+; AVX512-FCP-NEXT: vmovlps %xmm0, (%rsi)
+; AVX512-FCP-NEXT: vmovlps %xmm2, (%rdx)
+; AVX512-FCP-NEXT: vmovq %xmm3, (%rcx)
+; AVX512-FCP-NEXT: vmovq %xmm6, (%r8)
+; AVX512-FCP-NEXT: vmovlps %xmm4, (%r9)
+; AVX512-FCP-NEXT: vmovlps %xmm1, (%rax)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
@@ -252,29 +251,28 @@ define void @load_i32_stride6_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-LABEL: load_i32_stride6_vf2:
; AVX512DQ-FCP: # %bb.0:
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} xmm0 = [0,6,0,6]
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm1
-; AVX512DQ-FCP-NEXT: vmovdqa 16(%rdi), %xmm2
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %xmm3
-; AVX512DQ-FCP-NEXT: vpermi2d %xmm2, %xmm1, %xmm0
-; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} xmm4 = [1,7,1,7]
-; AVX512DQ-FCP-NEXT: vpermi2d %xmm2, %xmm1, %xmm4
-; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} xmm2 = [2,4,2,4]
-; AVX512DQ-FCP-NEXT: vpermi2d %xmm3, %xmm1, %xmm2
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [3,5,0,0]
-; AVX512DQ-FCP-NEXT: vpermi2d %xmm3, %xmm1, %xmm5
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm1 = [4,2,0,0]
-; AVX512DQ-FCP-NEXT: vmovaps 32(%rdi), %ymm3
-; AVX512DQ-FCP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpermps %ymm3, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [5,3,0,0]
-; AVX512DQ-FCP-NEXT: vpermps %ymm3, %ymm6, %ymm3
-; AVX512DQ-FCP-NEXT: vmovq %xmm0, (%rsi)
-; AVX512DQ-FCP-NEXT: vmovq %xmm4, (%rdx)
-; AVX512DQ-FCP-NEXT: vmovq %xmm2, (%rcx)
-; AVX512DQ-FCP-NEXT: vmovq %xmm5, (%r8)
-; AVX512DQ-FCP-NEXT: vmovlps %xmm1, (%r9)
-; AVX512DQ-FCP-NEXT: vmovlps %xmm3, (%rax)
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,6,0,0]
+; AVX512DQ-FCP-NEXT: vmovaps (%rdi), %ymm1
+; AVX512DQ-FCP-NEXT: vpermps %ymm1, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [1,7,0,0]
+; AVX512DQ-FCP-NEXT: vpermps %ymm1, %ymm2, %ymm2
+; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} xmm3 = [2,4,2,4]
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm4
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %xmm5
+; AVX512DQ-FCP-NEXT: vpermi2d %xmm5, %xmm4, %xmm3
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [3,5,0,0]
+; AVX512DQ-FCP-NEXT: vpermi2d %xmm5, %xmm4, %xmm6
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [4,2,0,0]
+; AVX512DQ-FCP-NEXT: vblendps {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpermps %ymm1, %ymm4, %ymm4
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [5,3,0,0]
+; AVX512DQ-FCP-NEXT: vpermps %ymm1, %ymm5, %ymm1
+; AVX512DQ-FCP-NEXT: vmovlps %xmm0, (%rsi)
+; AVX512DQ-FCP-NEXT: vmovlps %xmm2, (%rdx)
+; AVX512DQ-FCP-NEXT: vmovq %xmm3, (%rcx)
+; AVX512DQ-FCP-NEXT: vmovq %xmm6, (%r8)
+; AVX512DQ-FCP-NEXT: vmovlps %xmm4, (%r9)
+; AVX512DQ-FCP-NEXT: vmovlps %xmm1, (%rax)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
@@ -312,29 +310,28 @@ define void @load_i32_stride6_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-FCP-LABEL: load_i32_stride6_vf2:
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm0 = [0,6,0,6]
-; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %xmm1
-; AVX512BW-FCP-NEXT: vmovdqa 16(%rdi), %xmm2
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm3
-; AVX512BW-FCP-NEXT: vpermi2d %xmm2, %xmm1, %xmm0
-; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm4 = [1,7,1,7]
-; AVX512BW-FCP-NEXT: vpermi2d %xmm2, %xmm1, %xmm4
-; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm2 = [2,4,2,4]
-; AVX512BW-FCP-NEXT: vpermi2d %xmm3, %xmm1, %xmm2
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [3,5,0,0]
-; AVX512BW-FCP-NEXT: vpermi2d %xmm3, %xmm1, %xmm5
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm1 = [4,2,0,0]
-; AVX512BW-FCP-NEXT: vmovaps 32(%rdi), %ymm3
-; AVX512BW-FCP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-FCP-NEXT: vpermps %ymm3, %ymm1, %ymm1
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [5,3,0,0]
-; AVX512BW-FCP-NEXT: vpermps %ymm3, %ymm6, %ymm3
-; AVX512BW-FCP-NEXT: vmovq %xmm0, (%rsi)
-; AVX512BW-FCP-NEXT: vmovq %xmm4, (%rdx)
-; AVX512BW-FCP-NEXT: vmovq %xmm2, (%rcx)
-; AVX512BW-FCP-NEXT: vmovq %xmm5, (%r8)
-; AVX512BW-FCP-NEXT: vmovlps %xmm1, (%r9)
-; AVX512BW-FCP-NEXT: vmovlps %xmm3, (%rax)
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,6,0,0]
+; AVX512BW-FCP-NEXT: vmovaps (%rdi), %ymm1
+; AVX512BW-FCP-NEXT: vpermps %ymm1, %ymm0, %ymm0
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [1,7,0,0]
+; AVX512BW-FCP-NEXT: vpermps %ymm1, %ymm2, %ymm2
+; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm3 = [2,4,2,4]
+; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %xmm4
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm5
+; AVX512BW-FCP-NEXT: vpermi2d %xmm5, %xmm4, %xmm3
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [3,5,0,0]
+; AVX512BW-FCP-NEXT: vpermi2d %xmm5, %xmm4, %xmm6
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [4,2,0,0]
+; AVX512BW-FCP-NEXT: vblendps {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-FCP-NEXT: vpermps %ymm1, %ymm4, %ymm4
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [5,3,0,0]
+; AVX512BW-FCP-NEXT: vpermps %ymm1, %ymm5, %ymm1
+; AVX512BW-FCP-NEXT: vmovlps %xmm0, (%rsi)
+; AVX512BW-FCP-NEXT: vmovlps %xmm2, (%rdx)
+; AVX512BW-FCP-NEXT: vmovq %xmm3, (%rcx)
+; AVX512BW-FCP-NEXT: vmovq %xmm6, (%r8)
+; AVX512BW-FCP-NEXT: vmovlps %xmm4, (%r9)
+; AVX512BW-FCP-NEXT: vmovlps %xmm1, (%rax)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
@@ -372,29 +369,28 @@ define void @load_i32_stride6_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-FCP-LABEL: load_i32_stride6_vf2:
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm0 = [0,6,0,6]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %xmm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 16(%rdi), %xmm2
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm3
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %xmm2, %xmm1, %xmm0
-; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm4 = [1,7,1,7]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %xmm2, %xmm1, %xmm4
-; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm2 = [2,4,2,4]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %xmm3, %xmm1, %xmm2
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [3,5,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %xmm3, %xmm1, %xmm5
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm1 = [4,2,0,0]
-; AVX512DQ-BW-FCP-NEXT: vmovaps 32(%rdi), %ymm3
-; AVX512DQ-BW-FCP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vpermps %ymm3, %ymm1, %ymm1
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [5,3,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermps %ymm3, %ymm6, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm0, (%rsi)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm4, (%rdx)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm2, (%rcx)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm5, (%r8)
-; AVX512DQ-BW-FCP-NEXT: vmovlps %xmm1, (%r9)
-; AVX512DQ-BW-FCP-NEXT: vmovlps %xmm3, (%rax)
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,6,0,0]
+; AVX512DQ-BW-FCP-NEXT: vmovaps (%rdi), %ymm1
+; AVX512DQ-BW-FCP-NEXT: vpermps %ymm1, %ymm0, %ymm0
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [1,7,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermps %ymm1, %ymm2, %ymm2
+; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm3 = [2,4,2,4]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %xmm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm5
+; AVX512DQ-BW-FCP-NEXT: vpermi2d %xmm5, %xmm4, %xmm3
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [3,5,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermi2d %xmm5, %xmm4, %xmm6
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [4,2,0,0]
+; AVX512DQ-BW-FCP-NEXT: vblendps {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vpermps %ymm1, %ymm4, %ymm4
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [5,3,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermps %ymm1, %ymm5, %ymm1
+; AVX512DQ-BW-FCP-NEXT: vmovlps %xmm0, (%rsi)
+; AVX512DQ-BW-FCP-NEXT: vmovlps %xmm2, (%rdx)
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm3, (%rcx)
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm6, (%r8)
+; AVX512DQ-BW-FCP-NEXT: vmovlps %xmm4, (%r9)
+; AVX512DQ-BW-FCP-NEXT: vmovlps %xmm1, (%rax)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%wide.vec = load <12 x i32>, ptr %in.vec, align 64
@@ -1291,352 +1287,360 @@ define void @load_i32_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-LABEL: load_i32_stride6_vf8:
; AVX512: # %bb.0:
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512-NEXT: vmovdqa64 (%rdi), %zmm2
-; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm3
; AVX512-NEXT: vmovdqa 128(%rdi), %ymm0
; AVX512-NEXT: vmovdqa 160(%rdi), %ymm1
-; AVX512-NEXT: vpblendd {{.*#+}} ymm4 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,6,12,18,24,30,0,0]
-; AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
-; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm6 = [0,1,2,3,4,5,12,10]
-; AVX512-NEXT: vpermi2d %ymm4, %ymm5, %ymm6
-; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm5 = [1,7,13,19,25,31,0,0]
-; AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
-; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,1,2,3,4,5,13,11]
-; AVX512-NEXT: vpermi2d %ymm4, %ymm5, %ymm7
-; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,0,0,6,12]
-; AVX512-NEXT: vpermi2d %ymm1, %ymm0, %ymm4
-; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm5 = [2,8,14,20,26,0,0,0]
-; AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
-; AVX512-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5,6,7]
-; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,0,0,0,0,1,7,13]
-; AVX512-NEXT: vpermi2d %ymm1, %ymm0, %ymm5
-; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm8 = [3,9,15,21,27,0,0,0]
-; AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm8
-; AVX512-NEXT: vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3,4],ymm5[5,6,7]
+; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,6,12,18,24,30,0,0]
+; AVX512-NEXT: vmovdqa64 (%rdi), %zmm4
+; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm5
+; AVX512-NEXT: vmovdqa64 128(%rdi), %zmm6
+; AVX512-NEXT: vpermi2d %zmm5, %zmm4, %zmm3
+; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,1,2,3,4,5,12,10]
+; AVX512-NEXT: vpermi2d %ymm2, %ymm3, %ymm7
+; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm3 = [1,7,13,19,25,31,0,0]
+; AVX512-NEXT: vpermi2d %zmm5, %zmm4, %zmm3
+; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm8 = [0,1,2,3,4,5,13,11]
+; AVX512-NEXT: vpermi2d %ymm2, %ymm3, %ymm8
+; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,0,2,8,0,0,6,12]
+; AVX512-NEXT: vpermd %zmm6, %zmm2, %zmm2
+; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm3 = [2,8,14,20,26,0,0,0]
+; AVX512-NEXT: vpermi2d %zmm5, %zmm4, %zmm3
+; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6,7]
+; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,1,3,9,0,1,7,13]
+; AVX512-NEXT: vpermd %zmm6, %zmm3, %zmm3
+; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm6 = [3,9,15,21,27,0,0,0]
+; AVX512-NEXT: vpermi2d %zmm5, %zmm4, %zmm6
+; AVX512-NEXT: vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3,4],ymm3[5,6,7]
; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm1 = [20,26,0,6,12,0,0,0]
-; AVX512-NEXT: vpermi2d %zmm2, %zmm3, %zmm1
-; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm8 = [0,1,2,3,4,10,8,14]
-; AVX512-NEXT: vpermi2d %ymm0, %ymm1, %ymm8
+; AVX512-NEXT: vpermi2d %zmm4, %zmm5, %zmm1
+; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm6 = [0,1,2,3,4,10,8,14]
+; AVX512-NEXT: vpermi2d %ymm0, %ymm1, %ymm6
; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm1 = [21,27,1,7,13,0,0,0]
-; AVX512-NEXT: vpermi2d %zmm2, %zmm3, %zmm1
-; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,1,2,3,4,11,9,15]
-; AVX512-NEXT: vpermi2d %ymm0, %ymm1, %ymm2
-; AVX512-NEXT: vmovdqa %ymm6, (%rsi)
-; AVX512-NEXT: vmovdqa %ymm7, (%rdx)
-; AVX512-NEXT: vmovdqa %ymm4, (%rcx)
-; AVX512-NEXT: vmovdqa %ymm5, (%r8)
-; AVX512-NEXT: vmovdqa %ymm8, (%r9)
-; AVX512-NEXT: vmovdqa %ymm2, (%rax)
+; AVX512-NEXT: vpermi2d %zmm4, %zmm5, %zmm1
+; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,1,2,3,4,11,9,15]
+; AVX512-NEXT: vpermi2d %ymm0, %ymm1, %ymm4
+; AVX512-NEXT: vmovdqa %ymm7, (%rsi)
+; AVX512-NEXT: vmovdqa %ymm8, (%rdx)
+; AVX512-NEXT: vmovdqa %ymm2, (%rcx)
+; AVX512-NEXT: vmovdqa %ymm3, (%r8)
+; AVX512-NEXT: vmovdqa %ymm6, (%r9)
+; AVX512-NEXT: vmovdqa %ymm4, (%rax)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
; AVX512-FCP-LABEL: load_i32_stride6_vf8:
; AVX512-FCP: # %bb.0:
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512-FCP-NEXT: vmovdqa64 (%rdi), %zmm2
-; AVX512-FCP-NEXT: vmovdqa64 64(%rdi), %zmm3
; AVX512-FCP-NEXT: vmovdqa 128(%rdi), %ymm0
; AVX512-FCP-NEXT: vmovdqa 160(%rdi), %ymm1
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,6,12,18,24,30,0,0]
-; AVX512-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [0,1,2,3,4,5,12,10]
-; AVX512-FCP-NEXT: vpermi2d %ymm4, %ymm5, %ymm6
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm5 = [1,7,13,19,25,31,0,0]
-; AVX512-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,1,2,3,4,5,13,11]
-; AVX512-FCP-NEXT: vpermi2d %ymm4, %ymm5, %ymm7
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,0,0,6,12]
-; AVX512-FCP-NEXT: vpermi2d %ymm1, %ymm0, %ymm4
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm5 = [2,8,14,20,26,0,0,0]
-; AVX512-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5,6,7]
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,0,0,0,0,1,7,13]
-; AVX512-FCP-NEXT: vpermi2d %ymm1, %ymm0, %ymm5
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [3,9,15,21,27,0,0,0]
-; AVX512-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm8
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3,4],ymm5[5,6,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,6,12,18,24,30,0,0]
+; AVX512-FCP-NEXT: vmovdqa64 (%rdi), %zmm4
+; AVX512-FCP-NEXT: vmovdqa64 64(%rdi), %zmm5
+; AVX512-FCP-NEXT: vmovdqa64 128(%rdi), %zmm6
+; AVX512-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm3
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,1,2,3,4,5,12,10]
+; AVX512-FCP-NEXT: vpermi2d %ymm2, %ymm3, %ymm7
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [1,7,13,19,25,31,0,0]
+; AVX512-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm3
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [0,1,2,3,4,5,13,11]
+; AVX512-FCP-NEXT: vpermi2d %ymm2, %ymm3, %ymm8
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,0,2,8,0,0,6,12]
+; AVX512-FCP-NEXT: vpermd %zmm6, %zmm2, %zmm2
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [2,8,14,20,26,0,0,0]
+; AVX512-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm3
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6,7]
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,1,3,9,0,1,7,13]
+; AVX512-FCP-NEXT: vpermd %zmm6, %zmm3, %zmm3
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [3,9,15,21,27,0,0,0]
+; AVX512-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm6
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3,4],ymm3[5,6,7]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [20,26,0,6,12,0,0,0]
-; AVX512-FCP-NEXT: vpermi2d %zmm2, %zmm3, %zmm1
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [0,1,2,3,4,10,8,14]
-; AVX512-FCP-NEXT: vpermi2d %ymm0, %ymm1, %ymm8
+; AVX512-FCP-NEXT: vpermi2d %zmm4, %zmm5, %zmm1
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [0,1,2,3,4,10,8,14]
+; AVX512-FCP-NEXT: vpermi2d %ymm0, %ymm1, %ymm6
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [21,27,1,7,13,0,0,0]
-; AVX512-FCP-NEXT: vpermi2d %zmm2, %zmm3, %zmm1
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,1,2,3,4,11,9,15]
-; AVX512-FCP-NEXT: vpermi2d %ymm0, %ymm1, %ymm2
-; AVX512-FCP-NEXT: vmovdqa %ymm6, (%rsi)
-; AVX512-FCP-NEXT: vmovdqa %ymm7, (%rdx)
-; AVX512-FCP-NEXT: vmovdqa %ymm4, (%rcx)
-; AVX512-FCP-NEXT: vmovdqa %ymm5, (%r8)
-; AVX512-FCP-NEXT: vmovdqa %ymm8, (%r9)
-; AVX512-FCP-NEXT: vmovdqa %ymm2, (%rax)
+; AVX512-FCP-NEXT: vpermi2d %zmm4, %zmm5, %zmm1
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,1,2,3,4,11,9,15]
+; AVX512-FCP-NEXT: vpermi2d %ymm0, %ymm1, %ymm4
+; AVX512-FCP-NEXT: vmovdqa %ymm7, (%rsi)
+; AVX512-FCP-NEXT: vmovdqa %ymm8, (%rdx)
+; AVX512-FCP-NEXT: vmovdqa %ymm2, (%rcx)
+; AVX512-FCP-NEXT: vmovdqa %ymm3, (%r8)
+; AVX512-FCP-NEXT: vmovdqa %ymm6, (%r9)
+; AVX512-FCP-NEXT: vmovdqa %ymm4, (%rax)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
; AVX512DQ-LABEL: load_i32_stride6_vf8:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-NEXT: vmovdqa64 (%rdi), %zmm2
-; AVX512DQ-NEXT: vmovdqa64 64(%rdi), %zmm3
; AVX512DQ-NEXT: vmovdqa 128(%rdi), %ymm0
; AVX512DQ-NEXT: vmovdqa 160(%rdi), %ymm1
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm4 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,6,12,18,24,30,0,0]
-; AVX512DQ-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm6 = [0,1,2,3,4,5,12,10]
-; AVX512DQ-NEXT: vpermi2d %ymm4, %ymm5, %ymm6
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm5 = [1,7,13,19,25,31,0,0]
-; AVX512DQ-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,1,2,3,4,5,13,11]
-; AVX512DQ-NEXT: vpermi2d %ymm4, %ymm5, %ymm7
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,0,0,6,12]
-; AVX512DQ-NEXT: vpermi2d %ymm1, %ymm0, %ymm4
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm5 = [2,8,14,20,26,0,0,0]
-; AVX512DQ-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5,6,7]
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,0,0,0,0,1,7,13]
-; AVX512DQ-NEXT: vpermi2d %ymm1, %ymm0, %ymm5
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm8 = [3,9,15,21,27,0,0,0]
-; AVX512DQ-NEXT: vpermi2d %zmm3, %zmm2, %zmm8
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3,4],ymm5[5,6,7]
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,6,12,18,24,30,0,0]
+; AVX512DQ-NEXT: vmovdqa64 (%rdi), %zmm4
+; AVX512DQ-NEXT: vmovdqa64 64(%rdi), %zmm5
+; AVX512DQ-NEXT: vmovdqa64 128(%rdi), %zmm6
+; AVX512DQ-NEXT: vpermi2d %zmm5, %zmm4, %zmm3
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,1,2,3,4,5,12,10]
+; AVX512DQ-NEXT: vpermi2d %ymm2, %ymm3, %ymm7
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm3 = [1,7,13,19,25,31,0,0]
+; AVX512DQ-NEXT: vpermi2d %zmm5, %zmm4, %zmm3
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm8 = [0,1,2,3,4,5,13,11]
+; AVX512DQ-NEXT: vpermi2d %ymm2, %ymm3, %ymm8
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,0,2,8,0,0,6,12]
+; AVX512DQ-NEXT: vpermd %zmm6, %zmm2, %zmm2
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm3 = [2,8,14,20,26,0,0,0]
+; AVX512DQ-NEXT: vpermi2d %zmm5, %zmm4, %zmm3
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6,7]
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,1,3,9,0,1,7,13]
+; AVX512DQ-NEXT: vpermd %zmm6, %zmm3, %zmm3
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm6 = [3,9,15,21,27,0,0,0]
+; AVX512DQ-NEXT: vpermi2d %zmm5, %zmm4, %zmm6
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3,4],ymm3[5,6,7]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm1 = [20,26,0,6,12,0,0,0]
-; AVX512DQ-NEXT: vpermi2d %zmm2, %zmm3, %zmm1
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm8 = [0,1,2,3,4,10,8,14]
-; AVX512DQ-NEXT: vpermi2d %ymm0, %ymm1, %ymm8
+; AVX512DQ-NEXT: vpermi2d %zmm4, %zmm5, %zmm1
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm6 = [0,1,2,3,4,10,8,14]
+; AVX512DQ-NEXT: vpermi2d %ymm0, %ymm1, %ymm6
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm1 = [21,27,1,7,13,0,0,0]
-; AVX512DQ-NEXT: vpermi2d %zmm2, %zmm3, %zmm1
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,1,2,3,4,11,9,15]
-; AVX512DQ-NEXT: vpermi2d %ymm0, %ymm1, %ymm2
-; AVX512DQ-NEXT: vmovdqa %ymm6, (%rsi)
-; AVX512DQ-NEXT: vmovdqa %ymm7, (%rdx)
-; AVX512DQ-NEXT: vmovdqa %ymm4, (%rcx)
-; AVX512DQ-NEXT: vmovdqa %ymm5, (%r8)
-; AVX512DQ-NEXT: vmovdqa %ymm8, (%r9)
-; AVX512DQ-NEXT: vmovdqa %ymm2, (%rax)
+; AVX512DQ-NEXT: vpermi2d %zmm4, %zmm5, %zmm1
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,1,2,3,4,11,9,15]
+; AVX512DQ-NEXT: vpermi2d %ymm0, %ymm1, %ymm4
+; AVX512DQ-NEXT: vmovdqa %ymm7, (%rsi)
+; AVX512DQ-NEXT: vmovdqa %ymm8, (%rdx)
+; AVX512DQ-NEXT: vmovdqa %ymm2, (%rcx)
+; AVX512DQ-NEXT: vmovdqa %ymm3, (%r8)
+; AVX512DQ-NEXT: vmovdqa %ymm6, (%r9)
+; AVX512DQ-NEXT: vmovdqa %ymm4, (%rax)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQ-FCP-LABEL: load_i32_stride6_vf8:
; AVX512DQ-FCP: # %bb.0:
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdi), %zmm2
-; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rdi), %zmm3
; AVX512DQ-FCP-NEXT: vmovdqa 128(%rdi), %ymm0
; AVX512DQ-FCP-NEXT: vmovdqa 160(%rdi), %ymm1
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,6,12,18,24,30,0,0]
-; AVX512DQ-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [0,1,2,3,4,5,12,10]
-; AVX512DQ-FCP-NEXT: vpermi2d %ymm4, %ymm5, %ymm6
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm5 = [1,7,13,19,25,31,0,0]
-; AVX512DQ-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,1,2,3,4,5,13,11]
-; AVX512DQ-FCP-NEXT: vpermi2d %ymm4, %ymm5, %ymm7
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,0,0,6,12]
-; AVX512DQ-FCP-NEXT: vpermi2d %ymm1, %ymm0, %ymm4
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm5 = [2,8,14,20,26,0,0,0]
-; AVX512DQ-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5,6,7]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,0,0,0,0,1,7,13]
-; AVX512DQ-FCP-NEXT: vpermi2d %ymm1, %ymm0, %ymm5
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [3,9,15,21,27,0,0,0]
-; AVX512DQ-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm8
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3,4],ymm5[5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,6,12,18,24,30,0,0]
+; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdi), %zmm4
+; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rdi), %zmm5
+; AVX512DQ-FCP-NEXT: vmovdqa64 128(%rdi), %zmm6
+; AVX512DQ-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm3
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,1,2,3,4,5,12,10]
+; AVX512DQ-FCP-NEXT: vpermi2d %ymm2, %ymm3, %ymm7
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [1,7,13,19,25,31,0,0]
+; AVX512DQ-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm3
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [0,1,2,3,4,5,13,11]
+; AVX512DQ-FCP-NEXT: vpermi2d %ymm2, %ymm3, %ymm8
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,0,2,8,0,0,6,12]
+; AVX512DQ-FCP-NEXT: vpermd %zmm6, %zmm2, %zmm2
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [2,8,14,20,26,0,0,0]
+; AVX512DQ-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm3
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6,7]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,1,3,9,0,1,7,13]
+; AVX512DQ-FCP-NEXT: vpermd %zmm6, %zmm3, %zmm3
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [3,9,15,21,27,0,0,0]
+; AVX512DQ-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm6
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3,4],ymm3[5,6,7]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [20,26,0,6,12,0,0,0]
-; AVX512DQ-FCP-NEXT: vpermi2d %zmm2, %zmm3, %zmm1
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [0,1,2,3,4,10,8,14]
-; AVX512DQ-FCP-NEXT: vpermi2d %ymm0, %ymm1, %ymm8
+; AVX512DQ-FCP-NEXT: vpermi2d %zmm4, %zmm5, %zmm1
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [0,1,2,3,4,10,8,14]
+; AVX512DQ-FCP-NEXT: vpermi2d %ymm0, %ymm1, %ymm6
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [21,27,1,7,13,0,0,0]
-; AVX512DQ-FCP-NEXT: vpermi2d %zmm2, %zmm3, %zmm1
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,1,2,3,4,11,9,15]
-; AVX512DQ-FCP-NEXT: vpermi2d %ymm0, %ymm1, %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm6, (%rsi)
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm7, (%rdx)
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm4, (%rcx)
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm5, (%r8)
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm8, (%r9)
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm2, (%rax)
+; AVX512DQ-FCP-NEXT: vpermi2d %zmm4, %zmm5, %zmm1
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,1,2,3,4,11,9,15]
+; AVX512DQ-FCP-NEXT: vpermi2d %ymm0, %ymm1, %ymm4
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm7, (%rsi)
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm8, (%rdx)
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm2, (%rcx)
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm3, (%r8)
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm6, (%r9)
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm4, (%rax)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
; AVX512BW-LABEL: load_i32_stride6_vf8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm2
-; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm3
; AVX512BW-NEXT: vmovdqa 128(%rdi), %ymm0
; AVX512BW-NEXT: vmovdqa 160(%rdi), %ymm1
-; AVX512BW-NEXT: vpblendd {{.*#+}} ymm4 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,6,12,18,24,30,0,0]
-; AVX512BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
-; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm6 = [0,1,2,3,4,5,12,10]
-; AVX512BW-NEXT: vpermi2d %ymm4, %ymm5, %ymm6
-; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm5 = [1,7,13,19,25,31,0,0]
-; AVX512BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
-; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,1,2,3,4,5,13,11]
-; AVX512BW-NEXT: vpermi2d %ymm4, %ymm5, %ymm7
-; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,0,0,6,12]
-; AVX512BW-NEXT: vpermi2d %ymm1, %ymm0, %ymm4
-; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm5 = [2,8,14,20,26,0,0,0]
-; AVX512BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
-; AVX512BW-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5,6,7]
-; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,0,0,0,0,1,7,13]
-; AVX512BW-NEXT: vpermi2d %ymm1, %ymm0, %ymm5
-; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm8 = [3,9,15,21,27,0,0,0]
-; AVX512BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm8
-; AVX512BW-NEXT: vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3,4],ymm5[5,6,7]
+; AVX512BW-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,6,12,18,24,30,0,0]
+; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm4
+; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm5
+; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm6
+; AVX512BW-NEXT: vpermi2d %zmm5, %zmm4, %zmm3
+; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,1,2,3,4,5,12,10]
+; AVX512BW-NEXT: vpermi2d %ymm2, %ymm3, %ymm7
+; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm3 = [1,7,13,19,25,31,0,0]
+; AVX512BW-NEXT: vpermi2d %zmm5, %zmm4, %zmm3
+; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm8 = [0,1,2,3,4,5,13,11]
+; AVX512BW-NEXT: vpermi2d %ymm2, %ymm3, %ymm8
+; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,0,2,8,0,0,6,12]
+; AVX512BW-NEXT: vpermd %zmm6, %zmm2, %zmm2
+; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm3 = [2,8,14,20,26,0,0,0]
+; AVX512BW-NEXT: vpermi2d %zmm5, %zmm4, %zmm3
+; AVX512BW-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6,7]
+; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,1,3,9,0,1,7,13]
+; AVX512BW-NEXT: vpermd %zmm6, %zmm3, %zmm3
+; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm6 = [3,9,15,21,27,0,0,0]
+; AVX512BW-NEXT: vpermi2d %zmm5, %zmm4, %zmm6
+; AVX512BW-NEXT: vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3,4],ymm3[5,6,7]
; AVX512BW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm1 = [20,26,0,6,12,0,0,0]
-; AVX512BW-NEXT: vpermi2d %zmm2, %zmm3, %zmm1
-; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm8 = [0,1,2,3,4,10,8,14]
-; AVX512BW-NEXT: vpermi2d %ymm0, %ymm1, %ymm8
+; AVX512BW-NEXT: vpermi2d %zmm4, %zmm5, %zmm1
+; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm6 = [0,1,2,3,4,10,8,14]
+; AVX512BW-NEXT: vpermi2d %ymm0, %ymm1, %ymm6
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm1 = [21,27,1,7,13,0,0,0]
-; AVX512BW-NEXT: vpermi2d %zmm2, %zmm3, %zmm1
-; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,1,2,3,4,11,9,15]
-; AVX512BW-NEXT: vpermi2d %ymm0, %ymm1, %ymm2
-; AVX512BW-NEXT: vmovdqa %ymm6, (%rsi)
-; AVX512BW-NEXT: vmovdqa %ymm7, (%rdx)
-; AVX512BW-NEXT: vmovdqa %ymm4, (%rcx)
-; AVX512BW-NEXT: vmovdqa %ymm5, (%r8)
-; AVX512BW-NEXT: vmovdqa %ymm8, (%r9)
-; AVX512BW-NEXT: vmovdqa %ymm2, (%rax)
+; AVX512BW-NEXT: vpermi2d %zmm4, %zmm5, %zmm1
+; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,1,2,3,4,11,9,15]
+; AVX512BW-NEXT: vpermi2d %ymm0, %ymm1, %ymm4
+; AVX512BW-NEXT: vmovdqa %ymm7, (%rsi)
+; AVX512BW-NEXT: vmovdqa %ymm8, (%rdx)
+; AVX512BW-NEXT: vmovdqa %ymm2, (%rcx)
+; AVX512BW-NEXT: vmovdqa %ymm3, (%r8)
+; AVX512BW-NEXT: vmovdqa %ymm6, (%r9)
+; AVX512BW-NEXT: vmovdqa %ymm4, (%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BW-FCP-LABEL: load_i32_stride6_vf8:
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm2
-; AVX512BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm3
; AVX512BW-FCP-NEXT: vmovdqa 128(%rdi), %ymm0
; AVX512BW-FCP-NEXT: vmovdqa 160(%rdi), %ymm1
-; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,6,12,18,24,30,0,0]
-; AVX512BW-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [0,1,2,3,4,5,12,10]
-; AVX512BW-FCP-NEXT: vpermi2d %ymm4, %ymm5, %ymm6
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm5 = [1,7,13,19,25,31,0,0]
-; AVX512BW-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,1,2,3,4,5,13,11]
-; AVX512BW-FCP-NEXT: vpermi2d %ymm4, %ymm5, %ymm7
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,0,0,6,12]
-; AVX512BW-FCP-NEXT: vpermi2d %ymm1, %ymm0, %ymm4
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm5 = [2,8,14,20,26,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
-; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5,6,7]
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,0,0,0,0,1,7,13]
-; AVX512BW-FCP-NEXT: vpermi2d %ymm1, %ymm0, %ymm5
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [3,9,15,21,27,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm8
-; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3,4],ymm5[5,6,7]
+; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,6,12,18,24,30,0,0]
+; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm4
+; AVX512BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm5
+; AVX512BW-FCP-NEXT: vmovdqa64 128(%rdi), %zmm6
+; AVX512BW-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm3
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,1,2,3,4,5,12,10]
+; AVX512BW-FCP-NEXT: vpermi2d %ymm2, %ymm3, %ymm7
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [1,7,13,19,25,31,0,0]
+; AVX512BW-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm3
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [0,1,2,3,4,5,13,11]
+; AVX512BW-FCP-NEXT: vpermi2d %ymm2, %ymm3, %ymm8
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,0,2,8,0,0,6,12]
+; AVX512BW-FCP-NEXT: vpermd %zmm6, %zmm2, %zmm2
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [2,8,14,20,26,0,0,0]
+; AVX512BW-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm3
+; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6,7]
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,1,3,9,0,1,7,13]
+; AVX512BW-FCP-NEXT: vpermd %zmm6, %zmm3, %zmm3
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [3,9,15,21,27,0,0,0]
+; AVX512BW-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm6
+; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3,4],ymm3[5,6,7]
; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [20,26,0,6,12,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2d %zmm2, %zmm3, %zmm1
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [0,1,2,3,4,10,8,14]
-; AVX512BW-FCP-NEXT: vpermi2d %ymm0, %ymm1, %ymm8
+; AVX512BW-FCP-NEXT: vpermi2d %zmm4, %zmm5, %zmm1
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [0,1,2,3,4,10,8,14]
+; AVX512BW-FCP-NEXT: vpermi2d %ymm0, %ymm1, %ymm6
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [21,27,1,7,13,0,0,0]
-; AVX512BW-FCP-NEXT: vpermi2d %zmm2, %zmm3, %zmm1
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,1,2,3,4,11,9,15]
-; AVX512BW-FCP-NEXT: vpermi2d %ymm0, %ymm1, %ymm2
-; AVX512BW-FCP-NEXT: vmovdqa %ymm6, (%rsi)
-; AVX512BW-FCP-NEXT: vmovdqa %ymm7, (%rdx)
-; AVX512BW-FCP-NEXT: vmovdqa %ymm4, (%rcx)
-; AVX512BW-FCP-NEXT: vmovdqa %ymm5, (%r8)
-; AVX512BW-FCP-NEXT: vmovdqa %ymm8, (%r9)
-; AVX512BW-FCP-NEXT: vmovdqa %ymm2, (%rax)
+; AVX512BW-FCP-NEXT: vpermi2d %zmm4, %zmm5, %zmm1
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,1,2,3,4,11,9,15]
+; AVX512BW-FCP-NEXT: vpermi2d %ymm0, %ymm1, %ymm4
+; AVX512BW-FCP-NEXT: vmovdqa %ymm7, (%rsi)
+; AVX512BW-FCP-NEXT: vmovdqa %ymm8, (%rdx)
+; AVX512BW-FCP-NEXT: vmovdqa %ymm2, (%rcx)
+; AVX512BW-FCP-NEXT: vmovdqa %ymm3, (%r8)
+; AVX512BW-FCP-NEXT: vmovdqa %ymm6, (%r9)
+; AVX512BW-FCP-NEXT: vmovdqa %ymm4, (%rax)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
; AVX512DQ-BW-LABEL: load_i32_stride6_vf8:
; AVX512DQ-BW: # %bb.0:
; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-BW-NEXT: vmovdqa64 (%rdi), %zmm2
-; AVX512DQ-BW-NEXT: vmovdqa64 64(%rdi), %zmm3
; AVX512DQ-BW-NEXT: vmovdqa 128(%rdi), %ymm0
; AVX512DQ-BW-NEXT: vmovdqa 160(%rdi), %ymm1
-; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm4 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,6,12,18,24,30,0,0]
-; AVX512DQ-BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
-; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm6 = [0,1,2,3,4,5,12,10]
-; AVX512DQ-BW-NEXT: vpermi2d %ymm4, %ymm5, %ymm6
-; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm5 = [1,7,13,19,25,31,0,0]
-; AVX512DQ-BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
-; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,1,2,3,4,5,13,11]
-; AVX512DQ-BW-NEXT: vpermi2d %ymm4, %ymm5, %ymm7
-; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,0,0,6,12]
-; AVX512DQ-BW-NEXT: vpermi2d %ymm1, %ymm0, %ymm4
-; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm5 = [2,8,14,20,26,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
-; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5,6,7]
-; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,0,0,0,0,1,7,13]
-; AVX512DQ-BW-NEXT: vpermi2d %ymm1, %ymm0, %ymm5
-; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm8 = [3,9,15,21,27,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm8
-; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3,4],ymm5[5,6,7]
+; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,6,12,18,24,30,0,0]
+; AVX512DQ-BW-NEXT: vmovdqa64 (%rdi), %zmm4
+; AVX512DQ-BW-NEXT: vmovdqa64 64(%rdi), %zmm5
+; AVX512DQ-BW-NEXT: vmovdqa64 128(%rdi), %zmm6
+; AVX512DQ-BW-NEXT: vpermi2d %zmm5, %zmm4, %zmm3
+; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,1,2,3,4,5,12,10]
+; AVX512DQ-BW-NEXT: vpermi2d %ymm2, %ymm3, %ymm7
+; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm3 = [1,7,13,19,25,31,0,0]
+; AVX512DQ-BW-NEXT: vpermi2d %zmm5, %zmm4, %zmm3
+; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm8 = [0,1,2,3,4,5,13,11]
+; AVX512DQ-BW-NEXT: vpermi2d %ymm2, %ymm3, %ymm8
+; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,0,2,8,0,0,6,12]
+; AVX512DQ-BW-NEXT: vpermd %zmm6, %zmm2, %zmm2
+; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm3 = [2,8,14,20,26,0,0,0]
+; AVX512DQ-BW-NEXT: vpermi2d %zmm5, %zmm4, %zmm3
+; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6,7]
+; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,1,3,9,0,1,7,13]
+; AVX512DQ-BW-NEXT: vpermd %zmm6, %zmm3, %zmm3
+; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm6 = [3,9,15,21,27,0,0,0]
+; AVX512DQ-BW-NEXT: vpermi2d %zmm5, %zmm4, %zmm6
+; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3,4],ymm3[5,6,7]
; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm1 = [20,26,0,6,12,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2d %zmm2, %zmm3, %zmm1
-; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm8 = [0,1,2,3,4,10,8,14]
-; AVX512DQ-BW-NEXT: vpermi2d %ymm0, %ymm1, %ymm8
+; AVX512DQ-BW-NEXT: vpermi2d %zmm4, %zmm5, %zmm1
+; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm6 = [0,1,2,3,4,10,8,14]
+; AVX512DQ-BW-NEXT: vpermi2d %ymm0, %ymm1, %ymm6
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm1 = [21,27,1,7,13,0,0,0]
-; AVX512DQ-BW-NEXT: vpermi2d %zmm2, %zmm3, %zmm1
-; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,1,2,3,4,11,9,15]
-; AVX512DQ-BW-NEXT: vpermi2d %ymm0, %ymm1, %ymm2
-; AVX512DQ-BW-NEXT: vmovdqa %ymm6, (%rsi)
-; AVX512DQ-BW-NEXT: vmovdqa %ymm7, (%rdx)
-; AVX512DQ-BW-NEXT: vmovdqa %ymm4, (%rcx)
-; AVX512DQ-BW-NEXT: vmovdqa %ymm5, (%r8)
-; AVX512DQ-BW-NEXT: vmovdqa %ymm8, (%r9)
-; AVX512DQ-BW-NEXT: vmovdqa %ymm2, (%rax)
+; AVX512DQ-BW-NEXT: vpermi2d %zmm4, %zmm5, %zmm1
+; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,1,2,3,4,11,9,15]
+; AVX512DQ-BW-NEXT: vpermi2d %ymm0, %ymm1, %ymm4
+; AVX512DQ-BW-NEXT: vmovdqa %ymm7, (%rsi)
+; AVX512DQ-BW-NEXT: vmovdqa %ymm8, (%rdx)
+; AVX512DQ-BW-NEXT: vmovdqa %ymm2, (%rcx)
+; AVX512DQ-BW-NEXT: vmovdqa %ymm3, (%r8)
+; AVX512DQ-BW-NEXT: vmovdqa %ymm6, (%r9)
+; AVX512DQ-BW-NEXT: vmovdqa %ymm4, (%rax)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
; AVX512DQ-BW-FCP-LABEL: load_i32_stride6_vf8:
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm2
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm3
; AVX512DQ-BW-FCP-NEXT: vmovdqa 128(%rdi), %ymm0
; AVX512DQ-BW-FCP-NEXT: vmovdqa 160(%rdi), %ymm1
-; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,6,12,18,24,30,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [0,1,2,3,4,5,12,10]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm4, %ymm5, %ymm6
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm5 = [1,7,13,19,25,31,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,1,2,3,4,5,13,11]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm4, %ymm5, %ymm7
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,0,0,6,12]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm1, %ymm0, %ymm4
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm5 = [2,8,14,20,26,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
-; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,0,0,0,0,1,7,13]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm1, %ymm0, %ymm5
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [3,9,15,21,27,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm8
-; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3,4],ymm5[5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,6,12,18,24,30,0,0]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm5
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 128(%rdi), %zmm6
+; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm3
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,1,2,3,4,5,12,10]
+; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm2, %ymm3, %ymm7
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [1,7,13,19,25,31,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm3
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [0,1,2,3,4,5,13,11]
+; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm2, %ymm3, %ymm8
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,0,2,8,0,0,6,12]
+; AVX512DQ-BW-FCP-NEXT: vpermd %zmm6, %zmm2, %zmm2
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [2,8,14,20,26,0,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm3
+; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,1,3,9,0,1,7,13]
+; AVX512DQ-BW-FCP-NEXT: vpermd %zmm6, %zmm3, %zmm3
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [3,9,15,21,27,0,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm6
+; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3,4],ymm3[5,6,7]
; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [20,26,0,6,12,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm2, %zmm3, %zmm1
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [0,1,2,3,4,10,8,14]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm0, %ymm1, %ymm8
+; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm4, %zmm5, %zmm1
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [0,1,2,3,4,10,8,14]
+; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm0, %ymm1, %ymm6
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [21,27,1,7,13,0,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm2, %zmm3, %zmm1
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,1,2,3,4,11,9,15]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm0, %ymm1, %ymm2
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm6, (%rsi)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm7, (%rdx)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm4, (%rcx)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm5, (%r8)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm8, (%r9)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm2, (%rax)
+; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm4, %zmm5, %zmm1
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,1,2,3,4,11,9,15]
+; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm0, %ymm1, %ymm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm7, (%rsi)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm8, (%rdx)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm2, (%rcx)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm3, (%r8)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm6, (%r9)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm4, (%rax)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%wide.vec = load <48 x i32>, ptr %in.vec, align 64
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll
index d806253ef23a08..694f2bc53c515c 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll
@@ -204,22 +204,22 @@ define void @load_i32_stride7_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,2,2,3]
; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm1 = [4,11,0,0]
-; AVX512-NEXT: vmovdqa 32(%rdi), %ymm5
-; AVX512-NEXT: vmovdqa (%rdi), %ymm6
-; AVX512-NEXT: vpermi2d %ymm5, %ymm6, %ymm1
-; AVX512-NEXT: vpblendd {{.*#+}} ymm7 = ymm5[0],ymm6[1],ymm5[2,3,4],ymm6[5],ymm5[6,7]
-; AVX512-NEXT: vextracti128 $1, %ymm7, %xmm7
-; AVX512-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,0,2,3]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm6 = ymm6[2,3,2,3,6,7,6,7]
-; AVX512-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2,3,4],ymm5[5],ymm6[6,7]
-; AVX512-NEXT: vextracti128 $1, %ymm5, %xmm5
+; AVX512-NEXT: vpermps (%rdi), %zmm1, %zmm1
+; AVX512-NEXT: vmovaps (%rdi), %ymm5
+; AVX512-NEXT: vmovaps 32(%rdi), %ymm6
+; AVX512-NEXT: vblendps {{.*#+}} ymm7 = ymm6[0],ymm5[1],ymm6[2,3,4],ymm5[5],ymm6[6,7]
+; AVX512-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX512-NEXT: vshufps {{.*#+}} xmm7 = xmm7[1,0,2,3]
+; AVX512-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,3,2,3,6,7,6,7]
+; AVX512-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0],ymm6[1],ymm5[2,3,4],ymm6[5],ymm5[6,7]
+; AVX512-NEXT: vextractf128 $1, %ymm5, %xmm5
; AVX512-NEXT: vmovq %xmm2, (%rsi)
; AVX512-NEXT: vmovq %xmm3, (%rdx)
; AVX512-NEXT: vmovq %xmm4, (%rcx)
; AVX512-NEXT: vmovq %xmm0, (%r8)
-; AVX512-NEXT: vmovq %xmm1, (%r9)
-; AVX512-NEXT: vmovq %xmm7, (%r10)
-; AVX512-NEXT: vmovq %xmm5, (%rax)
+; AVX512-NEXT: vmovlps %xmm1, (%r9)
+; AVX512-NEXT: vmovlps %xmm7, (%r10)
+; AVX512-NEXT: vmovlps %xmm5, (%rax)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
@@ -227,30 +227,31 @@ define void @load_i32_stride7_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP: # %bb.0:
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm0
-; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %xmm1
-; AVX512-FCP-NEXT: vpinsrd $1, 28(%rdi), %xmm0, %xmm2
-; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} xmm3 = [1,4,1,4]
-; AVX512-FCP-NEXT: vpermi2d %xmm1, %xmm0, %xmm3
-; AVX512-FCP-NEXT: vpbroadcastd 8(%rdi), %xmm4
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0],xmm1[1],xmm4[2,3]
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [7,2,0,0]
-; AVX512-FCP-NEXT: vpermi2d %xmm0, %xmm1, %xmm5
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm0 = [4,11,0,0]
-; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm6
-; AVX512-FCP-NEXT: vpermi2d %ymm1, %ymm6, %ymm0
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm7 = [13,4,6,7]
-; AVX512-FCP-NEXT: vpermi2d %ymm6, %ymm1, %ymm7
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm8 = [6,13,6,7]
-; AVX512-FCP-NEXT: vpermi2d %ymm1, %ymm6, %ymm8
-; AVX512-FCP-NEXT: vmovq %xmm2, (%rsi)
-; AVX512-FCP-NEXT: vmovq %xmm3, (%rdx)
-; AVX512-FCP-NEXT: vmovq %xmm4, (%rcx)
-; AVX512-FCP-NEXT: vmovq %xmm5, (%r8)
-; AVX512-FCP-NEXT: vmovq %xmm0, (%r9)
+; AVX512-FCP-NEXT: vmovaps (%rdi), %zmm0
+; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm1
+; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %xmm2
+; AVX512-FCP-NEXT: vpinsrd $1, 28(%rdi), %xmm1, %xmm3
+; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} xmm4 = [1,4,1,4]
+; AVX512-FCP-NEXT: vpermi2d %xmm2, %xmm1, %xmm4
+; AVX512-FCP-NEXT: vpbroadcastd 8(%rdi), %xmm5
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0],xmm2[1],xmm5[2,3]
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [7,2,0,0]
+; AVX512-FCP-NEXT: vpermi2d %xmm1, %xmm2, %xmm6
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm1 = [4,11,0,0]
+; AVX512-FCP-NEXT: vpermps %zmm0, %zmm1, %zmm1
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [13,4,6,7]
+; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm7
+; AVX512-FCP-NEXT: vpermt2d (%rdi), %ymm2, %ymm7
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,0,0,0,6,13,6,7]
+; AVX512-FCP-NEXT: vpermps %zmm0, %zmm2, %zmm0
+; AVX512-FCP-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX512-FCP-NEXT: vmovq %xmm3, (%rsi)
+; AVX512-FCP-NEXT: vmovq %xmm4, (%rdx)
+; AVX512-FCP-NEXT: vmovq %xmm5, (%rcx)
+; AVX512-FCP-NEXT: vmovq %xmm6, (%r8)
+; AVX512-FCP-NEXT: vmovlps %xmm1, (%r9)
; AVX512-FCP-NEXT: vmovq %xmm7, (%r10)
-; AVX512-FCP-NEXT: vmovq %xmm8, (%rax)
+; AVX512-FCP-NEXT: vmovlps %xmm0, (%rax)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
@@ -269,22 +270,22 @@ define void @load_i32_stride7_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,2,2,3]
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm1 = [4,11,0,0]
-; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm5
-; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm6
-; AVX512DQ-NEXT: vpermi2d %ymm5, %ymm6, %ymm1
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm7 = ymm5[0],ymm6[1],ymm5[2,3,4],ymm6[5],ymm5[6,7]
-; AVX512DQ-NEXT: vextracti128 $1, %ymm7, %xmm7
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,0,2,3]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm6 = ymm6[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2,3,4],ymm5[5],ymm6[6,7]
-; AVX512DQ-NEXT: vextracti128 $1, %ymm5, %xmm5
+; AVX512DQ-NEXT: vpermps (%rdi), %zmm1, %zmm1
+; AVX512DQ-NEXT: vmovaps (%rdi), %ymm5
+; AVX512DQ-NEXT: vmovaps 32(%rdi), %ymm6
+; AVX512DQ-NEXT: vblendps {{.*#+}} ymm7 = ymm6[0],ymm5[1],ymm6[2,3,4],ymm5[5],ymm6[6,7]
+; AVX512DQ-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX512DQ-NEXT: vshufps {{.*#+}} xmm7 = xmm7[1,0,2,3]
+; AVX512DQ-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,3,2,3,6,7,6,7]
+; AVX512DQ-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0],ymm6[1],ymm5[2,3,4],ymm6[5],ymm5[6,7]
+; AVX512DQ-NEXT: vextractf128 $1, %ymm5, %xmm5
; AVX512DQ-NEXT: vmovq %xmm2, (%rsi)
; AVX512DQ-NEXT: vmovq %xmm3, (%rdx)
; AVX512DQ-NEXT: vmovq %xmm4, (%rcx)
; AVX512DQ-NEXT: vmovq %xmm0, (%r8)
-; AVX512DQ-NEXT: vmovq %xmm1, (%r9)
-; AVX512DQ-NEXT: vmovq %xmm7, (%r10)
-; AVX512DQ-NEXT: vmovq %xmm5, (%rax)
+; AVX512DQ-NEXT: vmovlps %xmm1, (%r9)
+; AVX512DQ-NEXT: vmovlps %xmm7, (%r10)
+; AVX512DQ-NEXT: vmovlps %xmm5, (%rax)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -292,30 +293,31 @@ define void @load_i32_stride7_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP: # %bb.0:
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm0
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %xmm1
-; AVX512DQ-FCP-NEXT: vpinsrd $1, 28(%rdi), %xmm0, %xmm2
-; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} xmm3 = [1,4,1,4]
-; AVX512DQ-FCP-NEXT: vpermi2d %xmm1, %xmm0, %xmm3
-; AVX512DQ-FCP-NEXT: vpbroadcastd 8(%rdi), %xmm4
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0],xmm1[1],xmm4[2,3]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [7,2,0,0]
-; AVX512DQ-FCP-NEXT: vpermi2d %xmm0, %xmm1, %xmm5
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm0 = [4,11,0,0]
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm6
-; AVX512DQ-FCP-NEXT: vpermi2d %ymm1, %ymm6, %ymm0
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm7 = [13,4,6,7]
-; AVX512DQ-FCP-NEXT: vpermi2d %ymm6, %ymm1, %ymm7
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm8 = [6,13,6,7]
-; AVX512DQ-FCP-NEXT: vpermi2d %ymm1, %ymm6, %ymm8
-; AVX512DQ-FCP-NEXT: vmovq %xmm2, (%rsi)
-; AVX512DQ-FCP-NEXT: vmovq %xmm3, (%rdx)
-; AVX512DQ-FCP-NEXT: vmovq %xmm4, (%rcx)
-; AVX512DQ-FCP-NEXT: vmovq %xmm5, (%r8)
-; AVX512DQ-FCP-NEXT: vmovq %xmm0, (%r9)
+; AVX512DQ-FCP-NEXT: vmovaps (%rdi), %zmm0
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm1
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %xmm2
+; AVX512DQ-FCP-NEXT: vpinsrd $1, 28(%rdi), %xmm1, %xmm3
+; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} xmm4 = [1,4,1,4]
+; AVX512DQ-FCP-NEXT: vpermi2d %xmm2, %xmm1, %xmm4
+; AVX512DQ-FCP-NEXT: vpbroadcastd 8(%rdi), %xmm5
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0],xmm2[1],xmm5[2,3]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [7,2,0,0]
+; AVX512DQ-FCP-NEXT: vpermi2d %xmm1, %xmm2, %xmm6
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm1 = [4,11,0,0]
+; AVX512DQ-FCP-NEXT: vpermps %zmm0, %zmm1, %zmm1
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [13,4,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm7
+; AVX512DQ-FCP-NEXT: vpermt2d (%rdi), %ymm2, %ymm7
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,0,0,0,6,13,6,7]
+; AVX512DQ-FCP-NEXT: vpermps %zmm0, %zmm2, %zmm0
+; AVX512DQ-FCP-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX512DQ-FCP-NEXT: vmovq %xmm3, (%rsi)
+; AVX512DQ-FCP-NEXT: vmovq %xmm4, (%rdx)
+; AVX512DQ-FCP-NEXT: vmovq %xmm5, (%rcx)
+; AVX512DQ-FCP-NEXT: vmovq %xmm6, (%r8)
+; AVX512DQ-FCP-NEXT: vmovlps %xmm1, (%r9)
; AVX512DQ-FCP-NEXT: vmovq %xmm7, (%r10)
-; AVX512DQ-FCP-NEXT: vmovq %xmm8, (%rax)
+; AVX512DQ-FCP-NEXT: vmovlps %xmm0, (%rax)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
@@ -334,22 +336,22 @@ define void @load_i32_stride7_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,2,2,3]
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm1 = [4,11,0,0]
-; AVX512BW-NEXT: vmovdqa 32(%rdi), %ymm5
-; AVX512BW-NEXT: vmovdqa (%rdi), %ymm6
-; AVX512BW-NEXT: vpermi2d %ymm5, %ymm6, %ymm1
-; AVX512BW-NEXT: vpblendd {{.*#+}} ymm7 = ymm5[0],ymm6[1],ymm5[2,3,4],ymm6[5],ymm5[6,7]
-; AVX512BW-NEXT: vextracti128 $1, %ymm7, %xmm7
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,0,2,3]
-; AVX512BW-NEXT: vpshufd {{.*#+}} ymm6 = ymm6[2,3,2,3,6,7,6,7]
-; AVX512BW-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2,3,4],ymm5[5],ymm6[6,7]
-; AVX512BW-NEXT: vextracti128 $1, %ymm5, %xmm5
+; AVX512BW-NEXT: vpermps (%rdi), %zmm1, %zmm1
+; AVX512BW-NEXT: vmovaps (%rdi), %ymm5
+; AVX512BW-NEXT: vmovaps 32(%rdi), %ymm6
+; AVX512BW-NEXT: vblendps {{.*#+}} ymm7 = ymm6[0],ymm5[1],ymm6[2,3,4],ymm5[5],ymm6[6,7]
+; AVX512BW-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX512BW-NEXT: vshufps {{.*#+}} xmm7 = xmm7[1,0,2,3]
+; AVX512BW-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,3,2,3,6,7,6,7]
+; AVX512BW-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0],ymm6[1],ymm5[2,3,4],ymm6[5],ymm5[6,7]
+; AVX512BW-NEXT: vextractf128 $1, %ymm5, %xmm5
; AVX512BW-NEXT: vmovq %xmm2, (%rsi)
; AVX512BW-NEXT: vmovq %xmm3, (%rdx)
; AVX512BW-NEXT: vmovq %xmm4, (%rcx)
; AVX512BW-NEXT: vmovq %xmm0, (%r8)
-; AVX512BW-NEXT: vmovq %xmm1, (%r9)
-; AVX512BW-NEXT: vmovq %xmm7, (%r10)
-; AVX512BW-NEXT: vmovq %xmm5, (%rax)
+; AVX512BW-NEXT: vmovlps %xmm1, (%r9)
+; AVX512BW-NEXT: vmovlps %xmm7, (%r10)
+; AVX512BW-NEXT: vmovlps %xmm5, (%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -357,30 +359,31 @@ define void @load_i32_stride7_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %xmm0
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm1
-; AVX512BW-FCP-NEXT: vpinsrd $1, 28(%rdi), %xmm0, %xmm2
-; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm3 = [1,4,1,4]
-; AVX512BW-FCP-NEXT: vpermi2d %xmm1, %xmm0, %xmm3
-; AVX512BW-FCP-NEXT: vpbroadcastd 8(%rdi), %xmm4
-; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0],xmm1[1],xmm4[2,3]
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [7,2,0,0]
-; AVX512BW-FCP-NEXT: vpermi2d %xmm0, %xmm1, %xmm5
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm0 = [4,11,0,0]
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm6
-; AVX512BW-FCP-NEXT: vpermi2d %ymm1, %ymm6, %ymm0
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm7 = [13,4,6,7]
-; AVX512BW-FCP-NEXT: vpermi2d %ymm6, %ymm1, %ymm7
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm8 = [6,13,6,7]
-; AVX512BW-FCP-NEXT: vpermi2d %ymm1, %ymm6, %ymm8
-; AVX512BW-FCP-NEXT: vmovq %xmm2, (%rsi)
-; AVX512BW-FCP-NEXT: vmovq %xmm3, (%rdx)
-; AVX512BW-FCP-NEXT: vmovq %xmm4, (%rcx)
-; AVX512BW-FCP-NEXT: vmovq %xmm5, (%r8)
-; AVX512BW-FCP-NEXT: vmovq %xmm0, (%r9)
+; AVX512BW-FCP-NEXT: vmovaps (%rdi), %zmm0
+; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %xmm1
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm2
+; AVX512BW-FCP-NEXT: vpinsrd $1, 28(%rdi), %xmm1, %xmm3
+; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm4 = [1,4,1,4]
+; AVX512BW-FCP-NEXT: vpermi2d %xmm2, %xmm1, %xmm4
+; AVX512BW-FCP-NEXT: vpbroadcastd 8(%rdi), %xmm5
+; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0],xmm2[1],xmm5[2,3]
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [7,2,0,0]
+; AVX512BW-FCP-NEXT: vpermi2d %xmm1, %xmm2, %xmm6
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm1 = [4,11,0,0]
+; AVX512BW-FCP-NEXT: vpermps %zmm0, %zmm1, %zmm1
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [13,4,6,7]
+; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm7
+; AVX512BW-FCP-NEXT: vpermt2d (%rdi), %ymm2, %ymm7
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,0,0,0,6,13,6,7]
+; AVX512BW-FCP-NEXT: vpermps %zmm0, %zmm2, %zmm0
+; AVX512BW-FCP-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX512BW-FCP-NEXT: vmovq %xmm3, (%rsi)
+; AVX512BW-FCP-NEXT: vmovq %xmm4, (%rdx)
+; AVX512BW-FCP-NEXT: vmovq %xmm5, (%rcx)
+; AVX512BW-FCP-NEXT: vmovq %xmm6, (%r8)
+; AVX512BW-FCP-NEXT: vmovlps %xmm1, (%r9)
; AVX512BW-FCP-NEXT: vmovq %xmm7, (%r10)
-; AVX512BW-FCP-NEXT: vmovq %xmm8, (%rax)
+; AVX512BW-FCP-NEXT: vmovlps %xmm0, (%rax)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
@@ -399,22 +402,22 @@ define void @load_i32_stride7_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,2,2,3]
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm1 = [4,11,0,0]
-; AVX512DQ-BW-NEXT: vmovdqa 32(%rdi), %ymm5
-; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm6
-; AVX512DQ-BW-NEXT: vpermi2d %ymm5, %ymm6, %ymm1
-; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm7 = ymm5[0],ymm6[1],ymm5[2,3,4],ymm6[5],ymm5[6,7]
-; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm7, %xmm7
-; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,0,2,3]
-; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm6 = ymm6[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2,3,4],ymm5[5],ymm6[6,7]
-; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm5, %xmm5
+; AVX512DQ-BW-NEXT: vpermps (%rdi), %zmm1, %zmm1
+; AVX512DQ-BW-NEXT: vmovaps (%rdi), %ymm5
+; AVX512DQ-BW-NEXT: vmovaps 32(%rdi), %ymm6
+; AVX512DQ-BW-NEXT: vblendps {{.*#+}} ymm7 = ymm6[0],ymm5[1],ymm6[2,3,4],ymm5[5],ymm6[6,7]
+; AVX512DQ-BW-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX512DQ-BW-NEXT: vshufps {{.*#+}} xmm7 = xmm7[1,0,2,3]
+; AVX512DQ-BW-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,3,2,3,6,7,6,7]
+; AVX512DQ-BW-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0],ymm6[1],ymm5[2,3,4],ymm6[5],ymm5[6,7]
+; AVX512DQ-BW-NEXT: vextractf128 $1, %ymm5, %xmm5
; AVX512DQ-BW-NEXT: vmovq %xmm2, (%rsi)
; AVX512DQ-BW-NEXT: vmovq %xmm3, (%rdx)
; AVX512DQ-BW-NEXT: vmovq %xmm4, (%rcx)
; AVX512DQ-BW-NEXT: vmovq %xmm0, (%r8)
-; AVX512DQ-BW-NEXT: vmovq %xmm1, (%r9)
-; AVX512DQ-BW-NEXT: vmovq %xmm7, (%r10)
-; AVX512DQ-BW-NEXT: vmovq %xmm5, (%rax)
+; AVX512DQ-BW-NEXT: vmovlps %xmm1, (%r9)
+; AVX512DQ-BW-NEXT: vmovlps %xmm7, (%r10)
+; AVX512DQ-BW-NEXT: vmovlps %xmm5, (%rax)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
@@ -422,30 +425,31 @@ define void @load_i32_stride7_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %xmm0
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm1
-; AVX512DQ-BW-FCP-NEXT: vpinsrd $1, 28(%rdi), %xmm0, %xmm2
-; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm3 = [1,4,1,4]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %xmm1, %xmm0, %xmm3
-; AVX512DQ-BW-FCP-NEXT: vpbroadcastd 8(%rdi), %xmm4
-; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0],xmm1[1],xmm4[2,3]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [7,2,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %xmm0, %xmm1, %xmm5
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm0 = [4,11,0,0]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm6
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm1, %ymm6, %ymm0
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm7 = [13,4,6,7]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm6, %ymm1, %ymm7
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm8 = [6,13,6,7]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm1, %ymm6, %ymm8
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm2, (%rsi)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm3, (%rdx)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm4, (%rcx)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm5, (%r8)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm0, (%r9)
+; AVX512DQ-BW-FCP-NEXT: vmovaps (%rdi), %zmm0
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %xmm1
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm2
+; AVX512DQ-BW-FCP-NEXT: vpinsrd $1, 28(%rdi), %xmm1, %xmm3
+; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm4 = [1,4,1,4]
+; AVX512DQ-BW-FCP-NEXT: vpermi2d %xmm2, %xmm1, %xmm4
+; AVX512DQ-BW-FCP-NEXT: vpbroadcastd 8(%rdi), %xmm5
+; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0],xmm2[1],xmm5[2,3]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [7,2,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermi2d %xmm1, %xmm2, %xmm6
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm1 = [4,11,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermps %zmm0, %zmm1, %zmm1
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [13,4,6,7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm7
+; AVX512DQ-BW-FCP-NEXT: vpermt2d (%rdi), %ymm2, %ymm7
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,0,0,0,6,13,6,7]
+; AVX512DQ-BW-FCP-NEXT: vpermps %zmm0, %zmm2, %zmm0
+; AVX512DQ-BW-FCP-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm3, (%rsi)
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm4, (%rdx)
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm5, (%rcx)
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm6, (%r8)
+; AVX512DQ-BW-FCP-NEXT: vmovlps %xmm1, (%r9)
; AVX512DQ-BW-FCP-NEXT: vmovq %xmm7, (%r10)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm8, (%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovlps %xmm0, (%rax)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%wide.vec = load <14 x i32>, ptr %in.vec, align 64
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-8.ll
index f0c95f4fa9ef8c..8d7f8d1db85220 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-8.ll
@@ -222,24 +222,25 @@ define void @load_i32_stride8_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,5,0,0]
; AVX512-FCP-NEXT: vpermi2d %xmm1, %xmm0, %xmm3
; AVX512-FCP-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm4
-; AVX512-FCP-NEXT: vpunpckldq {{.*#+}} ymm5 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[4],ymm1[4],ymm4[5],ymm1[5]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm5, %xmm5
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [5,13,5,5]
-; AVX512-FCP-NEXT: vpermi2d %ymm1, %ymm4, %ymm6
-; AVX512-FCP-NEXT: vpunpckhdq {{.*#+}} ymm1 = ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[6],ymm1[6],ymm4[7],ymm1[7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm4
-; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512-FCP-NEXT: vmovaps 32(%rdi), %ymm1
+; AVX512-FCP-NEXT: vmovaps (%rdi), %ymm4
+; AVX512-FCP-NEXT: vunpcklps {{.*#+}} ymm5 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[4],ymm1[4],ymm4[5],ymm1[5]
+; AVX512-FCP-NEXT: vextractf128 $1, %ymm5, %xmm5
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [0,0,0,0,5,13,5,5]
+; AVX512-FCP-NEXT: vpermps (%rdi), %zmm6, %zmm6
+; AVX512-FCP-NEXT: vextractf128 $1, %ymm6, %xmm6
+; AVX512-FCP-NEXT: vunpckhps {{.*#+}} ymm1 = ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[6],ymm1[6],ymm4[7],ymm1[7]
+; AVX512-FCP-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX512-FCP-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX512-FCP-NEXT: vmovq %xmm2, (%rsi)
; AVX512-FCP-NEXT: vmovq %xmm3, (%rdx)
; AVX512-FCP-NEXT: vmovq %xmm0, (%rcx)
; AVX512-FCP-NEXT: vpextrq $1, %xmm0, (%r8)
-; AVX512-FCP-NEXT: vmovq %xmm5, (%r9)
-; AVX512-FCP-NEXT: vmovq %xmm6, (%r11)
-; AVX512-FCP-NEXT: vmovq %xmm4, (%r10)
-; AVX512-FCP-NEXT: vmovq %xmm1, (%rax)
+; AVX512-FCP-NEXT: vmovlps %xmm5, (%r9)
+; AVX512-FCP-NEXT: vmovlps %xmm6, (%r11)
+; AVX512-FCP-NEXT: vmovlps %xmm4, (%r10)
+; AVX512-FCP-NEXT: vmovlps %xmm1, (%rax)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
@@ -287,24 +288,25 @@ define void @load_i32_stride8_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,5,0,0]
; AVX512DQ-FCP-NEXT: vpermi2d %xmm1, %xmm0, %xmm3
; AVX512DQ-FCP-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm4
-; AVX512DQ-FCP-NEXT: vpunpckldq {{.*#+}} ymm5 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[4],ymm1[4],ymm4[5],ymm1[5]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm5, %xmm5
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [5,13,5,5]
-; AVX512DQ-FCP-NEXT: vpermi2d %ymm1, %ymm4, %ymm6
-; AVX512DQ-FCP-NEXT: vpunpckhdq {{.*#+}} ymm1 = ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[6],ymm1[6],ymm4[7],ymm1[7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm4
-; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512DQ-FCP-NEXT: vmovaps 32(%rdi), %ymm1
+; AVX512DQ-FCP-NEXT: vmovaps (%rdi), %ymm4
+; AVX512DQ-FCP-NEXT: vunpcklps {{.*#+}} ymm5 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[4],ymm1[4],ymm4[5],ymm1[5]
+; AVX512DQ-FCP-NEXT: vextractf128 $1, %ymm5, %xmm5
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [0,0,0,0,5,13,5,5]
+; AVX512DQ-FCP-NEXT: vpermps (%rdi), %zmm6, %zmm6
+; AVX512DQ-FCP-NEXT: vextractf128 $1, %ymm6, %xmm6
+; AVX512DQ-FCP-NEXT: vunpckhps {{.*#+}} ymm1 = ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[6],ymm1[6],ymm4[7],ymm1[7]
+; AVX512DQ-FCP-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX512DQ-FCP-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX512DQ-FCP-NEXT: vmovq %xmm2, (%rsi)
; AVX512DQ-FCP-NEXT: vmovq %xmm3, (%rdx)
; AVX512DQ-FCP-NEXT: vmovq %xmm0, (%rcx)
; AVX512DQ-FCP-NEXT: vpextrq $1, %xmm0, (%r8)
-; AVX512DQ-FCP-NEXT: vmovq %xmm5, (%r9)
-; AVX512DQ-FCP-NEXT: vmovq %xmm6, (%r11)
-; AVX512DQ-FCP-NEXT: vmovq %xmm4, (%r10)
-; AVX512DQ-FCP-NEXT: vmovq %xmm1, (%rax)
+; AVX512DQ-FCP-NEXT: vmovlps %xmm5, (%r9)
+; AVX512DQ-FCP-NEXT: vmovlps %xmm6, (%r11)
+; AVX512DQ-FCP-NEXT: vmovlps %xmm4, (%r10)
+; AVX512DQ-FCP-NEXT: vmovlps %xmm1, (%rax)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
@@ -352,24 +354,25 @@ define void @load_i32_stride8_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,5,0,0]
; AVX512BW-FCP-NEXT: vpermi2d %xmm1, %xmm0, %xmm3
; AVX512BW-FCP-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm4
-; AVX512BW-FCP-NEXT: vpunpckldq {{.*#+}} ymm5 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[4],ymm1[4],ymm4[5],ymm1[5]
-; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm5, %xmm5
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [5,13,5,5]
-; AVX512BW-FCP-NEXT: vpermi2d %ymm1, %ymm4, %ymm6
-; AVX512BW-FCP-NEXT: vpunpckhdq {{.*#+}} ymm1 = ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[6],ymm1[6],ymm4[7],ymm1[7]
-; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm1, %xmm4
-; AVX512BW-FCP-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7]
-; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512BW-FCP-NEXT: vmovaps 32(%rdi), %ymm1
+; AVX512BW-FCP-NEXT: vmovaps (%rdi), %ymm4
+; AVX512BW-FCP-NEXT: vunpcklps {{.*#+}} ymm5 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[4],ymm1[4],ymm4[5],ymm1[5]
+; AVX512BW-FCP-NEXT: vextractf128 $1, %ymm5, %xmm5
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [0,0,0,0,5,13,5,5]
+; AVX512BW-FCP-NEXT: vpermps (%rdi), %zmm6, %zmm6
+; AVX512BW-FCP-NEXT: vextractf128 $1, %ymm6, %xmm6
+; AVX512BW-FCP-NEXT: vunpckhps {{.*#+}} ymm1 = ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[6],ymm1[6],ymm4[7],ymm1[7]
+; AVX512BW-FCP-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX512BW-FCP-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7]
+; AVX512BW-FCP-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX512BW-FCP-NEXT: vmovq %xmm2, (%rsi)
; AVX512BW-FCP-NEXT: vmovq %xmm3, (%rdx)
; AVX512BW-FCP-NEXT: vmovq %xmm0, (%rcx)
; AVX512BW-FCP-NEXT: vpextrq $1, %xmm0, (%r8)
-; AVX512BW-FCP-NEXT: vmovq %xmm5, (%r9)
-; AVX512BW-FCP-NEXT: vmovq %xmm6, (%r11)
-; AVX512BW-FCP-NEXT: vmovq %xmm4, (%r10)
-; AVX512BW-FCP-NEXT: vmovq %xmm1, (%rax)
+; AVX512BW-FCP-NEXT: vmovlps %xmm5, (%r9)
+; AVX512BW-FCP-NEXT: vmovlps %xmm6, (%r11)
+; AVX512BW-FCP-NEXT: vmovlps %xmm4, (%r10)
+; AVX512BW-FCP-NEXT: vmovlps %xmm1, (%rax)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
@@ -417,24 +420,25 @@ define void @load_i32_stride8_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,5,0,0]
; AVX512DQ-BW-FCP-NEXT: vpermi2d %xmm1, %xmm0, %xmm3
; AVX512DQ-BW-FCP-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm4
-; AVX512DQ-BW-FCP-NEXT: vpunpckldq {{.*#+}} ymm5 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[4],ymm1[4],ymm4[5],ymm1[5]
-; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm5, %xmm5
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [5,13,5,5]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm1, %ymm4, %ymm6
-; AVX512DQ-BW-FCP-NEXT: vpunpckhdq {{.*#+}} ymm1 = ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[6],ymm1[6],ymm4[7],ymm1[7]
-; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm1, %xmm4
-; AVX512DQ-BW-FCP-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512DQ-BW-FCP-NEXT: vmovaps 32(%rdi), %ymm1
+; AVX512DQ-BW-FCP-NEXT: vmovaps (%rdi), %ymm4
+; AVX512DQ-BW-FCP-NEXT: vunpcklps {{.*#+}} ymm5 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[4],ymm1[4],ymm4[5],ymm1[5]
+; AVX512DQ-BW-FCP-NEXT: vextractf128 $1, %ymm5, %xmm5
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [0,0,0,0,5,13,5,5]
+; AVX512DQ-BW-FCP-NEXT: vpermps (%rdi), %zmm6, %zmm6
+; AVX512DQ-BW-FCP-NEXT: vextractf128 $1, %ymm6, %xmm6
+; AVX512DQ-BW-FCP-NEXT: vunpckhps {{.*#+}} ymm1 = ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[6],ymm1[6],ymm4[7],ymm1[7]
+; AVX512DQ-BW-FCP-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX512DQ-BW-FCP-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7]
+; AVX512DQ-BW-FCP-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX512DQ-BW-FCP-NEXT: vmovq %xmm2, (%rsi)
; AVX512DQ-BW-FCP-NEXT: vmovq %xmm3, (%rdx)
; AVX512DQ-BW-FCP-NEXT: vmovq %xmm0, (%rcx)
; AVX512DQ-BW-FCP-NEXT: vpextrq $1, %xmm0, (%r8)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm5, (%r9)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm6, (%r11)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm4, (%r10)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm1, (%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovlps %xmm5, (%r9)
+; AVX512DQ-BW-FCP-NEXT: vmovlps %xmm6, (%r11)
+; AVX512DQ-BW-FCP-NEXT: vmovlps %xmm4, (%r10)
+; AVX512DQ-BW-FCP-NEXT: vmovlps %xmm1, (%rax)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%wide.vec = load <16 x i32>, ptr %in.vec, align 64
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-2.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-2.ll
index 2381df6d732891..aa7d8ceb149503 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-2.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-2.ll
@@ -245,13 +245,12 @@ define void @load_i64_stride2_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nou
; AVX512-FCP-LABEL: load_i64_stride2_vf4:
; AVX512-FCP: # %bb.0:
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} ymm0 = [0,2,4,6]
-; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512-FCP-NEXT: vpermi2q %ymm2, %ymm1, %ymm0
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} ymm3 = [1,3,5,7]
-; AVX512-FCP-NEXT: vpermi2q %ymm2, %ymm1, %ymm3
-; AVX512-FCP-NEXT: vmovdqa %ymm0, (%rsi)
-; AVX512-FCP-NEXT: vmovdqa %ymm3, (%rdx)
+; AVX512-FCP-NEXT: vmovaps (%rdi), %zmm1
+; AVX512-FCP-NEXT: vpermpd %zmm1, %zmm0, %zmm0
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} ymm2 = [1,3,5,7]
+; AVX512-FCP-NEXT: vpermpd %zmm1, %zmm2, %zmm1
+; AVX512-FCP-NEXT: vmovaps %ymm0, (%rsi)
+; AVX512-FCP-NEXT: vmovaps %ymm1, (%rdx)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
@@ -271,13 +270,12 @@ define void @load_i64_stride2_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nou
; AVX512DQ-FCP-LABEL: load_i64_stride2_vf4:
; AVX512DQ-FCP: # %bb.0:
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} ymm0 = [0,2,4,6]
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512DQ-FCP-NEXT: vpermi2q %ymm2, %ymm1, %ymm0
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} ymm3 = [1,3,5,7]
-; AVX512DQ-FCP-NEXT: vpermi2q %ymm2, %ymm1, %ymm3
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, (%rsi)
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm3, (%rdx)
+; AVX512DQ-FCP-NEXT: vmovaps (%rdi), %zmm1
+; AVX512DQ-FCP-NEXT: vpermpd %zmm1, %zmm0, %zmm0
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} ymm2 = [1,3,5,7]
+; AVX512DQ-FCP-NEXT: vpermpd %zmm1, %zmm2, %zmm1
+; AVX512DQ-FCP-NEXT: vmovaps %ymm0, (%rsi)
+; AVX512DQ-FCP-NEXT: vmovaps %ymm1, (%rdx)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
@@ -297,13 +295,12 @@ define void @load_i64_stride2_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nou
; AVX512BW-FCP-LABEL: load_i64_stride2_vf4:
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm0 = [0,2,4,6]
-; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512BW-FCP-NEXT: vpermi2q %ymm2, %ymm1, %ymm0
-; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm3 = [1,3,5,7]
-; AVX512BW-FCP-NEXT: vpermi2q %ymm2, %ymm1, %ymm3
-; AVX512BW-FCP-NEXT: vmovdqa %ymm0, (%rsi)
-; AVX512BW-FCP-NEXT: vmovdqa %ymm3, (%rdx)
+; AVX512BW-FCP-NEXT: vmovaps (%rdi), %zmm1
+; AVX512BW-FCP-NEXT: vpermpd %zmm1, %zmm0, %zmm0
+; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm2 = [1,3,5,7]
+; AVX512BW-FCP-NEXT: vpermpd %zmm1, %zmm2, %zmm1
+; AVX512BW-FCP-NEXT: vmovaps %ymm0, (%rsi)
+; AVX512BW-FCP-NEXT: vmovaps %ymm1, (%rdx)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
@@ -323,13 +320,12 @@ define void @load_i64_stride2_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nou
; AVX512DQ-BW-FCP-LABEL: load_i64_stride2_vf4:
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm0 = [0,2,4,6]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512DQ-BW-FCP-NEXT: vpermi2q %ymm2, %ymm1, %ymm0
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm3 = [1,3,5,7]
-; AVX512DQ-BW-FCP-NEXT: vpermi2q %ymm2, %ymm1, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm0, (%rsi)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm3, (%rdx)
+; AVX512DQ-BW-FCP-NEXT: vmovaps (%rdi), %zmm1
+; AVX512DQ-BW-FCP-NEXT: vpermpd %zmm1, %zmm0, %zmm0
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm2 = [1,3,5,7]
+; AVX512DQ-BW-FCP-NEXT: vpermpd %zmm1, %zmm2, %zmm1
+; AVX512DQ-BW-FCP-NEXT: vmovaps %ymm0, (%rsi)
+; AVX512DQ-BW-FCP-NEXT: vmovaps %ymm1, (%rdx)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%wide.vec = load <8 x i64>, ptr %in.vec, align 64
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-6.ll
index f82bcd1ce3e1eb..7d3209397c3df0 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-6.ll
@@ -611,32 +611,31 @@ define void @load_i64_stride6_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} ymm1 = [1,7,13,0]
; AVX512-FCP-NEXT: vpermi2q %zmm3, %zmm2, %zmm1
-; AVX512-FCP-NEXT: vmovdqa 128(%rdi), %ymm4
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm4[6,7]
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} ymm5 = [10,0,6,0]
-; AVX512-FCP-NEXT: vpermi2q %zmm2, %zmm3, %zmm5
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} ymm6 = [0,1,2,4]
-; AVX512-FCP-NEXT: vmovdqa 160(%rdi), %ymm7
-; AVX512-FCP-NEXT: vpermi2q %ymm7, %ymm5, %ymm6
-; AVX512-FCP-NEXT: vinserti128 $1, 160(%rdi), %ymm0, %ymm5
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} ymm8 = [11,1,7,0]
-; AVX512-FCP-NEXT: vpermi2q %zmm2, %zmm3, %zmm8
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3,4,5],ymm5[6,7]
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} ymm8 = [0,0,0,6]
-; AVX512-FCP-NEXT: vpermi2q %ymm7, %ymm4, %ymm8
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} xmm4 = [4,10]
-; AVX512-FCP-NEXT: vpermi2q %zmm3, %zmm2, %zmm4
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],mem[6,7]
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} ymm4 = [10,0,6,0]
+; AVX512-FCP-NEXT: vpermi2q %zmm2, %zmm3, %zmm4
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} ymm5 = [0,1,2,4]
+; AVX512-FCP-NEXT: vmovdqa 160(%rdi), %ymm6
+; AVX512-FCP-NEXT: vpermi2q %ymm6, %ymm4, %ymm5
+; AVX512-FCP-NEXT: vinserti128 $1, 160(%rdi), %ymm0, %ymm4
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} ymm7 = [11,1,7,0]
+; AVX512-FCP-NEXT: vpermi2q %zmm2, %zmm3, %zmm7
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm7[0,1,2,3,4,5],ymm4[6,7]
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} ymm7 = [0,4,0,6]
+; AVX512-FCP-NEXT: vpermq 128(%rdi), %zmm7, %zmm7
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} xmm8 = [4,10]
+; AVX512-FCP-NEXT: vpermi2q %zmm3, %zmm2, %zmm8
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
; AVX512-FCP-NEXT: vpbroadcastq 136(%rdi), %ymm8
-; AVX512-FCP-NEXT: vpunpckhqdq {{.*#+}} ymm7 = ymm8[1],ymm7[1],ymm8[3],ymm7[3]
+; AVX512-FCP-NEXT: vpunpckhqdq {{.*#+}} ymm6 = ymm8[1],ymm6[1],ymm8[3],ymm6[3]
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} xmm8 = [5,11]
; AVX512-FCP-NEXT: vpermi2q %zmm3, %zmm2, %zmm8
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1,2,3],ymm7[4,5,6,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1,2,3],ymm6[4,5,6,7]
; AVX512-FCP-NEXT: vmovdqa %ymm0, (%rsi)
; AVX512-FCP-NEXT: vmovdqa %ymm1, (%rdx)
-; AVX512-FCP-NEXT: vmovdqa %ymm6, (%rcx)
-; AVX512-FCP-NEXT: vmovdqa %ymm5, (%r8)
-; AVX512-FCP-NEXT: vmovdqa %ymm4, (%r9)
+; AVX512-FCP-NEXT: vmovdqa %ymm5, (%rcx)
+; AVX512-FCP-NEXT: vmovdqa %ymm4, (%r8)
+; AVX512-FCP-NEXT: vmovdqa %ymm7, (%r9)
; AVX512-FCP-NEXT: vmovdqa %ymm2, (%rax)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
@@ -694,32 +693,31 @@ define void @load_i64_stride6_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} ymm1 = [1,7,13,0]
; AVX512DQ-FCP-NEXT: vpermi2q %zmm3, %zmm2, %zmm1
-; AVX512DQ-FCP-NEXT: vmovdqa 128(%rdi), %ymm4
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm4[6,7]
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} ymm5 = [10,0,6,0]
-; AVX512DQ-FCP-NEXT: vpermi2q %zmm2, %zmm3, %zmm5
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} ymm6 = [0,1,2,4]
-; AVX512DQ-FCP-NEXT: vmovdqa 160(%rdi), %ymm7
-; AVX512DQ-FCP-NEXT: vpermi2q %ymm7, %ymm5, %ymm6
-; AVX512DQ-FCP-NEXT: vinserti128 $1, 160(%rdi), %ymm0, %ymm5
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} ymm8 = [11,1,7,0]
-; AVX512DQ-FCP-NEXT: vpermi2q %zmm2, %zmm3, %zmm8
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3,4,5],ymm5[6,7]
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} ymm8 = [0,0,0,6]
-; AVX512DQ-FCP-NEXT: vpermi2q %ymm7, %ymm4, %ymm8
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} xmm4 = [4,10]
-; AVX512DQ-FCP-NEXT: vpermi2q %zmm3, %zmm2, %zmm4
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],mem[6,7]
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} ymm4 = [10,0,6,0]
+; AVX512DQ-FCP-NEXT: vpermi2q %zmm2, %zmm3, %zmm4
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} ymm5 = [0,1,2,4]
+; AVX512DQ-FCP-NEXT: vmovdqa 160(%rdi), %ymm6
+; AVX512DQ-FCP-NEXT: vpermi2q %ymm6, %ymm4, %ymm5
+; AVX512DQ-FCP-NEXT: vinserti128 $1, 160(%rdi), %ymm0, %ymm4
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} ymm7 = [11,1,7,0]
+; AVX512DQ-FCP-NEXT: vpermi2q %zmm2, %zmm3, %zmm7
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm7[0,1,2,3,4,5],ymm4[6,7]
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} ymm7 = [0,4,0,6]
+; AVX512DQ-FCP-NEXT: vpermq 128(%rdi), %zmm7, %zmm7
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} xmm8 = [4,10]
+; AVX512DQ-FCP-NEXT: vpermi2q %zmm3, %zmm2, %zmm8
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
; AVX512DQ-FCP-NEXT: vpbroadcastq 136(%rdi), %ymm8
-; AVX512DQ-FCP-NEXT: vpunpckhqdq {{.*#+}} ymm7 = ymm8[1],ymm7[1],ymm8[3],ymm7[3]
+; AVX512DQ-FCP-NEXT: vpunpckhqdq {{.*#+}} ymm6 = ymm8[1],ymm6[1],ymm8[3],ymm6[3]
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} xmm8 = [5,11]
; AVX512DQ-FCP-NEXT: vpermi2q %zmm3, %zmm2, %zmm8
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1,2,3],ymm7[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1,2,3],ymm6[4,5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, (%rsi)
; AVX512DQ-FCP-NEXT: vmovdqa %ymm1, (%rdx)
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm6, (%rcx)
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm5, (%r8)
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm4, (%r9)
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm5, (%rcx)
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm4, (%r8)
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm7, (%r9)
; AVX512DQ-FCP-NEXT: vmovdqa %ymm2, (%rax)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
@@ -777,32 +775,31 @@ define void @load_i64_stride6_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm1 = [1,7,13,0]
; AVX512BW-FCP-NEXT: vpermi2q %zmm3, %zmm2, %zmm1
-; AVX512BW-FCP-NEXT: vmovdqa 128(%rdi), %ymm4
-; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm4[6,7]
-; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm5 = [10,0,6,0]
-; AVX512BW-FCP-NEXT: vpermi2q %zmm2, %zmm3, %zmm5
-; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm6 = [0,1,2,4]
-; AVX512BW-FCP-NEXT: vmovdqa 160(%rdi), %ymm7
-; AVX512BW-FCP-NEXT: vpermi2q %ymm7, %ymm5, %ymm6
-; AVX512BW-FCP-NEXT: vinserti128 $1, 160(%rdi), %ymm0, %ymm5
-; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm8 = [11,1,7,0]
-; AVX512BW-FCP-NEXT: vpermi2q %zmm2, %zmm3, %zmm8
-; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3,4,5],ymm5[6,7]
-; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm8 = [0,0,0,6]
-; AVX512BW-FCP-NEXT: vpermi2q %ymm7, %ymm4, %ymm8
-; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} xmm4 = [4,10]
-; AVX512BW-FCP-NEXT: vpermi2q %zmm3, %zmm2, %zmm4
-; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
+; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],mem[6,7]
+; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm4 = [10,0,6,0]
+; AVX512BW-FCP-NEXT: vpermi2q %zmm2, %zmm3, %zmm4
+; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm5 = [0,1,2,4]
+; AVX512BW-FCP-NEXT: vmovdqa 160(%rdi), %ymm6
+; AVX512BW-FCP-NEXT: vpermi2q %ymm6, %ymm4, %ymm5
+; AVX512BW-FCP-NEXT: vinserti128 $1, 160(%rdi), %ymm0, %ymm4
+; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm7 = [11,1,7,0]
+; AVX512BW-FCP-NEXT: vpermi2q %zmm2, %zmm3, %zmm7
+; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm7[0,1,2,3,4,5],ymm4[6,7]
+; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm7 = [0,4,0,6]
+; AVX512BW-FCP-NEXT: vpermq 128(%rdi), %zmm7, %zmm7
+; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} xmm8 = [4,10]
+; AVX512BW-FCP-NEXT: vpermi2q %zmm3, %zmm2, %zmm8
+; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
; AVX512BW-FCP-NEXT: vpbroadcastq 136(%rdi), %ymm8
-; AVX512BW-FCP-NEXT: vpunpckhqdq {{.*#+}} ymm7 = ymm8[1],ymm7[1],ymm8[3],ymm7[3]
+; AVX512BW-FCP-NEXT: vpunpckhqdq {{.*#+}} ymm6 = ymm8[1],ymm6[1],ymm8[3],ymm6[3]
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} xmm8 = [5,11]
; AVX512BW-FCP-NEXT: vpermi2q %zmm3, %zmm2, %zmm8
-; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1,2,3],ymm7[4,5,6,7]
+; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1,2,3],ymm6[4,5,6,7]
; AVX512BW-FCP-NEXT: vmovdqa %ymm0, (%rsi)
; AVX512BW-FCP-NEXT: vmovdqa %ymm1, (%rdx)
-; AVX512BW-FCP-NEXT: vmovdqa %ymm6, (%rcx)
-; AVX512BW-FCP-NEXT: vmovdqa %ymm5, (%r8)
-; AVX512BW-FCP-NEXT: vmovdqa %ymm4, (%r9)
+; AVX512BW-FCP-NEXT: vmovdqa %ymm5, (%rcx)
+; AVX512BW-FCP-NEXT: vmovdqa %ymm4, (%r8)
+; AVX512BW-FCP-NEXT: vmovdqa %ymm7, (%r9)
; AVX512BW-FCP-NEXT: vmovdqa %ymm2, (%rax)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
@@ -860,32 +857,31 @@ define void @load_i64_stride6_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm1 = [1,7,13,0]
; AVX512DQ-BW-FCP-NEXT: vpermi2q %zmm3, %zmm2, %zmm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 128(%rdi), %ymm4
-; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm4[6,7]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm5 = [10,0,6,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2q %zmm2, %zmm3, %zmm5
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm6 = [0,1,2,4]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 160(%rdi), %ymm7
-; AVX512DQ-BW-FCP-NEXT: vpermi2q %ymm7, %ymm5, %ymm6
-; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, 160(%rdi), %ymm0, %ymm5
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm8 = [11,1,7,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2q %zmm2, %zmm3, %zmm8
-; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3,4,5],ymm5[6,7]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm8 = [0,0,0,6]
-; AVX512DQ-BW-FCP-NEXT: vpermi2q %ymm7, %ymm4, %ymm8
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} xmm4 = [4,10]
-; AVX512DQ-BW-FCP-NEXT: vpermi2q %zmm3, %zmm2, %zmm4
-; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],mem[6,7]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm4 = [10,0,6,0]
+; AVX512DQ-BW-FCP-NEXT: vpermi2q %zmm2, %zmm3, %zmm4
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm5 = [0,1,2,4]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 160(%rdi), %ymm6
+; AVX512DQ-BW-FCP-NEXT: vpermi2q %ymm6, %ymm4, %ymm5
+; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, 160(%rdi), %ymm0, %ymm4
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm7 = [11,1,7,0]
+; AVX512DQ-BW-FCP-NEXT: vpermi2q %zmm2, %zmm3, %zmm7
+; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm7[0,1,2,3,4,5],ymm4[6,7]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm7 = [0,4,0,6]
+; AVX512DQ-BW-FCP-NEXT: vpermq 128(%rdi), %zmm7, %zmm7
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} xmm8 = [4,10]
+; AVX512DQ-BW-FCP-NEXT: vpermi2q %zmm3, %zmm2, %zmm8
+; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
; AVX512DQ-BW-FCP-NEXT: vpbroadcastq 136(%rdi), %ymm8
-; AVX512DQ-BW-FCP-NEXT: vpunpckhqdq {{.*#+}} ymm7 = ymm8[1],ymm7[1],ymm8[3],ymm7[3]
+; AVX512DQ-BW-FCP-NEXT: vpunpckhqdq {{.*#+}} ymm6 = ymm8[1],ymm6[1],ymm8[3],ymm6[3]
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} xmm8 = [5,11]
; AVX512DQ-BW-FCP-NEXT: vpermi2q %zmm3, %zmm2, %zmm8
-; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1,2,3],ymm7[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1,2,3],ymm6[4,5,6,7]
; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm0, (%rsi)
; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm1, (%rdx)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm6, (%rcx)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm5, (%r8)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm4, (%r9)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm5, (%rcx)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm4, (%r8)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm7, (%r9)
; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm2, (%rax)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-7.ll
index 4e5501b1041d3f..cc3e5f3d1d82e2 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-7.ll
@@ -709,28 +709,28 @@ define void @load_i64_stride7_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpermi2q %zmm5, %zmm4, %zmm1
; AVX512-FCP-NEXT: vpbroadcastq 176(%rdi), %ymm2
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
-; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} ymm2 = [0,0,0,7]
-; AVX512-FCP-NEXT: vmovdqa 128(%rdi), %ymm6
-; AVX512-FCP-NEXT: vpermi2q 160(%rdi), %ymm6, %ymm2
-; AVX512-FCP-NEXT: vmovdqa 16(%rdi), %xmm7
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} xmm7 = xmm7[0,1],mem[2,3]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5,6,7]
-; AVX512-FCP-NEXT: vmovdqa 64(%rdi), %ymm7
-; AVX512-FCP-NEXT: vpalignr {{.*#+}} ymm7 = mem[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
-; AVX512-FCP-NEXT: vmovdqa 192(%rdi), %xmm8
-; AVX512-FCP-NEXT: vpalignr {{.*#+}} xmm9 = mem[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7]
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX512-FCP-NEXT: vperm2i128 {{.*#+}} ymm7 = ymm7[2,3],ymm9[2,3]
+; AVX512-FCP-NEXT: vmovdqa 16(%rdi), %xmm2
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
+; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} ymm6 = [0,0,0,7]
+; AVX512-FCP-NEXT: vpermq %zmm3, %zmm6, %zmm6
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm6[4,5,6,7]
+; AVX512-FCP-NEXT: vmovdqa 64(%rdi), %ymm6
+; AVX512-FCP-NEXT: vpalignr {{.*#+}} ymm6 = mem[8,9,10,11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23]
+; AVX512-FCP-NEXT: vmovdqa 192(%rdi), %xmm7
+; AVX512-FCP-NEXT: vpalignr {{.*#+}} xmm8 = mem[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
; AVX512-FCP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm6[0,1,2,3,4,5],ymm8[6,7]
+; AVX512-FCP-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm6[2,3],ymm8[2,3]
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512-FCP-NEXT: vmovdqa 128(%rdi), %ymm8
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3,4,5],ymm7[6,7]
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} xmm9 = [4,11]
; AVX512-FCP-NEXT: vpermi2q %zmm4, %zmm5, %zmm9
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
; AVX512-FCP-NEXT: vmovdqa 192(%rdi), %ymm9
-; AVX512-FCP-NEXT: vpalignr {{.*#+}} ymm6 = ymm6[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],ymm6[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
+; AVX512-FCP-NEXT: vpalignr {{.*#+}} ymm8 = ymm8[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],ymm8[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} xmm9 = [5,12]
; AVX512-FCP-NEXT: vpermi2q %zmm4, %zmm5, %zmm9
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm9[0,1,2,3],ymm6[4,5,6,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} ymm9 = [0,0,4,11]
; AVX512-FCP-NEXT: vpermi2q 192(%rdi), %zmm3, %zmm9
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} xmm3 = [6,13]
@@ -739,9 +739,9 @@ define void @load_i64_stride7_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vmovdqa %ymm0, (%rsi)
; AVX512-FCP-NEXT: vmovdqa %ymm1, (%rdx)
; AVX512-FCP-NEXT: vmovdqa %ymm2, (%rcx)
-; AVX512-FCP-NEXT: vmovdqa %ymm7, (%r8)
-; AVX512-FCP-NEXT: vmovdqa %ymm8, (%r9)
-; AVX512-FCP-NEXT: vmovdqa %ymm6, (%r10)
+; AVX512-FCP-NEXT: vmovdqa %ymm6, (%r8)
+; AVX512-FCP-NEXT: vmovdqa %ymm7, (%r9)
+; AVX512-FCP-NEXT: vmovdqa %ymm8, (%r10)
; AVX512-FCP-NEXT: vmovdqa %ymm3, (%rax)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
@@ -814,28 +814,28 @@ define void @load_i64_stride7_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpermi2q %zmm5, %zmm4, %zmm1
; AVX512DQ-FCP-NEXT: vpbroadcastq 176(%rdi), %ymm2
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
-; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} ymm2 = [0,0,0,7]
-; AVX512DQ-FCP-NEXT: vmovdqa 128(%rdi), %ymm6
-; AVX512DQ-FCP-NEXT: vpermi2q 160(%rdi), %ymm6, %ymm2
-; AVX512DQ-FCP-NEXT: vmovdqa 16(%rdi), %xmm7
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} xmm7 = xmm7[0,1],mem[2,3]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa 64(%rdi), %ymm7
-; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} ymm7 = mem[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
-; AVX512DQ-FCP-NEXT: vmovdqa 192(%rdi), %xmm8
-; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} xmm9 = mem[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX512DQ-FCP-NEXT: vperm2i128 {{.*#+}} ymm7 = ymm7[2,3],ymm9[2,3]
+; AVX512DQ-FCP-NEXT: vmovdqa 16(%rdi), %xmm2
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
+; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} ymm6 = [0,0,0,7]
+; AVX512DQ-FCP-NEXT: vpermq %zmm3, %zmm6, %zmm6
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm6[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa 64(%rdi), %ymm6
+; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} ymm6 = mem[8,9,10,11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23]
+; AVX512DQ-FCP-NEXT: vmovdqa 192(%rdi), %xmm7
+; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} xmm8 = mem[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm6[0,1,2,3,4,5],ymm8[6,7]
+; AVX512DQ-FCP-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm6[2,3],ymm8[2,3]
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512DQ-FCP-NEXT: vmovdqa 128(%rdi), %ymm8
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3,4,5],ymm7[6,7]
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} xmm9 = [4,11]
; AVX512DQ-FCP-NEXT: vpermi2q %zmm4, %zmm5, %zmm9
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa 192(%rdi), %ymm9
-; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} ymm6 = ymm6[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],ymm6[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
+; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} ymm8 = ymm8[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],ymm8[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} xmm9 = [5,12]
; AVX512DQ-FCP-NEXT: vpermi2q %zmm4, %zmm5, %zmm9
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm9[0,1,2,3],ymm6[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} ymm9 = [0,0,4,11]
; AVX512DQ-FCP-NEXT: vpermi2q 192(%rdi), %zmm3, %zmm9
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} xmm3 = [6,13]
@@ -844,9 +844,9 @@ define void @load_i64_stride7_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, (%rsi)
; AVX512DQ-FCP-NEXT: vmovdqa %ymm1, (%rdx)
; AVX512DQ-FCP-NEXT: vmovdqa %ymm2, (%rcx)
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm7, (%r8)
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm8, (%r9)
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm6, (%r10)
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm6, (%r8)
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm7, (%r9)
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm8, (%r10)
; AVX512DQ-FCP-NEXT: vmovdqa %ymm3, (%rax)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
@@ -919,28 +919,28 @@ define void @load_i64_stride7_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-FCP-NEXT: vpermi2q %zmm5, %zmm4, %zmm1
; AVX512BW-FCP-NEXT: vpbroadcastq 176(%rdi), %ymm2
; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
-; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm2 = [0,0,0,7]
-; AVX512BW-FCP-NEXT: vmovdqa 128(%rdi), %ymm6
-; AVX512BW-FCP-NEXT: vpermi2q 160(%rdi), %ymm6, %ymm2
-; AVX512BW-FCP-NEXT: vmovdqa 16(%rdi), %xmm7
-; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} xmm7 = xmm7[0,1],mem[2,3]
-; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5,6,7]
-; AVX512BW-FCP-NEXT: vmovdqa 64(%rdi), %ymm7
-; AVX512BW-FCP-NEXT: vpalignr {{.*#+}} ymm7 = mem[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
-; AVX512BW-FCP-NEXT: vmovdqa 192(%rdi), %xmm8
-; AVX512BW-FCP-NEXT: vpalignr {{.*#+}} xmm9 = mem[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7]
-; AVX512BW-FCP-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX512BW-FCP-NEXT: vperm2i128 {{.*#+}} ymm7 = ymm7[2,3],ymm9[2,3]
+; AVX512BW-FCP-NEXT: vmovdqa 16(%rdi), %xmm2
+; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
+; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm6 = [0,0,0,7]
+; AVX512BW-FCP-NEXT: vpermq %zmm3, %zmm6, %zmm6
+; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm6[4,5,6,7]
+; AVX512BW-FCP-NEXT: vmovdqa 64(%rdi), %ymm6
+; AVX512BW-FCP-NEXT: vpalignr {{.*#+}} ymm6 = mem[8,9,10,11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23]
+; AVX512BW-FCP-NEXT: vmovdqa 192(%rdi), %xmm7
+; AVX512BW-FCP-NEXT: vpalignr {{.*#+}} xmm8 = mem[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
; AVX512BW-FCP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm6[0,1,2,3,4,5],ymm8[6,7]
+; AVX512BW-FCP-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm6[2,3],ymm8[2,3]
+; AVX512BW-FCP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512BW-FCP-NEXT: vmovdqa 128(%rdi), %ymm8
+; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3,4,5],ymm7[6,7]
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} xmm9 = [4,11]
; AVX512BW-FCP-NEXT: vpermi2q %zmm4, %zmm5, %zmm9
-; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
+; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
; AVX512BW-FCP-NEXT: vmovdqa 192(%rdi), %ymm9
-; AVX512BW-FCP-NEXT: vpalignr {{.*#+}} ymm6 = ymm6[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],ymm6[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
+; AVX512BW-FCP-NEXT: vpalignr {{.*#+}} ymm8 = ymm8[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],ymm8[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} xmm9 = [5,12]
; AVX512BW-FCP-NEXT: vpermi2q %zmm4, %zmm5, %zmm9
-; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm9[0,1,2,3],ymm6[4,5,6,7]
+; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm9 = [0,0,4,11]
; AVX512BW-FCP-NEXT: vpermi2q 192(%rdi), %zmm3, %zmm9
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} xmm3 = [6,13]
@@ -949,9 +949,9 @@ define void @load_i64_stride7_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-FCP-NEXT: vmovdqa %ymm0, (%rsi)
; AVX512BW-FCP-NEXT: vmovdqa %ymm1, (%rdx)
; AVX512BW-FCP-NEXT: vmovdqa %ymm2, (%rcx)
-; AVX512BW-FCP-NEXT: vmovdqa %ymm7, (%r8)
-; AVX512BW-FCP-NEXT: vmovdqa %ymm8, (%r9)
-; AVX512BW-FCP-NEXT: vmovdqa %ymm6, (%r10)
+; AVX512BW-FCP-NEXT: vmovdqa %ymm6, (%r8)
+; AVX512BW-FCP-NEXT: vmovdqa %ymm7, (%r9)
+; AVX512BW-FCP-NEXT: vmovdqa %ymm8, (%r10)
; AVX512BW-FCP-NEXT: vmovdqa %ymm3, (%rax)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
@@ -1024,28 +1024,28 @@ define void @load_i64_stride7_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-FCP-NEXT: vpermi2q %zmm5, %zmm4, %zmm1
; AVX512DQ-BW-FCP-NEXT: vpbroadcastq 176(%rdi), %ymm2
; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm2 = [0,0,0,7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 128(%rdi), %ymm6
-; AVX512DQ-BW-FCP-NEXT: vpermi2q 160(%rdi), %ymm6, %ymm2
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 16(%rdi), %xmm7
-; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} xmm7 = xmm7[0,1],mem[2,3]
-; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 64(%rdi), %ymm7
-; AVX512DQ-BW-FCP-NEXT: vpalignr {{.*#+}} ymm7 = mem[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa 192(%rdi), %xmm8
-; AVX512DQ-BW-FCP-NEXT: vpalignr {{.*#+}} xmm9 = mem[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX512DQ-BW-FCP-NEXT: vperm2i128 {{.*#+}} ymm7 = ymm7[2,3],ymm9[2,3]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 16(%rdi), %xmm2
+; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm6 = [0,0,0,7]
+; AVX512DQ-BW-FCP-NEXT: vpermq %zmm3, %zmm6, %zmm6
+; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm6[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 64(%rdi), %ymm6
+; AVX512DQ-BW-FCP-NEXT: vpalignr {{.*#+}} ymm6 = mem[8,9,10,11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 192(%rdi), %xmm7
+; AVX512DQ-BW-FCP-NEXT: vpalignr {{.*#+}} xmm8 = mem[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm6[0,1,2,3,4,5],ymm8[6,7]
+; AVX512DQ-BW-FCP-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm6[2,3],ymm8[2,3]
+; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512DQ-BW-FCP-NEXT: vmovdqa 128(%rdi), %ymm8
+; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3,4,5],ymm7[6,7]
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} xmm9 = [4,11]
; AVX512DQ-BW-FCP-NEXT: vpermi2q %zmm4, %zmm5, %zmm9
-; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
; AVX512DQ-BW-FCP-NEXT: vmovdqa 192(%rdi), %ymm9
-; AVX512DQ-BW-FCP-NEXT: vpalignr {{.*#+}} ymm6 = ymm6[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],ymm6[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
+; AVX512DQ-BW-FCP-NEXT: vpalignr {{.*#+}} ymm8 = ymm8[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],ymm8[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} xmm9 = [5,12]
; AVX512DQ-BW-FCP-NEXT: vpermi2q %zmm4, %zmm5, %zmm9
-; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm9[0,1,2,3],ymm6[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm9 = [0,0,4,11]
; AVX512DQ-BW-FCP-NEXT: vpermi2q 192(%rdi), %zmm3, %zmm9
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} xmm3 = [6,13]
@@ -1054,9 +1054,9 @@ define void @load_i64_stride7_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm0, (%rsi)
; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm1, (%rdx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm2, (%rcx)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm7, (%r8)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm8, (%r9)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm6, (%r10)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm6, (%r8)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm7, (%r9)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm8, (%r10)
; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm3, (%rax)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
index 181f5651784d8a..acedcf42639066 100644
--- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
+++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
@@ -1337,10 +1337,9 @@ define void @vec256_i16_widen_to_i32_factor2_broadcast_to_v8i32_factor8(ptr %in.
;
; AVX512BW-LABEL: vec256_i16_widen_to_i32_factor2_broadcast_to_v8i32_factor8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm1 = [0,17,0,19,0,21,0,23,0,25,0,27,0,29,0,31]
-; AVX512BW-NEXT: vpermi2w 32(%rdi), %ymm0, %ymm1
-; AVX512BW-NEXT: vpaddb (%rsi), %zmm1, %zmm0
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm0 = [0,17,0,19,0,21,0,23,0,25,0,27,0,29,0,31]
+; AVX512BW-NEXT: vpermw (%rdi), %zmm0, %zmm0
+; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rdx)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -1789,10 +1788,9 @@ define void @vec256_i64_widen_to_i128_factor2_broadcast_to_v2i128_factor2(ptr %i
;
; AVX512F-FAST-LABEL: vec256_i64_widen_to_i128_factor2_broadcast_to_v2i128_factor2:
; AVX512F-FAST: # %bb.0:
-; AVX512F-FAST-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512F-FAST-NEXT: vpmovsxbq {{.*#+}} ymm1 = [0,5,0,7]
-; AVX512F-FAST-NEXT: vpermi2q 32(%rdi), %ymm0, %ymm1
-; AVX512F-FAST-NEXT: vpaddb (%rsi), %ymm1, %ymm0
+; AVX512F-FAST-NEXT: vpmovsxbq {{.*#+}} ymm0 = [0,5,0,7]
+; AVX512F-FAST-NEXT: vpermq (%rdi), %zmm0, %zmm0
+; AVX512F-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0
; AVX512F-FAST-NEXT: vmovdqa %ymm0, (%rdx)
; AVX512F-FAST-NEXT: vzeroupper
; AVX512F-FAST-NEXT: retq
@@ -1808,10 +1806,9 @@ define void @vec256_i64_widen_to_i128_factor2_broadcast_to_v2i128_factor2(ptr %i
;
; AVX512DQ-FAST-LABEL: vec256_i64_widen_to_i128_factor2_broadcast_to_v2i128_factor2:
; AVX512DQ-FAST: # %bb.0:
-; AVX512DQ-FAST-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512DQ-FAST-NEXT: vpmovsxbq {{.*#+}} ymm1 = [0,5,0,7]
-; AVX512DQ-FAST-NEXT: vpermi2q 32(%rdi), %ymm0, %ymm1
-; AVX512DQ-FAST-NEXT: vpaddb (%rsi), %ymm1, %ymm0
+; AVX512DQ-FAST-NEXT: vpmovsxbq {{.*#+}} ymm0 = [0,5,0,7]
+; AVX512DQ-FAST-NEXT: vpermq (%rdi), %zmm0, %zmm0
+; AVX512DQ-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0
; AVX512DQ-FAST-NEXT: vmovdqa %ymm0, (%rdx)
; AVX512DQ-FAST-NEXT: vzeroupper
; AVX512DQ-FAST-NEXT: retq
@@ -1827,10 +1824,9 @@ define void @vec256_i64_widen_to_i128_factor2_broadcast_to_v2i128_factor2(ptr %i
;
; AVX512BW-FAST-LABEL: vec256_i64_widen_to_i128_factor2_broadcast_to_v2i128_factor2:
; AVX512BW-FAST: # %bb.0:
-; AVX512BW-FAST-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512BW-FAST-NEXT: vpmovsxbq {{.*#+}} ymm1 = [0,5,0,7]
-; AVX512BW-FAST-NEXT: vpermi2q 32(%rdi), %ymm0, %ymm1
-; AVX512BW-FAST-NEXT: vpaddb (%rsi), %zmm1, %zmm0
+; AVX512BW-FAST-NEXT: vpmovsxbq {{.*#+}} ymm0 = [0,5,0,7]
+; AVX512BW-FAST-NEXT: vpermq (%rdi), %zmm0, %zmm0
+; AVX512BW-FAST-NEXT: vpaddb (%rsi), %zmm0, %zmm0
; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, (%rdx)
; AVX512BW-FAST-NEXT: vzeroupper
; AVX512BW-FAST-NEXT: retq
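
For readers skimming the regenerated checks, the vec256_i64_widen_to_i128_factor2 hunks above all follow the same shape: a VPERMI2Q of two adjacent YMM halves loaded from (%rdi) and 32(%rdi) is replaced by a single VPERMQ over the full ZMM load, reusing the [0,5,0,7] mask. A hypothetical IR reduction with that shape is sketched below; it is not taken from the test file, and the function name, alignment, and exact codegen are assumptions, so whether llc emits VPERMQ for it depends on the subtarget flags in the actual RUN lines.

define <4 x i64> @vec256_i64_widen_sketch(ptr %p) {
  ; Two adjacent 256-bit loads from the same base pointer; concatenating the
  ; halves is "free" because together they are just the 512-bit load at %p.
  %lo = load <4 x i64>, ptr %p, align 32
  %hi.ptr = getelementptr inbounds i8, ptr %p, i64 32
  %hi = load <4 x i64>, ptr %hi.ptr, align 32
  ; Cross-half shuffle using the [0,5,0,7] indices seen in the hunks above.
  %r = shufflevector <4 x i64> %lo, <4 x i64> %hi, <4 x i32> <i32 0, i32 5, i32 0, i32 7>
  ret <4 x i64> %r
}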