[llvm] [SelectionDAG] Deal with POISON for INSERT_VECTOR_ELT/INSERT_SUBVECTOR (part 3) (PR #143105)
Björn Pettersson via llvm-commits
llvm-commits at lists.llvm.org
Fri Jun 6 02:59:06 PDT 2025
https://github.com/bjope created https://github.com/llvm/llvm-project/pull/143105
Target-specific patches to avoid regressions seen after "part 1",
which aims at fixing GitHub issue https://github.com/llvm/llvm-project/issues/141034.
One perhaps controversial change here is that convertToScalableVector
now uses POISON instead of UNDEF for any additional elements added
when converting to the scalable vector. This avoids ending up with
things like
t31: nxv1f32 =
t32: v2f32 = extract_subvector t31, Constant:i64<0>
t38: nxv1f32 = insert_subvector undef:nxv1f32, t32, Constant:i64<0>
since, when the insert is into poison, we can simply use t31 instead
of t38 without risking that t31 is more poisonous.
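For reference, the convertToScalableVector change boils down to using a
POISON base for the widening INSERT_SUBVECTOR. A minimal sketch (simplified;
the in-tree helpers also take a subtarget and handle more cases):

  // Widen fixed-length V into the scalable container VT. Using POISON
  // (rather than UNDEF) for the elements outside V means that a later
  // EXTRACT_SUBVECTOR + INSERT_SUBVECTOR round trip can be folded back
  // to the original scalable value without having to prove that the
  // base is not more poisonous.
  static SDValue convertToScalableVector(EVT VT, SDValue V,
                                         SelectionDAG &DAG) {
    SDLoc DL(V);
    assert(VT.isScalableVector() && "Expected a scalable container type");
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
                       DAG.getPOISON(VT), // was: DAG.getUNDEF(VT)
                       V, DAG.getVectorIdxConstant(0, DL));
  }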
From 004e26ec69adb15235b2c7efe4c39cbfd93129eb Mon Sep 17 00:00:00 2001
From: Bjorn Pettersson <bjorn.a.pettersson at ericsson.com>
Date: Tue, 3 Jun 2025 11:36:26 +0200
Subject: [PATCH 1/4] [test][AArch64] Adjust vector insertion lit tests
The test cases test_insert_v16i8_insert_2_undef_base and
test_insert_v16i8_insert_2_undef_base_different_valeus in
CodeGen/AArch64/arm64-vector-insertion.ll were leaving element 8
in the vector as "undef" without any real explanation. It looked
like a typo, as the input IR was
%v.8 = insertelement <16 x i8> %v.7, i8 %a, i32 8
%v.10 = insertelement <16 x i8> %v.7, i8 %a, i32 10
leaving %v.8 unused.
This patch cleans up the tests a bit by adding separate test cases
that validate what happens when the insert at index 8 is skipped,
while amending the original test cases to use %v.8 instead of %v.7
when creating %v.10.
---
.../CodeGen/AArch64/arm64-vector-insertion.ll | 65 +++++++++++++++++++
1 file changed, 65 insertions(+)
diff --git a/llvm/test/CodeGen/AArch64/arm64-vector-insertion.ll b/llvm/test/CodeGen/AArch64/arm64-vector-insertion.ll
index 94074d1689f6a..5962150ac9ffc 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vector-insertion.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vector-insertion.ll
@@ -66,6 +66,35 @@ define <16 x i8> @test_insert_v16i8_insert_2_undef_base(i8 %a) {
%v.6 = insertelement <16 x i8> %v.4, i8 %a, i32 6
%v.7 = insertelement <16 x i8> %v.6, i8 %a, i32 7
%v.8 = insertelement <16 x i8> %v.7, i8 %a, i32 8
+ %v.10 = insertelement <16 x i8> %v.8, i8 %a, i32 10
+ %v.11 = insertelement <16 x i8> %v.10, i8 %a, i32 11
+ %v.12 = insertelement <16 x i8> %v.11, i8 %a, i32 12
+ %v.13 = insertelement <16 x i8> %v.12, i8 %a, i32 13
+ %v.14 = insertelement <16 x i8> %v.13, i8 %a, i32 14
+ %v.15 = insertelement <16 x i8> %v.14, i8 %a, i32 15
+ ret <16 x i8> %v.15
+}
+
+; Similar to above, but we leave element 8 as undef. One interesting part with
+; this test case is that %a may be poison, so simply inserting %a also at
+; index 8 would make the result vector more poisonous.
+define <16 x i8> @test_insert_v16i8_insert_2_undef_base_skip8(i32 %a0) {
+; CHECK-LABEL: test_insert_v16i8_insert_2_undef_base_skip8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsr w8, w0, #5
+; CHECK-NEXT: dup.16b v0, w8
+; CHECK-NEXT: mov.b v0[5], wzr
+; CHECK-NEXT: mov.b v0[9], wzr
+; CHECK-NEXT: ret
+ %a1 = lshr exact i32 %a0, 5
+ %a = trunc i32 %a1 to i8
+ %v.0 = insertelement <16 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 0, i8 undef, i8 undef, i8 undef, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef> , i8 %a, i32 0
+ %v.1 = insertelement <16 x i8> %v.0, i8 %a, i32 1
+ %v.2 = insertelement <16 x i8> %v.1, i8 %a, i32 2
+ %v.3 = insertelement <16 x i8> %v.2, i8 %a, i32 3
+ %v.4 = insertelement <16 x i8> %v.3, i8 %a, i32 4
+ %v.6 = insertelement <16 x i8> %v.4, i8 %a, i32 6
+ %v.7 = insertelement <16 x i8> %v.6, i8 %a, i32 7
%v.10 = insertelement <16 x i8> %v.7, i8 %a, i32 10
%v.11 = insertelement <16 x i8> %v.10, i8 %a, i32 11
%v.12 = insertelement <16 x i8> %v.11, i8 %a, i32 12
@@ -94,6 +123,42 @@ define <16 x i8> @test_insert_v16i8_insert_2_undef_base_different_valeus(i8 %a,
%v.6 = insertelement <16 x i8> %v.4, i8 %a, i32 6
%v.7 = insertelement <16 x i8> %v.6, i8 %b, i32 7
%v.8 = insertelement <16 x i8> %v.7, i8 %a, i32 8
+ %v.10 = insertelement <16 x i8> %v.8, i8 %a, i32 10
+ %v.11 = insertelement <16 x i8> %v.10, i8 %a, i32 11
+ %v.12 = insertelement <16 x i8> %v.11, i8 %b, i32 12
+ %v.13 = insertelement <16 x i8> %v.12, i8 %a, i32 13
+ %v.14 = insertelement <16 x i8> %v.13, i8 %a, i32 14
+ %v.15 = insertelement <16 x i8> %v.14, i8 %b, i32 15
+ ret <16 x i8> %v.15
+}
+
+; Similar to above, but we leave element 8 as undef. One interesting part with
+; this test case is that %a and %b may be poison, so simply inserting %a or %b
+; at index 8 would make the result vector more poisonous.
+define <16 x i8> @test_insert_v16i8_insert_2_undef_base_different_valeus_skip8(i32 %a0, i32 %b0) {
+; CHECK-LABEL: test_insert_v16i8_insert_2_undef_base_different_valeus_skip8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsr w8, w0, #5
+; CHECK-NEXT: dup.16b v0, w8
+; CHECK-NEXT: lsr w8, w1, #5
+; CHECK-NEXT: mov.b v0[2], w8
+; CHECK-NEXT: mov.b v0[5], wzr
+; CHECK-NEXT: mov.b v0[7], w8
+; CHECK-NEXT: mov.b v0[9], wzr
+; CHECK-NEXT: mov.b v0[12], w8
+; CHECK-NEXT: mov.b v0[15], w8
+; CHECK-NEXT: ret
+ %a1 = lshr exact i32 %a0, 5
+ %a = trunc i32 %a1 to i8
+ %b1 = lshr exact i32 %b0, 5
+ %b = trunc i32 %b1 to i8
+ %v.0 = insertelement <16 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 0, i8 undef, i8 undef, i8 undef, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef> , i8 %a, i32 0
+ %v.1 = insertelement <16 x i8> %v.0, i8 %a, i32 1
+ %v.2 = insertelement <16 x i8> %v.1, i8 %b, i32 2
+ %v.3 = insertelement <16 x i8> %v.2, i8 %a, i32 3
+ %v.4 = insertelement <16 x i8> %v.3, i8 %a, i32 4
+ %v.6 = insertelement <16 x i8> %v.4, i8 %a, i32 6
+ %v.7 = insertelement <16 x i8> %v.6, i8 %b, i32 7
%v.10 = insertelement <16 x i8> %v.7, i8 %a, i32 10
%v.11 = insertelement <16 x i8> %v.10, i8 %a, i32 11
%v.12 = insertelement <16 x i8> %v.11, i8 %b, i32 12
From b3a6687301a6161d9e466f318b271d8c5c2538c0 Mon Sep 17 00:00:00 2001
From: Bjorn Pettersson <bjorn.a.pettersson at ericsson.com>
Date: Sat, 31 May 2025 09:35:09 +0200
Subject: [PATCH 2/4] [SelectionDAG] Deal with POISON for
INSERT_VECTOR_ELT/INSERT_SUBVECTOR (part 1)
As reported in #141034, SelectionDAG::getNode had some unexpected
behavior when trying to create vectors with UNDEF elements. Since
we treat both UNDEF and POISON as undefined (when using isUndef())
we can't just fold away INSERT_VECTOR_ELT/INSERT_SUBVECTOR based on
isUndef(), as that could make the resulting vector more poisonous.
The same kind of bug existed in DAGCombiner::visitINSERT_SUBVECTOR.
Here are some examples:
This fold was done even if vec[idx] was POISON:
INSERT_VECTOR_ELT vec, UNDEF, idx -> vec
This fold was done even if any of vec[idx..idx+size] was POISON:
INSERT_SUBVECTOR vec, UNDEF, idx -> vec
This fold was done even if the elements not extracted from vec could
be POISON:
sub = EXTRACT_SUBVECTOR vec, idx
INSERT_SUBVECTOR UNDEF, sub, idx -> vec
With this patch we avoid such folds unless we can prove that the
result isn't more poisonous when eliminating the insert.
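As an illustration of the semantics (a hand-written IR example, not
taken from the test suite): given

  %r = insertelement <2 x i32> %base, i32 undef, i32 0

the old fold replaced %r with %base. If element 0 of %base is poison,
that is not a refinement: %r has undef (any value is fine) at element 0,
while %base has poison there, so the fold made the result more
poisonous. The fold is only safe when element 0 of %base is known not
to be poison, which is what the new isGuaranteedNotToBePoison checks
with a demanded-elements mask establish.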
This patch by itself results in some regressions. The goal is to
deal with those regressions in follow-up commits.
Fixes #141034
---
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 64 +-
.../lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 65 +-
.../aarch64-neon-vector-insert-uaddlv.ll | 3 +-
.../CodeGen/AArch64/arm64-vector-insertion.ll | 2 +
.../AArch64/sve-fixed-length-fp-vselect.ll | 81 +-
.../AArch64/sve-fixed-length-int-vselect.ll | 108 +-
.../AArch64/sve-fixed-length-masked-gather.ll | 6 +-
...-streaming-mode-fixed-length-fp-vselect.ll | 21 +
...streaming-mode-fixed-length-int-vselect.ll | 28 +
.../AMDGPU/load-local-redundant-copies.ll | 45 +-
.../fixed-vectors-vfw-web-simplification.ll | 90 +-
.../fixed-vectors-vw-web-simplification.ll | 55 +-
.../CodeGen/RISCV/rvv/vector-deinterleave.ll | 36 +
llvm/test/CodeGen/Thumb2/active_lane_mask.ll | 3 +-
llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll | 36 +-
.../vector-interleaved-load-i16-stride-4.ll | 1290 +++++++++--------
.../vector-interleaved-store-i16-stride-5.ll | 4 +
.../vector-interleaved-store-i16-stride-7.ll | 32 +-
.../vector-interleaved-store-i8-stride-5.ll | 16 +-
.../vector-interleaved-store-i8-stride-7.ll | 380 ++---
20 files changed, 1437 insertions(+), 928 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index aba3c0f80a024..a52edca64dbce 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -22905,6 +22905,7 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
auto *IndexC = dyn_cast<ConstantSDNode>(EltNo);
// Insert into out-of-bounds element is undefined.
+ // Code below relies on that we handle this special case early.
if (IndexC && VT.isFixedLengthVector() &&
IndexC->getZExtValue() >= VT.getVectorNumElements())
return DAG.getUNDEF(VT);
@@ -22915,14 +22916,29 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
InVec == InVal.getOperand(0) && EltNo == InVal.getOperand(1))
return InVec;
- if (!IndexC) {
- // If this is variable insert to undef vector, it might be better to splat:
- // inselt undef, InVal, EltNo --> build_vector < InVal, InVal, ... >
- if (InVec.isUndef() && TLI.shouldSplatInsEltVarIndex(VT))
- return DAG.getSplat(VT, DL, InVal);
- return SDValue();
+ // If this is variable insert to undef vector, it might be better to splat:
+ // inselt undef, InVal, EltNo --> build_vector < InVal, InVal, ... >
+ if (!IndexC && InVec.isUndef() && TLI.shouldSplatInsEltVarIndex(VT))
+ return DAG.getSplat(VT, DL, InVal);
+
+ // Try to drop insert of UNDEF/POISON elements. This is also done in getNode,
+ // but we also do it as a DAG combine since for example simplifications into
+ // SPLAT_VECTOR/BUILD_VECTOR may turn poison elements into undef/zero etc, and
+ // then suddenly the InVec is guaranteed to not be poison.
+ if (InVal.isUndef()) {
+ if (IndexC && VT.isFixedLengthVector()) {
+ APInt EltMask = APInt::getOneBitSet(VT.getVectorNumElements(),
+ IndexC->getZExtValue());
+ if (DAG.isGuaranteedNotToBePoison(InVec, EltMask))
+ return InVec;
+ } else if (DAG.isGuaranteedNotToBePoison(InVec)) {
+ return InVec;
+ }
}
+ if (!IndexC)
+ return SDValue();
+
if (VT.isScalableVector())
return SDValue();
@@ -27355,18 +27371,40 @@ SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) {
SDValue N2 = N->getOperand(2);
uint64_t InsIdx = N->getConstantOperandVal(2);
- // If inserting an UNDEF, just return the original vector.
- if (N1.isUndef())
- return N0;
+ // If inserting an UNDEF, just return the original vector (unless it makes the
+ // result more poisonous).
+ if (N1.isUndef()){
+ if (VT.isFixedLengthVector()) {
+ unsigned SubVecNumElts = N1.getValueType().getVectorNumElements();
+ APInt EltMask = APInt::getBitsSet(VT.getVectorNumElements(),
+ InsIdx, InsIdx + SubVecNumElts);
+ if (DAG.isGuaranteedNotToBePoison(N0, EltMask))
+ return N0;
+ } else if (DAG.isGuaranteedNotToBePoison(N0))
+ return N0;
+ }
- // If this is an insert of an extracted vector into an undef vector, we can
- // just use the input to the extract if the types match, and can simplify
+ // If this is an insert of an extracted vector into an undef/poison vector, we
+ // can just use the input to the extract if the types match, and can simplify
// in some cases even if they don't.
if (N0.isUndef() && N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
N1.getOperand(1) == N2) {
+ EVT N1VT = N1.getValueType();
EVT SrcVT = N1.getOperand(0).getValueType();
- if (SrcVT == VT)
- return N1.getOperand(0);
+ if (SrcVT == VT) {
+ // Need to ensure that result isn't more poisonous if skipping both the
+ // extract+insert.
+ if (N0.getOpcode() == ISD::POISON)
+ return N1.getOperand(0);
+ if (VT.isFixedLengthVector() && N1VT.isFixedLengthVector()) {
+ unsigned SubVecNumElts = N1VT.getVectorNumElements();
+ APInt EltMask = APInt::getBitsSet(VT.getVectorNumElements(),
+ InsIdx, InsIdx + SubVecNumElts);
+ if (DAG.isGuaranteedNotToBePoison(N1.getOperand(0), ~EltMask))
+ return N1.getOperand(0);
+ } else if (DAG.isGuaranteedNotToBePoison(N1.getOperand(0)))
+ return N1.getOperand(0);
+ }
// TODO: To remove the zero check, need to adjust the offset to
// a multiple of the new src type.
if (isNullConstant(N2)) {
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 279c7daf71c33..6bff2d90b91ac 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -7900,23 +7900,42 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
// INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF, except
// for scalable vectors where we will generate appropriate code to
// deal with out-of-bounds cases correctly.
- if (N3C && N1.getValueType().isFixedLengthVector() &&
- N3C->getZExtValue() >= N1.getValueType().getVectorNumElements())
+ if (N3C && VT.isFixedLengthVector() &&
+ N3C->getZExtValue() >= VT.getVectorNumElements())
return getUNDEF(VT);
// Undefined index can be assumed out-of-bounds, so that's UNDEF too.
if (N3.isUndef())
return getUNDEF(VT);
- // If the inserted element is an UNDEF, just use the input vector.
- if (N2.isUndef())
+ // If inserting poison, just use the input vector.
+ if (N2.getOpcode() == ISD::POISON)
return N1;
+ // Inserting undef into undef/poison is still undef.
+ if (N2.getOpcode() == ISD::UNDEF && N1.isUndef())
+ return getUNDEF(VT);
+
+ // If the inserted element is an UNDEF, just use the input vector.
+ // But not if skipping the insert could make the result more poisonous.
+ if (N2.isUndef()) {
+ if (N3C && VT.isFixedLengthVector()) {
+ APInt EltMask = APInt::getOneBitSet(VT.getVectorNumElements(),
+ N3C->getZExtValue());
+ if (isGuaranteedNotToBePoison(N1, EltMask))
+ return N1;
+ } else if (isGuaranteedNotToBePoison(N1))
+ return N1;
+ }
break;
}
case ISD::INSERT_SUBVECTOR: {
- // Inserting undef into undef is still undef.
- if (N1.isUndef() && N2.isUndef())
+ // If inserting poison, just use the input vector,
+ if (N2.getOpcode() == ISD::POISON)
+ return N1;
+
+ // Inserting undef into undef/poison is still undef.
+ if (N2.getOpcode() == ISD::UNDEF && N1.isUndef())
return getUNDEF(VT);
EVT N2VT = N2.getValueType();
@@ -7945,11 +7964,37 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
if (VT == N2VT)
return N2;
- // If this is an insert of an extracted vector into an undef vector, we
- // can just use the input to the extract.
+ // If this is an insert of an extracted vector into an undef/poison vector,
+ // we can just use the input to the extract. But not if skipping the
+ // extract+insert could make the result more poisonous.
if (N1.isUndef() && N2.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
- N2.getOperand(1) == N3 && N2.getOperand(0).getValueType() == VT)
- return N2.getOperand(0);
+ N2.getOperand(1) == N3 && N2.getOperand(0).getValueType() == VT) {
+ if (N1.getOpcode() == ISD::POISON)
+ return N2.getOperand(0);
+ if (VT.isFixedLengthVector() && N2VT.isFixedLengthVector()) {
+ unsigned LoBit = N3->getAsZExtVal();
+ unsigned HiBit = LoBit + N2VT.getVectorNumElements();
+ APInt EltMask = APInt::getBitsSet(VT.getVectorNumElements(),
+ LoBit, HiBit);
+ if (isGuaranteedNotToBePoison(N2.getOperand(0), ~EltMask))
+ return N2.getOperand(0);
+ } else if (isGuaranteedNotToBePoison(N2.getOperand(0)))
+ return N2.getOperand(0);
+ }
+
+ // If the inserted subvector is UNDEF, just use the input vector.
+ // But not if skipping the insert could make the result more poisonous.
+ if (N2.isUndef()) {
+ if (VT.isFixedLengthVector()) {
+ unsigned LoBit = N3->getAsZExtVal();
+ unsigned HiBit = LoBit + N2VT.getVectorNumElements();
+ APInt EltMask = APInt::getBitsSet(VT.getVectorNumElements(),
+ LoBit, HiBit);
+ if (isGuaranteedNotToBePoison(N1, EltMask))
+ return N1;
+ } else if (isGuaranteedNotToBePoison(N1))
+ return N1;
+ }
break;
}
case ISD::BITCAST:
diff --git a/llvm/test/CodeGen/AArch64/aarch64-neon-vector-insert-uaddlv.ll b/llvm/test/CodeGen/AArch64/aarch64-neon-vector-insert-uaddlv.ll
index 91eda8d552397..83e35599c4e9e 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-neon-vector-insert-uaddlv.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-neon-vector-insert-uaddlv.ll
@@ -229,9 +229,10 @@ define void @insert_vec_v3i16_uaddlv_from_v8i16(ptr %0) {
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: movi.2d v0, #0000000000000000
; CHECK-NEXT: movi.2d v1, #0000000000000000
-; CHECK-NEXT: add x8, x0, #8
; CHECK-NEXT: uaddlv.8h s0, v0
; CHECK-NEXT: mov.h v1[0], v0[0]
+; CHECK-NEXT: mov.h v1[3], w8
+; CHECK-NEXT: add x8, x0, #8
; CHECK-NEXT: ushll.4s v1, v1, #0
; CHECK-NEXT: ucvtf.4s v1, v1
; CHECK-NEXT: st1.s { v1 }[2], [x8]
diff --git a/llvm/test/CodeGen/AArch64/arm64-vector-insertion.ll b/llvm/test/CodeGen/AArch64/arm64-vector-insertion.ll
index 5962150ac9ffc..2c44f56316801 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vector-insertion.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vector-insertion.ll
@@ -84,6 +84,7 @@ define <16 x i8> @test_insert_v16i8_insert_2_undef_base_skip8(i32 %a0) {
; CHECK-NEXT: lsr w8, w0, #5
; CHECK-NEXT: dup.16b v0, w8
; CHECK-NEXT: mov.b v0[5], wzr
+; CHECK-NEXT: mov.b v0[8], w8
; CHECK-NEXT: mov.b v0[9], wzr
; CHECK-NEXT: ret
%a1 = lshr exact i32 %a0, 5
@@ -144,6 +145,7 @@ define <16 x i8> @test_insert_v16i8_insert_2_undef_base_different_valeus_skip8(i
; CHECK-NEXT: mov.b v0[2], w8
; CHECK-NEXT: mov.b v0[5], wzr
; CHECK-NEXT: mov.b v0[7], w8
+; CHECK-NEXT: mov.b v0[8], w8
; CHECK-NEXT: mov.b v0[9], wzr
; CHECK-NEXT: mov.b v0[12], w8
; CHECK-NEXT: mov.b v0[15], w8
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-vselect.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-vselect.ll
index 9efe0b33910c8..2905d707bdd09 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-vselect.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-vselect.ll
@@ -37,6 +37,10 @@ define void @select_v16f16(ptr %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1]
; CHECK-NEXT: fcmeq p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT: mov z2.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: and z2.h, z2.h, #0x1
+; CHECK-NEXT: cmpne p1.h, p1/z, z2.h, #0
; CHECK-NEXT: sel z0.h, p1, z0.h, z1.h
; CHECK-NEXT: st1h { z0.h }, p0, [x0]
; CHECK-NEXT: ret
@@ -59,8 +63,15 @@ define void @select_v32f16(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: fcmeq p1.h, p0/z, z0.h, z1.h
; VBITS_GE_256-NEXT: fcmeq p2.h, p0/z, z2.h, z3.h
-; VBITS_GE_256-NEXT: sel z0.h, p1, z0.h, z1.h
-; VBITS_GE_256-NEXT: sel z1.h, p2, z2.h, z3.h
+; VBITS_GE_256-NEXT: mov z4.h, p1/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_256-NEXT: ptrue p1.h
+; VBITS_GE_256-NEXT: mov z5.h, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_256-NEXT: and z4.h, z4.h, #0x1
+; VBITS_GE_256-NEXT: and z5.h, z5.h, #0x1
+; VBITS_GE_256-NEXT: cmpne p2.h, p1/z, z4.h, #0
+; VBITS_GE_256-NEXT: cmpne p1.h, p1/z, z5.h, #0
+; VBITS_GE_256-NEXT: sel z0.h, p2, z0.h, z1.h
+; VBITS_GE_256-NEXT: sel z1.h, p1, z2.h, z3.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
@@ -71,6 +82,10 @@ define void @select_v32f16(ptr %a, ptr %b) #0 {
; VBITS_GE_512-NEXT: ld1h { z0.h }, p0/z, [x0]
; VBITS_GE_512-NEXT: ld1h { z1.h }, p0/z, [x1]
; VBITS_GE_512-NEXT: fcmeq p1.h, p0/z, z0.h, z1.h
+; VBITS_GE_512-NEXT: mov z2.h, p1/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT: ptrue p1.h
+; VBITS_GE_512-NEXT: and z2.h, z2.h, #0x1
+; VBITS_GE_512-NEXT: cmpne p1.h, p1/z, z2.h, #0
; VBITS_GE_512-NEXT: sel z0.h, p1, z0.h, z1.h
; VBITS_GE_512-NEXT: st1h { z0.h }, p0, [x0]
; VBITS_GE_512-NEXT: ret
@@ -89,6 +104,10 @@ define void @select_v64f16(ptr %a, ptr %b) vscale_range(8,0) #0 {
; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1]
; CHECK-NEXT: fcmeq p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT: mov z2.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: and z2.h, z2.h, #0x1
+; CHECK-NEXT: cmpne p1.h, p1/z, z2.h, #0
; CHECK-NEXT: sel z0.h, p1, z0.h, z1.h
; CHECK-NEXT: st1h { z0.h }, p0, [x0]
; CHECK-NEXT: ret
@@ -107,6 +126,10 @@ define void @select_v128f16(ptr %a, ptr %b) vscale_range(16,0) #0 {
; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1]
; CHECK-NEXT: fcmeq p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT: mov z2.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: and z2.h, z2.h, #0x1
+; CHECK-NEXT: cmpne p1.h, p1/z, z2.h, #0
; CHECK-NEXT: sel z0.h, p1, z0.h, z1.h
; CHECK-NEXT: st1h { z0.h }, p0, [x0]
; CHECK-NEXT: ret
@@ -150,6 +173,10 @@ define void @select_v8f32(ptr %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1]
; CHECK-NEXT: fcmeq p1.s, p0/z, z0.s, z1.s
+; CHECK-NEXT: mov z2.s, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: and z2.s, z2.s, #0x1
+; CHECK-NEXT: cmpne p1.s, p1/z, z2.s, #0
; CHECK-NEXT: sel z0.s, p1, z0.s, z1.s
; CHECK-NEXT: st1w { z0.s }, p0, [x0]
; CHECK-NEXT: ret
@@ -172,8 +199,15 @@ define void @select_v16f32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: fcmeq p1.s, p0/z, z0.s, z1.s
; VBITS_GE_256-NEXT: fcmeq p2.s, p0/z, z2.s, z3.s
-; VBITS_GE_256-NEXT: sel z0.s, p1, z0.s, z1.s
-; VBITS_GE_256-NEXT: sel z1.s, p2, z2.s, z3.s
+; VBITS_GE_256-NEXT: mov z4.s, p1/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_256-NEXT: ptrue p1.s
+; VBITS_GE_256-NEXT: mov z5.s, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_256-NEXT: and z4.s, z4.s, #0x1
+; VBITS_GE_256-NEXT: and z5.s, z5.s, #0x1
+; VBITS_GE_256-NEXT: cmpne p2.s, p1/z, z4.s, #0
+; VBITS_GE_256-NEXT: cmpne p1.s, p1/z, z5.s, #0
+; VBITS_GE_256-NEXT: sel z0.s, p2, z0.s, z1.s
+; VBITS_GE_256-NEXT: sel z1.s, p1, z2.s, z3.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
@@ -184,6 +218,10 @@ define void @select_v16f32(ptr %a, ptr %b) #0 {
; VBITS_GE_512-NEXT: ld1w { z0.s }, p0/z, [x0]
; VBITS_GE_512-NEXT: ld1w { z1.s }, p0/z, [x1]
; VBITS_GE_512-NEXT: fcmeq p1.s, p0/z, z0.s, z1.s
+; VBITS_GE_512-NEXT: mov z2.s, p1/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT: ptrue p1.s
+; VBITS_GE_512-NEXT: and z2.s, z2.s, #0x1
+; VBITS_GE_512-NEXT: cmpne p1.s, p1/z, z2.s, #0
; VBITS_GE_512-NEXT: sel z0.s, p1, z0.s, z1.s
; VBITS_GE_512-NEXT: st1w { z0.s }, p0, [x0]
; VBITS_GE_512-NEXT: ret
@@ -202,6 +240,10 @@ define void @select_v32f32(ptr %a, ptr %b) vscale_range(8,0) #0 {
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1]
; CHECK-NEXT: fcmeq p1.s, p0/z, z0.s, z1.s
+; CHECK-NEXT: mov z2.s, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: and z2.s, z2.s, #0x1
+; CHECK-NEXT: cmpne p1.s, p1/z, z2.s, #0
; CHECK-NEXT: sel z0.s, p1, z0.s, z1.s
; CHECK-NEXT: st1w { z0.s }, p0, [x0]
; CHECK-NEXT: ret
@@ -220,6 +262,10 @@ define void @select_v64f32(ptr %a, ptr %b) vscale_range(16,0) #0 {
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1]
; CHECK-NEXT: fcmeq p1.s, p0/z, z0.s, z1.s
+; CHECK-NEXT: mov z2.s, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: and z2.s, z2.s, #0x1
+; CHECK-NEXT: cmpne p1.s, p1/z, z2.s, #0
; CHECK-NEXT: sel z0.s, p1, z0.s, z1.s
; CHECK-NEXT: st1w { z0.s }, p0, [x0]
; CHECK-NEXT: ret
@@ -264,6 +310,10 @@ define void @select_v4f64(ptr %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1]
; CHECK-NEXT: fcmeq p1.d, p0/z, z0.d, z1.d
+; CHECK-NEXT: mov z2.d, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: and z2.d, z2.d, #0x1
+; CHECK-NEXT: cmpne p1.d, p1/z, z2.d, #0
; CHECK-NEXT: sel z0.d, p1, z0.d, z1.d
; CHECK-NEXT: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
@@ -286,8 +336,15 @@ define void @select_v8f64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: fcmeq p1.d, p0/z, z0.d, z1.d
; VBITS_GE_256-NEXT: fcmeq p2.d, p0/z, z2.d, z3.d
-; VBITS_GE_256-NEXT: sel z0.d, p1, z0.d, z1.d
-; VBITS_GE_256-NEXT: sel z1.d, p2, z2.d, z3.d
+; VBITS_GE_256-NEXT: mov z4.d, p1/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_256-NEXT: ptrue p1.d
+; VBITS_GE_256-NEXT: mov z5.d, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_256-NEXT: and z4.d, z4.d, #0x1
+; VBITS_GE_256-NEXT: and z5.d, z5.d, #0x1
+; VBITS_GE_256-NEXT: cmpne p2.d, p1/z, z4.d, #0
+; VBITS_GE_256-NEXT: cmpne p1.d, p1/z, z5.d, #0
+; VBITS_GE_256-NEXT: sel z0.d, p2, z0.d, z1.d
+; VBITS_GE_256-NEXT: sel z1.d, p1, z2.d, z3.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
@@ -298,6 +355,10 @@ define void @select_v8f64(ptr %a, ptr %b) #0 {
; VBITS_GE_512-NEXT: ld1d { z0.d }, p0/z, [x0]
; VBITS_GE_512-NEXT: ld1d { z1.d }, p0/z, [x1]
; VBITS_GE_512-NEXT: fcmeq p1.d, p0/z, z0.d, z1.d
+; VBITS_GE_512-NEXT: mov z2.d, p1/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT: ptrue p1.d
+; VBITS_GE_512-NEXT: and z2.d, z2.d, #0x1
+; VBITS_GE_512-NEXT: cmpne p1.d, p1/z, z2.d, #0
; VBITS_GE_512-NEXT: sel z0.d, p1, z0.d, z1.d
; VBITS_GE_512-NEXT: st1d { z0.d }, p0, [x0]
; VBITS_GE_512-NEXT: ret
@@ -316,6 +377,10 @@ define void @select_v16f64(ptr %a, ptr %b) vscale_range(8,0) #0 {
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1]
; CHECK-NEXT: fcmeq p1.d, p0/z, z0.d, z1.d
+; CHECK-NEXT: mov z2.d, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: and z2.d, z2.d, #0x1
+; CHECK-NEXT: cmpne p1.d, p1/z, z2.d, #0
; CHECK-NEXT: sel z0.d, p1, z0.d, z1.d
; CHECK-NEXT: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
@@ -334,6 +399,10 @@ define void @select_v32f64(ptr %a, ptr %b) vscale_range(16,0) #0 {
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1]
; CHECK-NEXT: fcmeq p1.d, p0/z, z0.d, z1.d
+; CHECK-NEXT: mov z2.d, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: and z2.d, z2.d, #0x1
+; CHECK-NEXT: cmpne p1.d, p1/z, z2.d, #0
; CHECK-NEXT: sel z0.d, p1, z0.d, z1.d
; CHECK-NEXT: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll
index 9cebbc4aab9b7..0e95da31c13cc 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll
@@ -36,6 +36,10 @@ define void @select_v32i8(ptr %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT: ld1b { z1.b }, p0/z, [x1]
; CHECK-NEXT: cmpeq p1.b, p0/z, z0.b, z1.b
+; CHECK-NEXT: mov z2.b, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ptrue p1.b
+; CHECK-NEXT: and z2.b, z2.b, #0x1
+; CHECK-NEXT: cmpne p1.b, p1/z, z2.b, #0
; CHECK-NEXT: sel z0.b, p1, z0.b, z1.b
; CHECK-NEXT: st1b { z0.b }, p0, [x0]
; CHECK-NEXT: ret
@@ -58,8 +62,15 @@ define void @select_v64i8(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1]
; VBITS_GE_256-NEXT: cmpeq p1.b, p0/z, z0.b, z1.b
; VBITS_GE_256-NEXT: cmpeq p2.b, p0/z, z2.b, z3.b
-; VBITS_GE_256-NEXT: sel z0.b, p1, z0.b, z1.b
-; VBITS_GE_256-NEXT: sel z1.b, p2, z2.b, z3.b
+; VBITS_GE_256-NEXT: mov z4.b, p1/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_256-NEXT: ptrue p1.b
+; VBITS_GE_256-NEXT: mov z5.b, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_256-NEXT: and z4.b, z4.b, #0x1
+; VBITS_GE_256-NEXT: and z5.b, z5.b, #0x1
+; VBITS_GE_256-NEXT: cmpne p2.b, p1/z, z4.b, #0
+; VBITS_GE_256-NEXT: cmpne p1.b, p1/z, z5.b, #0
+; VBITS_GE_256-NEXT: sel z0.b, p2, z0.b, z1.b
+; VBITS_GE_256-NEXT: sel z1.b, p1, z2.b, z3.b
; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8]
; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0]
; VBITS_GE_256-NEXT: ret
@@ -70,6 +81,10 @@ define void @select_v64i8(ptr %a, ptr %b) #0 {
; VBITS_GE_512-NEXT: ld1b { z0.b }, p0/z, [x0]
; VBITS_GE_512-NEXT: ld1b { z1.b }, p0/z, [x1]
; VBITS_GE_512-NEXT: cmpeq p1.b, p0/z, z0.b, z1.b
+; VBITS_GE_512-NEXT: mov z2.b, p1/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT: ptrue p1.b
+; VBITS_GE_512-NEXT: and z2.b, z2.b, #0x1
+; VBITS_GE_512-NEXT: cmpne p1.b, p1/z, z2.b, #0
; VBITS_GE_512-NEXT: sel z0.b, p1, z0.b, z1.b
; VBITS_GE_512-NEXT: st1b { z0.b }, p0, [x0]
; VBITS_GE_512-NEXT: ret
@@ -88,6 +103,10 @@ define void @select_v128i8(ptr %a, ptr %b) vscale_range(8,0) #0 {
; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT: ld1b { z1.b }, p0/z, [x1]
; CHECK-NEXT: cmpeq p1.b, p0/z, z0.b, z1.b
+; CHECK-NEXT: mov z2.b, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ptrue p1.b
+; CHECK-NEXT: and z2.b, z2.b, #0x1
+; CHECK-NEXT: cmpne p1.b, p1/z, z2.b, #0
; CHECK-NEXT: sel z0.b, p1, z0.b, z1.b
; CHECK-NEXT: st1b { z0.b }, p0, [x0]
; CHECK-NEXT: ret
@@ -106,6 +125,10 @@ define void @select_v256i8(ptr %a, ptr %b) vscale_range(16,0) #0 {
; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT: ld1b { z1.b }, p0/z, [x1]
; CHECK-NEXT: cmpeq p1.b, p0/z, z0.b, z1.b
+; CHECK-NEXT: mov z2.b, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ptrue p1.b
+; CHECK-NEXT: and z2.b, z2.b, #0x1
+; CHECK-NEXT: cmpne p1.b, p1/z, z2.b, #0
; CHECK-NEXT: sel z0.b, p1, z0.b, z1.b
; CHECK-NEXT: st1b { z0.b }, p0, [x0]
; CHECK-NEXT: ret
@@ -149,6 +172,10 @@ define void @select_v16i16(ptr %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1]
; CHECK-NEXT: cmpeq p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT: mov z2.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: and z2.h, z2.h, #0x1
+; CHECK-NEXT: cmpne p1.h, p1/z, z2.h, #0
; CHECK-NEXT: sel z0.h, p1, z0.h, z1.h
; CHECK-NEXT: st1h { z0.h }, p0, [x0]
; CHECK-NEXT: ret
@@ -171,8 +198,15 @@ define void @select_v32i16(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: cmpeq p1.h, p0/z, z0.h, z1.h
; VBITS_GE_256-NEXT: cmpeq p2.h, p0/z, z2.h, z3.h
-; VBITS_GE_256-NEXT: sel z0.h, p1, z0.h, z1.h
-; VBITS_GE_256-NEXT: sel z1.h, p2, z2.h, z3.h
+; VBITS_GE_256-NEXT: mov z4.h, p1/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_256-NEXT: ptrue p1.h
+; VBITS_GE_256-NEXT: mov z5.h, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_256-NEXT: and z4.h, z4.h, #0x1
+; VBITS_GE_256-NEXT: and z5.h, z5.h, #0x1
+; VBITS_GE_256-NEXT: cmpne p2.h, p1/z, z4.h, #0
+; VBITS_GE_256-NEXT: cmpne p1.h, p1/z, z5.h, #0
+; VBITS_GE_256-NEXT: sel z0.h, p2, z0.h, z1.h
+; VBITS_GE_256-NEXT: sel z1.h, p1, z2.h, z3.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
@@ -183,6 +217,10 @@ define void @select_v32i16(ptr %a, ptr %b) #0 {
; VBITS_GE_512-NEXT: ld1h { z0.h }, p0/z, [x0]
; VBITS_GE_512-NEXT: ld1h { z1.h }, p0/z, [x1]
; VBITS_GE_512-NEXT: cmpeq p1.h, p0/z, z0.h, z1.h
+; VBITS_GE_512-NEXT: mov z2.h, p1/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT: ptrue p1.h
+; VBITS_GE_512-NEXT: and z2.h, z2.h, #0x1
+; VBITS_GE_512-NEXT: cmpne p1.h, p1/z, z2.h, #0
; VBITS_GE_512-NEXT: sel z0.h, p1, z0.h, z1.h
; VBITS_GE_512-NEXT: st1h { z0.h }, p0, [x0]
; VBITS_GE_512-NEXT: ret
@@ -201,6 +239,10 @@ define void @select_v64i16(ptr %a, ptr %b) vscale_range(8,0) #0 {
; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1]
; CHECK-NEXT: cmpeq p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT: mov z2.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: and z2.h, z2.h, #0x1
+; CHECK-NEXT: cmpne p1.h, p1/z, z2.h, #0
; CHECK-NEXT: sel z0.h, p1, z0.h, z1.h
; CHECK-NEXT: st1h { z0.h }, p0, [x0]
; CHECK-NEXT: ret
@@ -219,6 +261,10 @@ define void @select_v128i16(ptr %a, ptr %b) vscale_range(16,0) #0 {
; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1]
; CHECK-NEXT: cmpeq p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT: mov z2.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: and z2.h, z2.h, #0x1
+; CHECK-NEXT: cmpne p1.h, p1/z, z2.h, #0
; CHECK-NEXT: sel z0.h, p1, z0.h, z1.h
; CHECK-NEXT: st1h { z0.h }, p0, [x0]
; CHECK-NEXT: ret
@@ -262,6 +308,10 @@ define void @select_v8i32(ptr %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1]
; CHECK-NEXT: cmpeq p1.s, p0/z, z0.s, z1.s
+; CHECK-NEXT: mov z2.s, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: and z2.s, z2.s, #0x1
+; CHECK-NEXT: cmpne p1.s, p1/z, z2.s, #0
; CHECK-NEXT: sel z0.s, p1, z0.s, z1.s
; CHECK-NEXT: st1w { z0.s }, p0, [x0]
; CHECK-NEXT: ret
@@ -284,8 +334,15 @@ define void @select_v16i32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: cmpeq p1.s, p0/z, z0.s, z1.s
; VBITS_GE_256-NEXT: cmpeq p2.s, p0/z, z2.s, z3.s
-; VBITS_GE_256-NEXT: sel z0.s, p1, z0.s, z1.s
-; VBITS_GE_256-NEXT: sel z1.s, p2, z2.s, z3.s
+; VBITS_GE_256-NEXT: mov z4.s, p1/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_256-NEXT: ptrue p1.s
+; VBITS_GE_256-NEXT: mov z5.s, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_256-NEXT: and z4.s, z4.s, #0x1
+; VBITS_GE_256-NEXT: and z5.s, z5.s, #0x1
+; VBITS_GE_256-NEXT: cmpne p2.s, p1/z, z4.s, #0
+; VBITS_GE_256-NEXT: cmpne p1.s, p1/z, z5.s, #0
+; VBITS_GE_256-NEXT: sel z0.s, p2, z0.s, z1.s
+; VBITS_GE_256-NEXT: sel z1.s, p1, z2.s, z3.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
@@ -296,6 +353,10 @@ define void @select_v16i32(ptr %a, ptr %b) #0 {
; VBITS_GE_512-NEXT: ld1w { z0.s }, p0/z, [x0]
; VBITS_GE_512-NEXT: ld1w { z1.s }, p0/z, [x1]
; VBITS_GE_512-NEXT: cmpeq p1.s, p0/z, z0.s, z1.s
+; VBITS_GE_512-NEXT: mov z2.s, p1/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT: ptrue p1.s
+; VBITS_GE_512-NEXT: and z2.s, z2.s, #0x1
+; VBITS_GE_512-NEXT: cmpne p1.s, p1/z, z2.s, #0
; VBITS_GE_512-NEXT: sel z0.s, p1, z0.s, z1.s
; VBITS_GE_512-NEXT: st1w { z0.s }, p0, [x0]
; VBITS_GE_512-NEXT: ret
@@ -314,6 +375,10 @@ define void @select_v32i32(ptr %a, ptr %b) vscale_range(8,0) #0 {
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1]
; CHECK-NEXT: cmpeq p1.s, p0/z, z0.s, z1.s
+; CHECK-NEXT: mov z2.s, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: and z2.s, z2.s, #0x1
+; CHECK-NEXT: cmpne p1.s, p1/z, z2.s, #0
; CHECK-NEXT: sel z0.s, p1, z0.s, z1.s
; CHECK-NEXT: st1w { z0.s }, p0, [x0]
; CHECK-NEXT: ret
@@ -332,6 +397,10 @@ define void @select_v64i32(ptr %a, ptr %b) vscale_range(16,0) #0 {
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1]
; CHECK-NEXT: cmpeq p1.s, p0/z, z0.s, z1.s
+; CHECK-NEXT: mov z2.s, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: and z2.s, z2.s, #0x1
+; CHECK-NEXT: cmpne p1.s, p1/z, z2.s, #0
; CHECK-NEXT: sel z0.s, p1, z0.s, z1.s
; CHECK-NEXT: st1w { z0.s }, p0, [x0]
; CHECK-NEXT: ret
@@ -376,6 +445,10 @@ define void @select_v4i64(ptr %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1]
; CHECK-NEXT: cmpeq p1.d, p0/z, z0.d, z1.d
+; CHECK-NEXT: mov z2.d, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: and z2.d, z2.d, #0x1
+; CHECK-NEXT: cmpne p1.d, p1/z, z2.d, #0
; CHECK-NEXT: sel z0.d, p1, z0.d, z1.d
; CHECK-NEXT: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
@@ -398,8 +471,15 @@ define void @select_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: cmpeq p1.d, p0/z, z0.d, z1.d
; VBITS_GE_256-NEXT: cmpeq p2.d, p0/z, z2.d, z3.d
-; VBITS_GE_256-NEXT: sel z0.d, p1, z0.d, z1.d
-; VBITS_GE_256-NEXT: sel z1.d, p2, z2.d, z3.d
+; VBITS_GE_256-NEXT: mov z4.d, p1/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_256-NEXT: ptrue p1.d
+; VBITS_GE_256-NEXT: mov z5.d, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_256-NEXT: and z4.d, z4.d, #0x1
+; VBITS_GE_256-NEXT: and z5.d, z5.d, #0x1
+; VBITS_GE_256-NEXT: cmpne p2.d, p1/z, z4.d, #0
+; VBITS_GE_256-NEXT: cmpne p1.d, p1/z, z5.d, #0
+; VBITS_GE_256-NEXT: sel z0.d, p2, z0.d, z1.d
+; VBITS_GE_256-NEXT: sel z1.d, p1, z2.d, z3.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
@@ -410,6 +490,10 @@ define void @select_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_512-NEXT: ld1d { z0.d }, p0/z, [x0]
; VBITS_GE_512-NEXT: ld1d { z1.d }, p0/z, [x1]
; VBITS_GE_512-NEXT: cmpeq p1.d, p0/z, z0.d, z1.d
+; VBITS_GE_512-NEXT: mov z2.d, p1/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT: ptrue p1.d
+; VBITS_GE_512-NEXT: and z2.d, z2.d, #0x1
+; VBITS_GE_512-NEXT: cmpne p1.d, p1/z, z2.d, #0
; VBITS_GE_512-NEXT: sel z0.d, p1, z0.d, z1.d
; VBITS_GE_512-NEXT: st1d { z0.d }, p0, [x0]
; VBITS_GE_512-NEXT: ret
@@ -428,6 +512,10 @@ define void @select_v16i64(ptr %a, ptr %b) vscale_range(8,0) #0 {
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1]
; CHECK-NEXT: cmpeq p1.d, p0/z, z0.d, z1.d
+; CHECK-NEXT: mov z2.d, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: and z2.d, z2.d, #0x1
+; CHECK-NEXT: cmpne p1.d, p1/z, z2.d, #0
; CHECK-NEXT: sel z0.d, p1, z0.d, z1.d
; CHECK-NEXT: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
@@ -446,6 +534,10 @@ define void @select_v32i64(ptr %a, ptr %b) vscale_range(16,0) #0 {
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1]
; CHECK-NEXT: cmpeq p1.d, p0/z, z0.d, z1.d
+; CHECK-NEXT: mov z2.d, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: and z2.d, z2.d, #0x1
+; CHECK-NEXT: cmpne p1.d, p1/z, z2.d, #0
; CHECK-NEXT: sel z0.d, p1, z0.d, z1.d
; CHECK-NEXT: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
index 093e6cd9328c8..ebd32c73ec65b 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
@@ -1198,11 +1198,15 @@ define void @masked_gather_passthru(ptr %a, ptr %b, ptr %c) vscale_range(16,0) #
; CHECK-NEXT: ptrue p0.s, vl32
; CHECK-NEXT: ptrue p2.d, vl32
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
-; CHECK-NEXT: ld1w { z1.s }, p0/z, [x2]
; CHECK-NEXT: fcmeq p1.s, p0/z, z0.s, #0.0
; CHECK-NEXT: ld1d { z0.d }, p2/z, [x1]
; CHECK-NEXT: punpklo p2.h, p1.b
+; CHECK-NEXT: mov z1.s, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ptrue p1.s
; CHECK-NEXT: ld1w { z0.d }, p2/z, [z0.d]
+; CHECK-NEXT: and z1.s, z1.s, #0x1
+; CHECK-NEXT: cmpne p1.s, p1/z, z1.s, #0
+; CHECK-NEXT: ld1w { z1.s }, p0/z, [x2]
; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
; CHECK-NEXT: sel z0.s, p1, z0.s, z1.s
; CHECK-NEXT: st1w { z0.s }, p0, [x0]
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll
index ec0693a541e44..8b845dff64ffe 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll
@@ -199,6 +199,13 @@ define void @select_v16f16(ptr %a, ptr %b) {
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fcmeq p1.h, p0/z, z1.h, z0.h
; CHECK-NEXT: fcmeq p0.h, p0/z, z2.h, z3.h
+; CHECK-NEXT: mov z4.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: mov z5.h, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: and z4.h, z4.h, #0x1
+; CHECK-NEXT: and z5.h, z5.h, #0x1
+; CHECK-NEXT: cmpne p1.h, p0/z, z4.h, #0
+; CHECK-NEXT: cmpne p0.h, p0/z, z5.h, #0
; CHECK-NEXT: mov z0.h, p1/m, z1.h
; CHECK-NEXT: sel z1.h, p0, z2.h, z3.h
; CHECK-NEXT: stp q0, q1, [x0]
@@ -434,6 +441,13 @@ define void @select_v8f32(ptr %a, ptr %b) {
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fcmeq p1.s, p0/z, z1.s, z0.s
; CHECK-NEXT: fcmeq p0.s, p0/z, z2.s, z3.s
+; CHECK-NEXT: mov z4.s, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: mov z5.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: and z4.s, z4.s, #0x1
+; CHECK-NEXT: and z5.s, z5.s, #0x1
+; CHECK-NEXT: cmpne p1.s, p0/z, z4.s, #0
+; CHECK-NEXT: cmpne p0.s, p0/z, z5.s, #0
; CHECK-NEXT: mov z0.s, p1/m, z1.s
; CHECK-NEXT: sel z1.s, p0, z2.s, z3.s
; CHECK-NEXT: stp q0, q1, [x0]
@@ -558,6 +572,13 @@ define void @select_v4f64(ptr %a, ptr %b) {
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fcmeq p1.d, p0/z, z1.d, z0.d
; CHECK-NEXT: fcmeq p0.d, p0/z, z2.d, z3.d
+; CHECK-NEXT: mov z4.d, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: mov z5.d, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: and z4.d, z4.d, #0x1
+; CHECK-NEXT: and z5.d, z5.d, #0x1
+; CHECK-NEXT: cmpne p1.d, p0/z, z4.d, #0
+; CHECK-NEXT: cmpne p0.d, p0/z, z5.d, #0
; CHECK-NEXT: mov z0.d, p1/m, z1.d
; CHECK-NEXT: sel z1.d, p0, z2.d, z3.d
; CHECK-NEXT: stp q0, q1, [x0]
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-vselect.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-vselect.ll
index 39701131d7db6..12b7886d76c70 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-vselect.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-vselect.ll
@@ -293,6 +293,13 @@ define void @select_v32i8(ptr %a, ptr %b) {
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: cmpeq p1.b, p0/z, z1.b, z0.b
; CHECK-NEXT: cmpeq p0.b, p0/z, z2.b, z3.b
+; CHECK-NEXT: mov z4.b, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: mov z5.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ptrue p0.b
+; CHECK-NEXT: and z4.b, z4.b, #0x1
+; CHECK-NEXT: and z5.b, z5.b, #0x1
+; CHECK-NEXT: cmpne p1.b, p0/z, z4.b, #0
+; CHECK-NEXT: cmpne p0.b, p0/z, z5.b, #0
; CHECK-NEXT: mov z0.b, p1/m, z1.b
; CHECK-NEXT: sel z1.b, p0, z2.b, z3.b
; CHECK-NEXT: stp q0, q1, [x0]
@@ -697,6 +704,13 @@ define void @select_v16i16(ptr %a, ptr %b) {
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z0.h
; CHECK-NEXT: cmpeq p0.h, p0/z, z2.h, z3.h
+; CHECK-NEXT: mov z4.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: mov z5.h, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: and z4.h, z4.h, #0x1
+; CHECK-NEXT: and z5.h, z5.h, #0x1
+; CHECK-NEXT: cmpne p1.h, p0/z, z4.h, #0
+; CHECK-NEXT: cmpne p0.h, p0/z, z5.h, #0
; CHECK-NEXT: mov z0.h, p1/m, z1.h
; CHECK-NEXT: sel z1.h, p0, z2.h, z3.h
; CHECK-NEXT: stp q0, q1, [x0]
@@ -911,6 +925,13 @@ define void @select_v8i32(ptr %a, ptr %b) {
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: cmpeq p1.s, p0/z, z1.s, z0.s
; CHECK-NEXT: cmpeq p0.s, p0/z, z2.s, z3.s
+; CHECK-NEXT: mov z4.s, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: mov z5.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: and z4.s, z4.s, #0x1
+; CHECK-NEXT: and z5.s, z5.s, #0x1
+; CHECK-NEXT: cmpne p1.s, p0/z, z4.s, #0
+; CHECK-NEXT: cmpne p0.s, p0/z, z5.s, #0
; CHECK-NEXT: mov z0.s, p1/m, z1.s
; CHECK-NEXT: sel z1.s, p0, z2.s, z3.s
; CHECK-NEXT: stp q0, q1, [x0]
@@ -1044,6 +1065,13 @@ define void @select_v4i64(ptr %a, ptr %b) {
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: cmpeq p1.d, p0/z, z1.d, z0.d
; CHECK-NEXT: cmpeq p0.d, p0/z, z2.d, z3.d
+; CHECK-NEXT: mov z4.d, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: mov z5.d, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: and z4.d, z4.d, #0x1
+; CHECK-NEXT: and z5.d, z5.d, #0x1
+; CHECK-NEXT: cmpne p1.d, p0/z, z4.d, #0
+; CHECK-NEXT: cmpne p0.d, p0/z, z5.d, #0
; CHECK-NEXT: mov z0.d, p1/m, z1.d
; CHECK-NEXT: sel z1.d, p0, z2.d, z3.d
; CHECK-NEXT: stp q0, q1, [x0]
diff --git a/llvm/test/CodeGen/AMDGPU/load-local-redundant-copies.ll b/llvm/test/CodeGen/AMDGPU/load-local-redundant-copies.ll
index 8a3cc57e08579..2269c68b941e7 100644
--- a/llvm/test/CodeGen/AMDGPU/load-local-redundant-copies.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-local-redundant-copies.ll
@@ -66,38 +66,39 @@ define amdgpu_vs void @test_3(i32 inreg %arg1, i32 inreg %arg2, ptr addrspace(8)
; CHECK-NEXT: s_mov_b32 s6, s4
; CHECK-NEXT: s_mov_b32 s5, s3
; CHECK-NEXT: s_mov_b32 s4, s2
-; CHECK-NEXT: v_add_i32_e32 v0, vcc, 12, v1
-; CHECK-NEXT: v_add_i32_e32 v3, vcc, 8, v1
-; CHECK-NEXT: v_add_i32_e32 v4, vcc, 4, v1
-; CHECK-NEXT: v_add_i32_e32 v7, vcc, 20, v1
-; CHECK-NEXT: v_add_i32_e32 v9, vcc, 16, v1
+; CHECK-NEXT: v_add_i32_e32 v0, vcc, 20, v1
+; CHECK-NEXT: v_add_i32_e32 v3, vcc, 16, v1
+; CHECK-NEXT: v_add_i32_e32 v4, vcc, 12, v1
+; CHECK-NEXT: v_add_i32_e32 v5, vcc, 8, v1
+; CHECK-NEXT: v_add_i32_e32 v9, vcc, 4, v1
; CHECK-NEXT: v_mov_b32_e32 v10, s0
-; CHECK-NEXT: v_add_i32_e32 v11, vcc, 12, v2
-; CHECK-NEXT: v_add_i32_e32 v12, vcc, 8, v2
+; CHECK-NEXT: v_add_i32_e32 v11, vcc, 20, v2
+; CHECK-NEXT: v_add_i32_e32 v12, vcc, 16, v2
; CHECK-NEXT: s_mov_b32 m0, -1
-; CHECK-NEXT: ds_read_b32 v6, v0
-; CHECK-NEXT: ds_read_b32 v5, v3
-; CHECK-NEXT: ds_read_b32 v4, v4
-; CHECK-NEXT: ds_read_b32 v8, v7
-; CHECK-NEXT: ds_read_b32 v7, v9
+; CHECK-NEXT: ds_read_b32 v8, v0
+; CHECK-NEXT: ds_read_b32 v7, v3
+; CHECK-NEXT: ds_read_b32 v6, v4
+; CHECK-NEXT: ds_read_b32 v5, v5
+; CHECK-NEXT: ds_read_b32 v4, v9
; CHECK-NEXT: ds_read_b32 v3, v1
-; CHECK-NEXT: v_add_i32_e32 v0, vcc, 4, v2
-; CHECK-NEXT: v_add_i32_e32 v1, vcc, 20, v2
-; CHECK-NEXT: v_add_i32_e32 v9, vcc, 16, v2
+; CHECK-NEXT: v_add_i32_e32 v1, vcc, 12, v2
+; CHECK-NEXT: v_add_i32_e32 v9, vcc, 8, v2
+; CHECK-NEXT: v_add_i32_e32 v13, vcc, 4, v2
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: tbuffer_store_format_xyzw v[3:6], v10, s[4:7], s1 format:[BUF_DATA_FORMAT_32_32_32,BUF_NUM_FORMAT_UINT] idxen offset:264 glc slc
; CHECK-NEXT: tbuffer_store_format_xy v[7:8], v10, s[4:7], s1 format:[BUF_DATA_FORMAT_INVALID,BUF_NUM_FORMAT_UINT] idxen offset:280 glc slc
+; CHECK-NEXT: ds_read_b32 v0, v12
; CHECK-NEXT: s_waitcnt expcnt(1)
-; CHECK-NEXT: ds_read_b32 v5, v11
-; CHECK-NEXT: ds_read_b32 v4, v12
-; CHECK-NEXT: ds_read_b32 v3, v0
-; CHECK-NEXT: ds_read_b32 v1, v1
-; CHECK-NEXT: ds_read_b32 v0, v9
+; CHECK-NEXT: ds_read_b32 v5, v1
+; CHECK-NEXT: ds_read_b32 v4, v9
+; CHECK-NEXT: ds_read_b32 v3, v13
; CHECK-NEXT: ds_read_b32 v2, v2
-; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: ds_read_b32 v1, v11
+; CHECK-NEXT: s_waitcnt lgkmcnt(5)
; CHECK-NEXT: exp mrt0 off, off, off, off
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
; CHECK-NEXT: tbuffer_store_format_xyzw v[2:5], v10, s[4:7], s1 format:[BUF_DATA_FORMAT_32_32_32,BUF_NUM_FORMAT_UINT] idxen offset:240 glc slc
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: tbuffer_store_format_xy v[0:1], v10, s[4:7], s1 format:[BUF_DATA_FORMAT_INVALID,BUF_NUM_FORMAT_UINT] idxen offset:256 glc slc
; CHECK-NEXT: s_endpgm
%load1 = load <6 x float>, ptr addrspace(3) %arg5, align 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfw-web-simplification.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfw-web-simplification.ll
index aba9056c78cda..5aa3a246d7616 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfw-web-simplification.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfw-web-simplification.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+f,+d -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=1 | FileCheck %s --check-prefixes=NO_FOLDING
-; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+f,+d -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=2 | FileCheck %s --check-prefixes=NO_FOLDING
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+f,+d -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=1 | FileCheck %s --check-prefixes=NO_FOLDING,NO_FOLDING1
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+f,+d -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=2 | FileCheck %s --check-prefixes=NO_FOLDING,NO_FOLDING2
; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+f,+d -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=3 | FileCheck %s --check-prefixes=FOLDING,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+f,+d -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=3 | FileCheck %s --check-prefixes=FOLDING,ZVFHMIN
; Check that the default value enables the web folding and
@@ -8,20 +8,35 @@
; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+f,+d -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=FOLDING
define void @vfwmul_v2f116_multiple_users(ptr %x, ptr %y, ptr %z, <2 x half> %a, <2 x half> %b, <2 x half> %b2) {
-; NO_FOLDING-LABEL: vfwmul_v2f116_multiple_users:
-; NO_FOLDING: # %bb.0:
-; NO_FOLDING-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
-; NO_FOLDING-NEXT: vfwcvt.f.f.v v11, v8
-; NO_FOLDING-NEXT: vfwcvt.f.f.v v8, v9
-; NO_FOLDING-NEXT: vfwcvt.f.f.v v9, v10
-; NO_FOLDING-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; NO_FOLDING-NEXT: vfmul.vv v10, v11, v8
-; NO_FOLDING-NEXT: vfadd.vv v11, v11, v9
-; NO_FOLDING-NEXT: vfsub.vv v8, v8, v9
-; NO_FOLDING-NEXT: vse32.v v10, (a0)
-; NO_FOLDING-NEXT: vse32.v v11, (a1)
-; NO_FOLDING-NEXT: vse32.v v8, (a2)
-; NO_FOLDING-NEXT: ret
+; NO_FOLDING1-LABEL: vfwmul_v2f116_multiple_users:
+; NO_FOLDING1: # %bb.0:
+; NO_FOLDING1-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; NO_FOLDING1-NEXT: vfwcvt.f.f.v v11, v8
+; NO_FOLDING1-NEXT: vfwcvt.f.f.v v8, v9
+; NO_FOLDING1-NEXT: vfwcvt.f.f.v v9, v10
+; NO_FOLDING1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; NO_FOLDING1-NEXT: vfmul.vv v10, v11, v8
+; NO_FOLDING1-NEXT: vfadd.vv v11, v11, v9
+; NO_FOLDING1-NEXT: vfsub.vv v8, v8, v9
+; NO_FOLDING1-NEXT: vse32.v v10, (a0)
+; NO_FOLDING1-NEXT: vse32.v v11, (a1)
+; NO_FOLDING1-NEXT: vse32.v v8, (a2)
+; NO_FOLDING1-NEXT: ret
+;
+; NO_FOLDING2-LABEL: vfwmul_v2f116_multiple_users:
+; NO_FOLDING2: # %bb.0:
+; NO_FOLDING2-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; NO_FOLDING2-NEXT: vfwcvt.f.f.v v11, v8
+; NO_FOLDING2-NEXT: vfwcvt.f.f.v v8, v9
+; NO_FOLDING2-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; NO_FOLDING2-NEXT: vfmul.vv v9, v11, v8
+; NO_FOLDING2-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; NO_FOLDING2-NEXT: vfwadd.wv v11, v11, v10
+; NO_FOLDING2-NEXT: vfwsub.wv v8, v8, v10
+; NO_FOLDING2-NEXT: vse32.v v9, (a0)
+; NO_FOLDING2-NEXT: vse32.v v11, (a1)
+; NO_FOLDING2-NEXT: vse32.v v8, (a2)
+; NO_FOLDING2-NEXT: ret
;
; ZVFH-LABEL: vfwmul_v2f116_multiple_users:
; ZVFH: # %bb.0:
@@ -61,20 +76,35 @@ define void @vfwmul_v2f116_multiple_users(ptr %x, ptr %y, ptr %z, <2 x half> %a,
}
define void @vfwmul_v2f32_multiple_users(ptr %x, ptr %y, ptr %z, <2 x float> %a, <2 x float> %b, <2 x float> %b2) {
-; NO_FOLDING-LABEL: vfwmul_v2f32_multiple_users:
-; NO_FOLDING: # %bb.0:
-; NO_FOLDING-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; NO_FOLDING-NEXT: vfwcvt.f.f.v v11, v8
-; NO_FOLDING-NEXT: vfwcvt.f.f.v v8, v9
-; NO_FOLDING-NEXT: vfwcvt.f.f.v v9, v10
-; NO_FOLDING-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; NO_FOLDING-NEXT: vfmul.vv v10, v11, v8
-; NO_FOLDING-NEXT: vfadd.vv v11, v11, v9
-; NO_FOLDING-NEXT: vfsub.vv v8, v8, v9
-; NO_FOLDING-NEXT: vse64.v v10, (a0)
-; NO_FOLDING-NEXT: vse64.v v11, (a1)
-; NO_FOLDING-NEXT: vse64.v v8, (a2)
-; NO_FOLDING-NEXT: ret
+; NO_FOLDING1-LABEL: vfwmul_v2f32_multiple_users:
+; NO_FOLDING1: # %bb.0:
+; NO_FOLDING1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; NO_FOLDING1-NEXT: vfwcvt.f.f.v v11, v8
+; NO_FOLDING1-NEXT: vfwcvt.f.f.v v8, v9
+; NO_FOLDING1-NEXT: vfwcvt.f.f.v v9, v10
+; NO_FOLDING1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; NO_FOLDING1-NEXT: vfmul.vv v10, v11, v8
+; NO_FOLDING1-NEXT: vfadd.vv v11, v11, v9
+; NO_FOLDING1-NEXT: vfsub.vv v8, v8, v9
+; NO_FOLDING1-NEXT: vse64.v v10, (a0)
+; NO_FOLDING1-NEXT: vse64.v v11, (a1)
+; NO_FOLDING1-NEXT: vse64.v v8, (a2)
+; NO_FOLDING1-NEXT: ret
+;
+; NO_FOLDING2-LABEL: vfwmul_v2f32_multiple_users:
+; NO_FOLDING2: # %bb.0:
+; NO_FOLDING2-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; NO_FOLDING2-NEXT: vfwcvt.f.f.v v11, v8
+; NO_FOLDING2-NEXT: vfwcvt.f.f.v v8, v9
+; NO_FOLDING2-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; NO_FOLDING2-NEXT: vfmul.vv v9, v11, v8
+; NO_FOLDING2-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; NO_FOLDING2-NEXT: vfwadd.wv v11, v11, v10
+; NO_FOLDING2-NEXT: vfwsub.wv v8, v8, v10
+; NO_FOLDING2-NEXT: vse64.v v9, (a0)
+; NO_FOLDING2-NEXT: vse64.v v11, (a1)
+; NO_FOLDING2-NEXT: vse64.v v8, (a2)
+; NO_FOLDING2-NEXT: ret
;
; FOLDING-LABEL: vfwmul_v2f32_multiple_users:
; FOLDING: # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vw-web-simplification.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vw-web-simplification.ll
index 227a428831b60..b093e9e35edad 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vw-web-simplification.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vw-web-simplification.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=1 | FileCheck %s --check-prefixes=NO_FOLDING
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=1 | FileCheck %s --check-prefixes=NO_FOLDING
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=2 | FileCheck %s --check-prefixes=NO_FOLDING
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=2 | FileCheck %s --check-prefixes=NO_FOLDING
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=1 | FileCheck %s --check-prefixes=NO_FOLDING1
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=1 | FileCheck %s --check-prefixes=NO_FOLDING1
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=2 | FileCheck %s --check-prefixes=NO_FOLDING2
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=2 | FileCheck %s --check-prefixes=NO_FOLDING2
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=3 | FileCheck %s --check-prefixes=FOLDING
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=3 | FileCheck %s --check-prefixes=FOLDING
; Check that the default value enables the web folding and
@@ -16,21 +16,38 @@
; We need the web size to be at least 3 for the folding to happen, because
; %c has 3 uses.
define <2 x i16> @vwmul_v2i16_multiple_users(ptr %x, ptr %y, ptr %z) {
-; NO_FOLDING-LABEL: vwmul_v2i16_multiple_users:
-; NO_FOLDING: # %bb.0:
-; NO_FOLDING-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
-; NO_FOLDING-NEXT: vle8.v v8, (a0)
-; NO_FOLDING-NEXT: vle8.v v9, (a1)
-; NO_FOLDING-NEXT: vle8.v v10, (a2)
-; NO_FOLDING-NEXT: vsext.vf2 v11, v8
-; NO_FOLDING-NEXT: vsext.vf2 v8, v9
-; NO_FOLDING-NEXT: vsext.vf2 v9, v10
-; NO_FOLDING-NEXT: vmul.vv v8, v11, v8
-; NO_FOLDING-NEXT: vadd.vv v10, v11, v9
-; NO_FOLDING-NEXT: vsub.vv v9, v11, v9
-; NO_FOLDING-NEXT: vor.vv v8, v8, v10
-; NO_FOLDING-NEXT: vor.vv v8, v8, v9
-; NO_FOLDING-NEXT: ret
+; NO_FOLDING1-LABEL: vwmul_v2i16_multiple_users:
+; NO_FOLDING1: # %bb.0:
+; NO_FOLDING1-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; NO_FOLDING1-NEXT: vle8.v v8, (a0)
+; NO_FOLDING1-NEXT: vle8.v v9, (a1)
+; NO_FOLDING1-NEXT: vle8.v v10, (a2)
+; NO_FOLDING1-NEXT: vsext.vf2 v11, v8
+; NO_FOLDING1-NEXT: vsext.vf2 v8, v9
+; NO_FOLDING1-NEXT: vsext.vf2 v9, v10
+; NO_FOLDING1-NEXT: vmul.vv v8, v11, v8
+; NO_FOLDING1-NEXT: vadd.vv v10, v11, v9
+; NO_FOLDING1-NEXT: vsub.vv v9, v11, v9
+; NO_FOLDING1-NEXT: vor.vv v8, v8, v10
+; NO_FOLDING1-NEXT: vor.vv v8, v8, v9
+; NO_FOLDING1-NEXT: ret
+;
+; NO_FOLDING2-LABEL: vwmul_v2i16_multiple_users:
+; NO_FOLDING2: # %bb.0:
+; NO_FOLDING2-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; NO_FOLDING2-NEXT: vle8.v v8, (a0)
+; NO_FOLDING2-NEXT: vle8.v v9, (a1)
+; NO_FOLDING2-NEXT: vle8.v v10, (a2)
+; NO_FOLDING2-NEXT: vsext.vf2 v11, v8
+; NO_FOLDING2-NEXT: vsext.vf2 v8, v9
+; NO_FOLDING2-NEXT: vmul.vv v8, v11, v8
+; NO_FOLDING2-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; NO_FOLDING2-NEXT: vwadd.wv v9, v11, v10
+; NO_FOLDING2-NEXT: vwsub.wv v11, v11, v10
+; NO_FOLDING2-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; NO_FOLDING2-NEXT: vor.vv v8, v8, v9
+; NO_FOLDING2-NEXT: vor.vv v8, v8, v11
+; NO_FOLDING2-NEXT: ret
;
; FOLDING-LABEL: vwmul_v2i16_multiple_users:
; FOLDING: # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
index 6a08f5a28a295..c55bf684cc462 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
@@ -1936,6 +1936,8 @@ define {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} @vector_d
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v10, v8, a0
+; CHECK-NEXT: add a1, a0, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs2r.v v8, (a0)
@@ -2001,6 +2003,8 @@ define {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} @ve
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v10, v8, a0
+; CHECK-NEXT: add a1, a0, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs2r.v v8, (a0)
@@ -2066,6 +2070,8 @@ define {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} @vecto
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v10, v8, a0
+; CHECK-NEXT: add a1, a0, a0
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs2r.v v8, (a0)
@@ -2391,8 +2397,12 @@ define {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v11, v9, a0
+; CHECK-NEXT: add a1, a0, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v9, v11, a0
+; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v11, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v8, v11, a0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs4r.v v8, (a0)
@@ -2478,8 +2488,12 @@ define {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vs
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v11, v9, a0
+; CHECK-NEXT: add a1, a0, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v9, v11, a0
+; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v11, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v8, v11, a0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs4r.v v8, (a0)
@@ -2565,8 +2579,12 @@ define {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscal
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v11, v9, a0
+; CHECK-NEXT: add a1, a0, a0
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vslideup.vx v9, v11, a0
+; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v11, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vslideup.vx v8, v11, a0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs4r.v v8, (a0)
@@ -3061,10 +3079,16 @@ define {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v12, v9, a0
+; CHECK-NEXT: add a1, a0, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v9, v12, a0
+; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v12, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v8, v12, a0
+; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v12, v10, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v10, v12, a0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs4r.v v8, (a0)
@@ -3156,10 +3180,16 @@ define {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vs
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v12, v9, a0
+; CHECK-NEXT: add a1, a0, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v9, v12, a0
+; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v12, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v8, v12, a0
+; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v12, v10, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v10, v12, a0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs4r.v v8, (a0)
@@ -3251,10 +3281,16 @@ define {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscal
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v12, v9, a0
+; CHECK-NEXT: add a1, a0, a0
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vslideup.vx v9, v12, a0
+; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v12, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vslideup.vx v8, v12, a0
+; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v12, v10, a0
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vslideup.vx v10, v12, a0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs4r.v v8, (a0)
diff --git a/llvm/test/CodeGen/Thumb2/active_lane_mask.ll b/llvm/test/CodeGen/Thumb2/active_lane_mask.ll
index bcd92f81911b2..da13d07abf09b 100644
--- a/llvm/test/CodeGen/Thumb2/active_lane_mask.ll
+++ b/llvm/test/CodeGen/Thumb2/active_lane_mask.ll
@@ -306,9 +306,10 @@ define void @test_width2(ptr nocapture readnone %x, ptr nocapture %y, i8 zeroext
; CHECK-NEXT: ldrne.w r3, [r12]
; CHECK-NEXT: vmovne.32 q0[0], r3
; CHECK-NEXT: lsls r0, r0, #30
-; CHECK-NEXT: itt mi
+; CHECK-NEXT: ittt mi
; CHECK-NEXT: ldrmi.w r0, [r12, #4]
; CHECK-NEXT: vmovmi.32 q0[2], r0
+; CHECK-NEXT: vmovmi.32 q0[3], r0
; CHECK-NEXT: vmrs r3, p0
; CHECK-NEXT: and r0, r3, #1
; CHECK-NEXT: ubfx r3, r3, #8, #1
diff --git a/llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll b/llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll
index 67910e40e7705..53595bc5a5595 100644
--- a/llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll
@@ -110,9 +110,10 @@ define void @foo_sext_v2i64_v2i32(ptr %dest, ptr %mask, ptr %src) {
; CHECK-LE-NEXT: ldrne r3, [r2]
; CHECK-LE-NEXT: vmovne.32 q1[0], r3
; CHECK-LE-NEXT: lsls r1, r1, #30
-; CHECK-LE-NEXT: itt mi
+; CHECK-LE-NEXT: ittt mi
; CHECK-LE-NEXT: ldrmi r1, [r2, #4]
; CHECK-LE-NEXT: vmovmi.32 q1[2], r1
+; CHECK-LE-NEXT: vmovmi.32 q1[3], r0
; CHECK-LE-NEXT: vmov r2, s6
; CHECK-LE-NEXT: movs r1, #0
; CHECK-LE-NEXT: vmov r3, s0
@@ -219,9 +220,9 @@ define void @foo_sext_v2i64_v2i32_unaligned(ptr %dest, ptr %mask, ptr %src) {
; CHECK-LE-NEXT: sub sp, #4
; CHECK-LE-NEXT: ldrd r12, lr, [r1]
; CHECK-LE-NEXT: movs r1, #0
-; CHECK-LE-NEXT: @ implicit-def: $q0
+; CHECK-LE-NEXT: @ implicit-def: $q1
; CHECK-LE-NEXT: rsbs.w r3, r12, #0
-; CHECK-LE-NEXT: vmov q1[2], q1[0], r12, lr
+; CHECK-LE-NEXT: vmov q0[2], q0[0], r12, lr
; CHECK-LE-NEXT: sbcs.w r3, r1, r12, asr #31
; CHECK-LE-NEXT: csetm r3, lt
; CHECK-LE-NEXT: rsbs.w r4, lr, #0
@@ -232,23 +233,24 @@ define void @foo_sext_v2i64_v2i32_unaligned(ptr %dest, ptr %mask, ptr %src) {
; CHECK-LE-NEXT: lsls r3, r1, #31
; CHECK-LE-NEXT: itt ne
; CHECK-LE-NEXT: ldrne r3, [r2]
-; CHECK-LE-NEXT: vmovne.32 q0[0], r3
+; CHECK-LE-NEXT: vmovne.32 q1[0], r3
; CHECK-LE-NEXT: lsls r1, r1, #30
-; CHECK-LE-NEXT: itt mi
+; CHECK-LE-NEXT: ittt mi
; CHECK-LE-NEXT: ldrmi r1, [r2, #4]
-; CHECK-LE-NEXT: vmovmi.32 q0[2], r1
-; CHECK-LE-NEXT: vmov r2, s2
+; CHECK-LE-NEXT: vmovmi.32 q1[2], r1
+; CHECK-LE-NEXT: vmovmi.32 q1[3], r0
+; CHECK-LE-NEXT: vmov r2, s6
; CHECK-LE-NEXT: movs r1, #0
-; CHECK-LE-NEXT: vmov r3, s4
-; CHECK-LE-NEXT: vmov r4, s0
-; CHECK-LE-NEXT: vmov q0[2], q0[0], r4, r2
+; CHECK-LE-NEXT: vmov r3, s0
+; CHECK-LE-NEXT: vmov r4, s4
+; CHECK-LE-NEXT: vmov q1[2], q1[0], r4, r2
; CHECK-LE-NEXT: rsbs r5, r3, #0
; CHECK-LE-NEXT: asr.w r12, r2, #31
; CHECK-LE-NEXT: sbcs.w r2, r1, r3, asr #31
-; CHECK-LE-NEXT: vmov r3, s6
+; CHECK-LE-NEXT: vmov r3, s2
; CHECK-LE-NEXT: csetm r2, lt
; CHECK-LE-NEXT: asr.w lr, r4, #31
-; CHECK-LE-NEXT: vmov q0[3], q0[1], lr, r12
+; CHECK-LE-NEXT: vmov q1[3], q1[1], lr, r12
; CHECK-LE-NEXT: rsbs r5, r3, #0
; CHECK-LE-NEXT: sbcs.w r3, r1, r3, asr #31
; CHECK-LE-NEXT: bfi r1, r2, #0, #1
@@ -256,11 +258,11 @@ define void @foo_sext_v2i64_v2i32_unaligned(ptr %dest, ptr %mask, ptr %src) {
; CHECK-LE-NEXT: bfi r1, r2, #1, #1
; CHECK-LE-NEXT: lsls r2, r1, #31
; CHECK-LE-NEXT: itt ne
-; CHECK-LE-NEXT: vmovne r2, r3, d0
+; CHECK-LE-NEXT: vmovne r2, r3, d2
; CHECK-LE-NEXT: strdne r2, r3, [r0]
; CHECK-LE-NEXT: lsls r1, r1, #30
; CHECK-LE-NEXT: itt mi
-; CHECK-LE-NEXT: vmovmi r1, r2, d1
+; CHECK-LE-NEXT: vmovmi r1, r2, d3
; CHECK-LE-NEXT: strdmi r1, r2, [r0, #8]
; CHECK-LE-NEXT: add sp, #4
; CHECK-LE-NEXT: pop {r4, r5, r7, pc}
@@ -363,9 +365,10 @@ define void @foo_zext_v2i64_v2i32(ptr %dest, ptr %mask, ptr %src) {
; CHECK-LE-NEXT: ldrne r3, [r2]
; CHECK-LE-NEXT: vmovne.32 q0[0], r3
; CHECK-LE-NEXT: lsls r1, r1, #30
-; CHECK-LE-NEXT: itt mi
+; CHECK-LE-NEXT: ittt mi
; CHECK-LE-NEXT: ldrmi r1, [r2, #4]
; CHECK-LE-NEXT: vmovmi.32 q0[2], r1
+; CHECK-LE-NEXT: vmovmi.32 q0[3], r0
; CHECK-LE-NEXT: vmov r2, s4
; CHECK-LE-NEXT: movs r1, #0
; CHECK-LE-NEXT: vand q0, q0, q2
@@ -478,9 +481,10 @@ define void @foo_zext_v2i64_v2i32_unaligned(ptr %dest, ptr %mask, ptr %src) {
; CHECK-LE-NEXT: ldrne r3, [r2]
; CHECK-LE-NEXT: vmovne.32 q0[0], r3
; CHECK-LE-NEXT: lsls r1, r1, #30
-; CHECK-LE-NEXT: itt mi
+; CHECK-LE-NEXT: ittt mi
; CHECK-LE-NEXT: ldrmi r1, [r2, #4]
; CHECK-LE-NEXT: vmovmi.32 q0[2], r1
+; CHECK-LE-NEXT: vmovmi.32 q0[3], r0
; CHECK-LE-NEXT: vmov r2, s4
; CHECK-LE-NEXT: movs r1, #0
; CHECK-LE-NEXT: vand q0, q0, q2
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll
index ae4f85ce42a19..321aba7034498 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll
@@ -562,14 +562,18 @@ define void @load_i16_stride4_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
;
; AVX2-LABEL: load_i16_stride4_vf8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpxor %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
+; AVX2-NEXT: vmovdqa 32(%rdi), %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7]
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7],ymm0[8],ymm1[9,10,11],ymm0[12],ymm1[13,14,15]
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX2-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovdqa (%rdi), %ymm2
+; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6,7]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3],ymm2[4],ymm1[5,6,7],ymm2[8],ymm1[9,10,11],ymm2[12],ymm1[13,14,15]
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpackusdw %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vmovdqa (%rdi), %xmm1
; AVX2-NEXT: vmovdqa 16(%rdi), %xmm2
; AVX2-NEXT: vmovdqa 32(%rdi), %xmm3
@@ -612,14 +616,18 @@ define void @load_i16_stride4_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
;
; AVX2-FP-LABEL: load_i16_stride4_vf8:
; AVX2-FP: # %bb.0:
-; AVX2-FP-NEXT: vpxor %xmm0, %xmm0, %xmm0
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm1 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-FP-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
+; AVX2-FP-NEXT: vmovdqa 32(%rdi), %ymm0
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7]
+; AVX2-FP-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7],ymm0[8],ymm1[9,10,11],ymm0[12],ymm1[13,14,15]
; AVX2-FP-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX2-FP-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
-; AVX2-FP-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX2-FP-NEXT: vmovdqa (%rdi), %ymm2
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6,7]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3],ymm2[4],ymm1[5,6,7],ymm2[8],ymm1[9,10,11],ymm2[12],ymm1[13,14,15]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-FP-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX2-FP-NEXT: vpackusdw %xmm0, %xmm1, %xmm0
; AVX2-FP-NEXT: vmovdqa (%rdi), %xmm1
; AVX2-FP-NEXT: vmovdqa 16(%rdi), %xmm2
; AVX2-FP-NEXT: vmovdqa 32(%rdi), %xmm3
@@ -660,14 +668,18 @@ define void @load_i16_stride4_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
;
; AVX2-FCP-LABEL: load_i16_stride4_vf8:
; AVX2-FCP: # %bb.0:
-; AVX2-FCP-NEXT: vpxor %xmm0, %xmm0, %xmm0
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-FCP-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
+; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %ymm0
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7],ymm0[8],ymm1[9,10,11],ymm0[12],ymm1[13,14,15]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX2-FCP-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
-; AVX2-FCP-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX2-FCP-NEXT: vmovdqa (%rdi), %ymm2
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3],ymm2[4],ymm1[5,6,7],ymm2[8],ymm1[9,10,11],ymm2[12],ymm1[13,14,15]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-FCP-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX2-FCP-NEXT: vpackusdw %xmm0, %xmm1, %xmm0
; AVX2-FCP-NEXT: vmovdqa (%rdi), %xmm1
; AVX2-FCP-NEXT: vmovdqa 16(%rdi), %xmm2
; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %xmm3
@@ -1021,26 +1033,30 @@ define void @load_i16_stride4_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
;
; AVX2-LABEL: load_i16_stride4_vf16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpxor %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
-; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpackusdw %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
-; AVX2-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm2[6,7]
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT: vmovdqa (%rdi), %ymm0
+; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpblendw {{.*#+}} ymm3 = mem[0],ymm2[1,2,3],mem[4],ymm2[5,6,7],mem[8],ymm2[9,10,11],mem[12],ymm2[13,14,15]
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm4
+; AVX2-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
+; AVX2-NEXT: vpackusdw %xmm3, %xmm3, %xmm3
+; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX2-NEXT: vpblendw {{.*#+}} ymm4 = mem[0],ymm2[1,2,3],mem[4],ymm2[5,6,7],mem[8],ymm2[9,10,11],mem[12],ymm2[13,14,15]
+; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm5
+; AVX2-NEXT: vpackusdw %xmm5, %xmm4, %xmm4
+; AVX2-NEXT: vpackusdw %xmm4, %xmm4, %xmm4
+; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6,7]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7],ymm1[8],ymm2[9,10,11],ymm1[12],ymm2[13,14,15]
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
+; AVX2-NEXT: vpackusdw %xmm4, %xmm1, %xmm1
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3],ymm0[4],ymm2[5,6,7],ymm0[8],ymm2[9,10,11],ymm0[12],ymm2[13,14,15]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
; AVX2-NEXT: vmovdqa (%rdi), %xmm1
; AVX2-NEXT: vmovdqa 16(%rdi), %xmm2
; AVX2-NEXT: vmovdqa 32(%rdi), %xmm3
@@ -1125,26 +1141,30 @@ define void @load_i16_stride4_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
;
; AVX2-FP-LABEL: load_i16_stride4_vf16:
; AVX2-FP: # %bb.0:
-; AVX2-FP-NEXT: vpxor %xmm0, %xmm0, %xmm0
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm1 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-FP-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-FP-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-FP-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-FP-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-FP-NEXT: vpackusdw %xmm2, %xmm2, %xmm2
-; AVX2-FP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX2-FP-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
-; AVX2-FP-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
-; AVX2-FP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm2[6,7]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-FP-NEXT: vmovdqa (%rdi), %ymm0
+; AVX2-FP-NEXT: vmovdqa 32(%rdi), %ymm1
+; AVX2-FP-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm3 = mem[0],ymm2[1,2,3],mem[4],ymm2[5,6,7],mem[8],ymm2[9,10,11],mem[12],ymm2[13,14,15]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm3, %xmm4
+; AVX2-FP-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
+; AVX2-FP-NEXT: vpackusdw %xmm3, %xmm3, %xmm3
+; AVX2-FP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm4 = mem[0],ymm2[1,2,3],mem[4],ymm2[5,6,7],mem[8],ymm2[9,10,11],mem[12],ymm2[13,14,15]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm4, %xmm5
+; AVX2-FP-NEXT: vpackusdw %xmm5, %xmm4, %xmm4
+; AVX2-FP-NEXT: vpackusdw %xmm4, %xmm4, %xmm4
+; AVX2-FP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6,7]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7],ymm1[8],ymm2[9,10,11],ymm1[12],ymm2[13,14,15]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm1, %xmm4
+; AVX2-FP-NEXT: vpackusdw %xmm4, %xmm1, %xmm1
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3],ymm0[4],ymm2[5,6,7],ymm0[8],ymm2[9,10,11],ymm0[12],ymm2[13,14,15]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-FP-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX2-FP-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
; AVX2-FP-NEXT: vmovdqa (%rdi), %xmm1
; AVX2-FP-NEXT: vmovdqa 16(%rdi), %xmm3
; AVX2-FP-NEXT: vmovdqa 32(%rdi), %xmm4
@@ -1225,11 +1245,15 @@ define void @load_i16_stride4_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP: # %bb.0:
; AVX2-FCP-NEXT: vmovdqa 64(%rdi), %ymm1
; AVX2-FCP-NEXT: vmovdqa 96(%rdi), %ymm2
-; AVX2-FCP-NEXT: vpxor %xmm0, %xmm0, %xmm0
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm3 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm3, %xmm4
-; AVX2-FCP-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
+; AVX2-FCP-NEXT: vmovdqa (%rdi), %ymm0
+; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %ymm3
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1,2,3],ymm3[4],ymm4[5,6,7],ymm3[8],ymm4[9,10,11],ymm3[12],ymm4[13,14,15]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm3, %xmm5
+; AVX2-FCP-NEXT: vpackusdw %xmm5, %xmm3, %xmm3
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm4[1,2,3],ymm0[4],ymm4[5,6,7],ymm0[8],ymm4[9,10,11],ymm0[12],ymm4[13,14,15]
; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm4
; AVX2-FCP-NEXT: vpackusdw %xmm4, %xmm0, %xmm0
; AVX2-FCP-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
@@ -2106,46 +2130,54 @@ define void @load_i16_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-LABEL: load_i16_stride4_vf32:
; AVX2: # %bb.0:
; AVX2-NEXT: subq $168, %rsp
-; AVX2-NEXT: vpxor %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
-; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpackusdw %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-NEXT: vpblendw {{.*#+}} ymm3 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
+; AVX2-NEXT: vmovdqa 128(%rdi), %ymm0
+; AVX2-NEXT: vmovdqa 160(%rdi), %ymm2
+; AVX2-NEXT: vmovdqa (%rdi), %ymm3
+; AVX2-NEXT: vmovdqa 32(%rdi), %ymm4
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpblendw {{.*#+}} ymm5 = mem[0],ymm1[1,2,3],mem[4],ymm1[5,6,7],mem[8],ymm1[9,10,11],mem[12],ymm1[13,14,15]
+; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm6
+; AVX2-NEXT: vpackusdw %xmm6, %xmm5, %xmm5
+; AVX2-NEXT: vpackusdw %xmm5, %xmm5, %xmm5
+; AVX2-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX2-NEXT: vpblendw {{.*#+}} ymm6 = mem[0],ymm1[1,2,3],mem[4],ymm1[5,6,7],mem[8],ymm1[9,10,11],mem[12],ymm1[13,14,15]
+; AVX2-NEXT: vextracti128 $1, %ymm6, %xmm7
+; AVX2-NEXT: vpackusdw %xmm7, %xmm6, %xmm6
+; AVX2-NEXT: vpackusdw %xmm6, %xmm6, %xmm6
+; AVX2-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX2-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3,4,5],ymm5[6,7]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6,7]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0],ymm1[1,2,3],ymm4[4],ymm1[5,6,7],ymm4[8],ymm1[9,10,11],ymm4[12],ymm1[13,14,15]
+; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm6
+; AVX2-NEXT: vpackusdw %xmm6, %xmm4, %xmm4
+; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6,7]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm1[1,2,3],ymm3[4],ymm1[5,6,7],ymm3[8],ymm1[9,10,11],ymm3[12],ymm1[13,14,15]
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm6
+; AVX2-NEXT: vpackusdw %xmm6, %xmm3, %xmm3
+; AVX2-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
+; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm5[4,5,6,7]
+; AVX2-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-NEXT: vpblendw {{.*#+}} ymm3 = mem[0],ymm1[1,2,3],mem[4],ymm1[5,6,7],mem[8],ymm1[9,10,11],mem[12],ymm1[13,14,15]
; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm4
; AVX2-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpackusdw %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
-; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
-; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpackusdw %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
-; AVX2-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm2[6,7]
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm4 = mem[0],ymm1[1,2,3],mem[4],ymm1[5,6,7],mem[8],ymm1[9,10,11],mem[12],ymm1[13,14,15]
+; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm5
+; AVX2-NEXT: vpackusdw %xmm5, %xmm4, %xmm4
+; AVX2-NEXT: vpackusdw %xmm4, %xmm4, %xmm4
+; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6,7]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm1[1,2,3],ymm2[4],ymm1[5,6,7],ymm2[8],ymm1[9,10,11],ymm2[12],ymm1[13,14,15]
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm4
+; AVX2-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7],ymm0[8],ymm1[9,10,11],ymm0[12],ymm1[13,14,15]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovdqa (%rdi), %xmm1
; AVX2-NEXT: vmovdqa %xmm1, (%rsp) # 16-byte Spill
@@ -2338,46 +2370,54 @@ define void @load_i16_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FP-LABEL: load_i16_stride4_vf32:
; AVX2-FP: # %bb.0:
; AVX2-FP-NEXT: subq $184, %rsp
-; AVX2-FP-NEXT: vpxor %xmm0, %xmm0, %xmm0
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm1 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-FP-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-FP-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-FP-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-FP-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-FP-NEXT: vpackusdw %xmm2, %xmm2, %xmm2
-; AVX2-FP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm3 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
+; AVX2-FP-NEXT: vmovdqa 128(%rdi), %ymm0
+; AVX2-FP-NEXT: vmovdqa 160(%rdi), %ymm2
+; AVX2-FP-NEXT: vmovdqa (%rdi), %ymm3
+; AVX2-FP-NEXT: vmovdqa 32(%rdi), %ymm4
+; AVX2-FP-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm5 = mem[0],ymm1[1,2,3],mem[4],ymm1[5,6,7],mem[8],ymm1[9,10,11],mem[12],ymm1[13,14,15]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm5, %xmm6
+; AVX2-FP-NEXT: vpackusdw %xmm6, %xmm5, %xmm5
+; AVX2-FP-NEXT: vpackusdw %xmm5, %xmm5, %xmm5
+; AVX2-FP-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm6 = mem[0],ymm1[1,2,3],mem[4],ymm1[5,6,7],mem[8],ymm1[9,10,11],mem[12],ymm1[13,14,15]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm6, %xmm7
+; AVX2-FP-NEXT: vpackusdw %xmm7, %xmm6, %xmm6
+; AVX2-FP-NEXT: vpackusdw %xmm6, %xmm6, %xmm6
+; AVX2-FP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3,4,5],ymm5[6,7]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6,7]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0],ymm1[1,2,3],ymm4[4],ymm1[5,6,7],ymm4[8],ymm1[9,10,11],ymm4[12],ymm1[13,14,15]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm4, %xmm6
+; AVX2-FP-NEXT: vpackusdw %xmm6, %xmm4, %xmm4
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6,7]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm1[1,2,3],ymm3[4],ymm1[5,6,7],ymm3[8],ymm1[9,10,11],ymm3[12],ymm1[13,14,15]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm3, %xmm6
+; AVX2-FP-NEXT: vpackusdw %xmm6, %xmm3, %xmm3
+; AVX2-FP-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm5[4,5,6,7]
+; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm3 = mem[0],ymm1[1,2,3],mem[4],ymm1[5,6,7],mem[8],ymm1[9,10,11],mem[12],ymm1[13,14,15]
; AVX2-FP-NEXT: vextracti128 $1, %ymm3, %xmm4
; AVX2-FP-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX2-FP-NEXT: vpackusdw %xmm3, %xmm3, %xmm3
; AVX2-FP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm1 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-FP-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-FP-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-FP-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-FP-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-FP-NEXT: vpackusdw %xmm2, %xmm2, %xmm2
-; AVX2-FP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX2-FP-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
-; AVX2-FP-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
-; AVX2-FP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm2[6,7]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm4 = mem[0],ymm1[1,2,3],mem[4],ymm1[5,6,7],mem[8],ymm1[9,10,11],mem[12],ymm1[13,14,15]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm4, %xmm5
+; AVX2-FP-NEXT: vpackusdw %xmm5, %xmm4, %xmm4
+; AVX2-FP-NEXT: vpackusdw %xmm4, %xmm4, %xmm4
+; AVX2-FP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6,7]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm1[1,2,3],ymm2[4],ymm1[5,6,7],ymm2[8],ymm1[9,10,11],ymm2[12],ymm1[13,14,15]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm2, %xmm4
+; AVX2-FP-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7],ymm0[8],ymm1[9,10,11],ymm0[12],ymm1[13,14,15]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-FP-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX2-FP-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovdqa (%rdi), %xmm13
; AVX2-FP-NEXT: vmovdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -2560,149 +2600,157 @@ define void @load_i16_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: subq $104, %rsp
; AVX2-FCP-NEXT: vmovdqa 192(%rdi), %ymm3
; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 224(%rdi), %ymm6
-; AVX2-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 64(%rdi), %ymm9
-; AVX2-FCP-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqa 224(%rdi), %ymm5
+; AVX2-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqa 128(%rdi), %ymm0
+; AVX2-FCP-NEXT: vmovdqa 160(%rdi), %ymm1
+; AVX2-FCP-NEXT: vmovdqa 64(%rdi), %ymm10
+; AVX2-FCP-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 96(%rdi), %ymm7
-; AVX2-FCP-NEXT: vpxor %xmm0, %xmm0, %xmm0
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-FCP-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm2, %xmm4
+; AVX2-FCP-NEXT: vmovdqa (%rdi), %ymm2
+; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %ymm4
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpxor %xmm8, %xmm8, %xmm8
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0],ymm8[1,2,3],ymm4[4],ymm8[5,6,7],ymm4[8],ymm8[9,10,11],ymm4[12],ymm8[13,14,15]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm4, %xmm6
+; AVX2-FCP-NEXT: vpackusdw %xmm6, %xmm4, %xmm4
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm8[1,2,3],ymm2[4],ymm8[5,6,7],ymm2[8],ymm8[9,10,11],ymm2[12],ymm8[13,14,15]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm2, %xmm6
+; AVX2-FCP-NEXT: vpackusdw %xmm6, %xmm2, %xmm2
; AVX2-FCP-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
-; AVX2-FCP-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
-; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,2,2,3,0,2,4,6]
-; AVX2-FCP-NEXT: vpermd %ymm7, %ymm2, %ymm5
+; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [0,2,2,3,0,2,4,6]
+; AVX2-FCP-NEXT: vpermd %ymm7, %ymm9, %ymm6
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,16,17,20,21,24,25,28,29]
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm5, %ymm8
-; AVX2-FCP-NEXT: vpermd %ymm9, %ymm2, %ymm10
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm10, %ymm9
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3,4,5],ymm8[6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm8
-; AVX2-FCP-NEXT: vpackusdw %xmm8, %xmm1, %xmm1
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm8
-; AVX2-FCP-NEXT: vpackusdw %xmm8, %xmm0, %xmm0
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm6, %ymm11
+; AVX2-FCP-NEXT: vpermd %ymm10, %ymm9, %ymm10
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm10, %ymm12
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm12[0,1,2,3,4,5],ymm11[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm11[4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm8[1,2,3],ymm1[4],ymm8[5,6,7],ymm1[8],ymm8[9,10,11],ymm1[12],ymm8[13,14,15]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-FCP-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm8[1,2,3],ymm0[4],ymm8[5,6,7],ymm0[8],ymm8[9,10,11],ymm0[12],ymm8[13,14,15]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-FCP-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
; AVX2-FCP-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
-; AVX2-FCP-NEXT: vpermd %ymm6, %ymm2, %ymm9
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm9, %ymm1
-; AVX2-FCP-NEXT: vpermd %ymm3, %ymm2, %ymm6
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm6, %ymm8
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0,1,2,3,4,5],ymm1[6,7]
+; AVX2-FCP-NEXT: vpermd %ymm5, %ymm9, %ymm15
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm15, %ymm1
+; AVX2-FCP-NEXT: vpermd %ymm3, %ymm9, %ymm9
+; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm9, %ymm2
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %xmm14
-; AVX2-FCP-NEXT: vmovdqa 48(%rdi), %xmm3
+; AVX2-FCP-NEXT: vmovdqa 48(%rdi), %xmm4
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [0,1,2,3,2,3,10,11,8,9,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm3, %xmm0
-; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm14, %xmm8
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1]
+; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm4, %xmm0
+; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm14, %xmm2
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; AVX2-FCP-NEXT: vmovdqa (%rdi), %xmm13
-; AVX2-FCP-NEXT: vmovdqa 16(%rdi), %xmm2
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [2,3,10,11,8,9,10,11,8,9,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm2, %xmm4
-; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm13, %xmm11
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm11[0],xmm4[0],xmm11[1],xmm4[1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0,1],xmm8[2,3]
+; AVX2-FCP-NEXT: vmovdqa 16(%rdi), %xmm5
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [2,3,10,11,8,9,10,11,8,9,10,11,12,13,14,15]
+; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm5, %xmm8
+; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm13, %xmm11
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm8 = xmm11[0],xmm8[0],xmm11[1],xmm8[1]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm8[0,1],xmm2[2,3]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
-; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm5, %ymm5
+; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm6, %ymm6
; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm10, %ymm10
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm10[0,1,2,3,4,5],ymm5[6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm5[4,5,6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm10[0,1,2,3,4,5],ymm6[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm6[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 160(%rdi), %xmm11
-; AVX2-FCP-NEXT: vmovdqa 176(%rdi), %xmm15
-; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm15, %xmm4
-; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm11, %xmm10
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm10[0],xmm4[0],xmm10[1],xmm4[1]
+; AVX2-FCP-NEXT: vmovdqa 176(%rdi), %xmm2
+; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm2, %xmm10
+; AVX2-FCP-NEXT: vpshufb %xmm12, %xmm11, %xmm12
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm12[0],xmm10[0],xmm12[1],xmm10[1]
; AVX2-FCP-NEXT: vmovdqa 128(%rdi), %xmm10
; AVX2-FCP-NEXT: vmovdqa 144(%rdi), %xmm12
-; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm12, %xmm0
-; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm10, %xmm1
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3]
-; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm9, %ymm1
-; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm6, %ymm4
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3,4,5],ymm1[6,7]
+; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm12, %xmm0
+; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm10, %xmm3
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm15, %ymm1
+; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm9, %ymm3
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [1,3,2,3,1,3,5,7]
-; AVX2-FCP-NEXT: vpermd %ymm7, %ymm6, %ymm4
-; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm7 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm0 = [1,3,2,3,1,3,5,7]
+; AVX2-FCP-NEXT: vpermd %ymm7, %ymm0, %ymm7
+; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,16,17,20,21,24,25,28,29]
-; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm4, %ymm0
; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm7, %ymm1
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm1 = xmm3[3,1,2,3]
+; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm15, %ymm3
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm1[6,7]
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm1 = xmm4[3,1,2,3]
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm14 = xmm14[3,1,2,3]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm1[0,1,2,0,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm14[0,1,2,0,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm1[0,1,2,0,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm6 = xmm14[0,1,2,0,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[3,1,2,3]
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm13 = xmm13[3,1,2,3]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm2[2,0,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm6 = xmm5[2,0,2,3,4,5,6,7]
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm9 = xmm13[2,0,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm9[0],xmm5[0],xmm9[1],xmm5[1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm5[0,1],xmm3[2,3]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm3 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm5 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm9[0],xmm6[0],xmm9[1],xmm6[1]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm4 = xmm6[0,1],xmm4[2,3]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm3, %ymm6
-; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm5, %ymm9
+; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm0, %ymm9
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm9[0,1,2,3,4,5],ymm6[6,7]
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm9 = xmm15[3,1,2,3]
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm11 = xmm11[3,1,2,3]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm15 = xmm9[0,1,2,0,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm0 = xmm11[0,1,2,0,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1]
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm12 = xmm12[3,1,2,3]
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm9 = xmm11[3,1,2,3]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm11 = xmm2[0,1,2,0,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm9[0,1,2,0,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm11[0],xmm4[1],xmm11[1]
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm11 = xmm12[3,1,2,3]
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm10 = xmm10[3,1,2,3]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm15 = xmm12[2,0,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm12 = xmm11[2,0,2,3,4,5,6,7]
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm8 = xmm10[2,0,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm8 = xmm8[0],xmm15[0],xmm8[1],xmm15[1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm8[0,1],xmm0[2,3]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm8 = xmm8[0],xmm12[0],xmm8[1],xmm12[1]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm4 = xmm8[0,1],xmm4[2,3]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
-; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm4, %ymm4
; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm7, %ymm6
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm6[0,1,2,3,4,5],ymm4[6,7]
+; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm15, %ymm7
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3,4,5],ymm6[6,7]
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,1,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm6 = xmm14[0,1,3,1,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm6[0],xmm1[0],xmm6[1],xmm1[1]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm6 = xmm13[3,1,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm6[0],xmm2[0],xmm6[1],xmm2[1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm4[4,5,6,7]
-; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm3, %ymm2
-; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm5, %ymm3
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm14[0,1,3,1,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm7[0],xmm1[0],xmm7[1],xmm1[1]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm5[3,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm13[3,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm5[0,1],xmm1[2,3]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm6[4,5,6,7]
+; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm3, %ymm3
+; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm0, %ymm0
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm3[6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,3,1,4,5,6,7]
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm9[0,1,3,1,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm11[0,1,3,1,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm12[3,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm11[3,1,2,3,4,5,6,7]
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm10[3,1,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm3, 32(%rsi)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm3, (%rsi)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm3, 32(%rdx)
-; AVX2-FCP-NEXT: vmovups (%rsp), %ymm3 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm3, (%rdx)
-; AVX2-FCP-NEXT: vmovdqa %ymm0, 32(%rcx)
-; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FCP-NEXT: vmovaps %ymm0, (%rcx)
-; AVX2-FCP-NEXT: vmovdqa %ymm2, 32(%r8)
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm2, 32(%rsi)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm2, (%rsi)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm2, 32(%rdx)
+; AVX2-FCP-NEXT: vmovups (%rsp), %ymm2 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm2, (%rdx)
+; AVX2-FCP-NEXT: vmovdqa %ymm4, 32(%rcx)
+; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX2-FCP-NEXT: vmovaps %ymm2, (%rcx)
+; AVX2-FCP-NEXT: vmovdqa %ymm0, 32(%r8)
; AVX2-FCP-NEXT: vmovdqa %ymm1, (%r8)
; AVX2-FCP-NEXT: addq $104, %rsp
; AVX2-FCP-NEXT: vzeroupper
@@ -4354,86 +4402,102 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-LABEL: load_i16_stride4_vf64:
; AVX2: # %bb.0:
; AVX2-NEXT: subq $696, %rsp # imm = 0x2B8
-; AVX2-NEXT: vpxor %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
-; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpackusdw %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-NEXT: vpblendw {{.*#+}} ymm3 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm4
-; AVX2-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm3, %xmm3
-; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
-; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
-; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpackusdw %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-NEXT: vpblendw {{.*#+}} ymm3 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm4
+; AVX2-NEXT: vmovdqa 128(%rdi), %ymm0
+; AVX2-NEXT: vmovdqa 160(%rdi), %ymm1
+; AVX2-NEXT: vmovdqa 384(%rdi), %ymm3
+; AVX2-NEXT: vmovdqa 416(%rdi), %ymm4
+; AVX2-NEXT: vmovdqa 256(%rdi), %ymm5
+; AVX2-NEXT: vmovdqa 288(%rdi), %ymm6
+; AVX2-NEXT: vmovdqa (%rdi), %ymm7
+; AVX2-NEXT: vmovdqa 32(%rdi), %ymm8
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpblendw {{.*#+}} ymm9 = mem[0],ymm2[1,2,3],mem[4],ymm2[5,6,7],mem[8],ymm2[9,10,11],mem[12],ymm2[13,14,15]
+; AVX2-NEXT: vextracti128 $1, %ymm9, %xmm10
+; AVX2-NEXT: vpackusdw %xmm10, %xmm9, %xmm9
+; AVX2-NEXT: vpackusdw %xmm9, %xmm9, %xmm9
+; AVX2-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX2-NEXT: vpblendw {{.*#+}} ymm10 = mem[0],ymm2[1,2,3],mem[4],ymm2[5,6,7],mem[8],ymm2[9,10,11],mem[12],ymm2[13,14,15]
+; AVX2-NEXT: vextracti128 $1, %ymm10, %xmm11
+; AVX2-NEXT: vpackusdw %xmm11, %xmm10, %xmm10
+; AVX2-NEXT: vpackusdw %xmm10, %xmm10, %xmm10
+; AVX2-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX2-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3,4,5],ymm9[6,7]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5,6,7]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm8 = ymm8[0],ymm2[1,2,3],ymm8[4],ymm2[5,6,7],ymm8[8],ymm2[9,10,11],ymm8[12],ymm2[13,14,15]
+; AVX2-NEXT: vextracti128 $1, %ymm8, %xmm10
+; AVX2-NEXT: vpackusdw %xmm10, %xmm8, %xmm8
+; AVX2-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,6,7]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm7 = ymm7[0],ymm2[1,2,3],ymm7[4],ymm2[5,6,7],ymm7[8],ymm2[9,10,11],ymm7[12],ymm2[13,14,15]
+; AVX2-NEXT: vextracti128 $1, %ymm7, %xmm10
+; AVX2-NEXT: vpackusdw %xmm10, %xmm7, %xmm7
+; AVX2-NEXT: vpackusdw %xmm8, %xmm7, %xmm7
+; AVX2-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm9[4,5,6,7]
+; AVX2-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-NEXT: vpblendw {{.*#+}} ymm7 = mem[0],ymm2[1,2,3],mem[4],ymm2[5,6,7],mem[8],ymm2[9,10,11],mem[12],ymm2[13,14,15]
+; AVX2-NEXT: vextracti128 $1, %ymm7, %xmm8
+; AVX2-NEXT: vpackusdw %xmm8, %xmm7, %xmm7
+; AVX2-NEXT: vpackusdw %xmm7, %xmm7, %xmm7
+; AVX2-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX2-NEXT: vpblendw {{.*#+}} ymm8 = mem[0],ymm2[1,2,3],mem[4],ymm2[5,6,7],mem[8],ymm2[9,10,11],mem[12],ymm2[13,14,15]
+; AVX2-NEXT: vextracti128 $1, %ymm8, %xmm9
+; AVX2-NEXT: vpackusdw %xmm9, %xmm8, %xmm8
+; AVX2-NEXT: vpackusdw %xmm8, %xmm8, %xmm8
+; AVX2-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX2-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3,4,5],ymm7[6,7]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,6,7]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm6 = ymm6[0],ymm2[1,2,3],ymm6[4],ymm2[5,6,7],ymm6[8],ymm2[9,10,11],ymm6[12],ymm2[13,14,15]
+; AVX2-NEXT: vextracti128 $1, %ymm6, %xmm8
+; AVX2-NEXT: vpackusdw %xmm8, %xmm6, %xmm6
+; AVX2-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5,6,7]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm5 = ymm5[0],ymm2[1,2,3],ymm5[4],ymm2[5,6,7],ymm5[8],ymm2[9,10,11],ymm5[12],ymm2[13,14,15]
+; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm8
+; AVX2-NEXT: vpackusdw %xmm8, %xmm5, %xmm5
+; AVX2-NEXT: vpackusdw %xmm6, %xmm5, %xmm5
+; AVX2-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5,6,7]
+; AVX2-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-NEXT: vpblendw {{.*#+}} ymm5 = mem[0],ymm2[1,2,3],mem[4],ymm2[5,6,7],mem[8],ymm2[9,10,11],mem[12],ymm2[13,14,15]
+; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm6
+; AVX2-NEXT: vpackusdw %xmm6, %xmm5, %xmm5
+; AVX2-NEXT: vpackusdw %xmm5, %xmm5, %xmm5
+; AVX2-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX2-NEXT: vpblendw {{.*#+}} ymm6 = mem[0],ymm2[1,2,3],mem[4],ymm2[5,6,7],mem[8],ymm2[9,10,11],mem[12],ymm2[13,14,15]
+; AVX2-NEXT: vextracti128 $1, %ymm6, %xmm7
+; AVX2-NEXT: vpackusdw %xmm7, %xmm6, %xmm6
+; AVX2-NEXT: vpackusdw %xmm6, %xmm6, %xmm6
+; AVX2-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX2-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3,4,5],ymm5[6,7]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6,7]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0],ymm2[1,2,3],ymm4[4],ymm2[5,6,7],ymm4[8],ymm2[9,10,11],ymm4[12],ymm2[13,14,15]
+; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm6
+; AVX2-NEXT: vpackusdw %xmm6, %xmm4, %xmm4
+; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6,7]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm2[1,2,3],ymm3[4],ymm2[5,6,7],ymm3[8],ymm2[9,10,11],ymm3[12],ymm2[13,14,15]
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm6
+; AVX2-NEXT: vpackusdw %xmm6, %xmm3, %xmm3
; AVX2-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm3, %xmm3
-; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
-; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
-; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpackusdw %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-NEXT: vpblendw {{.*#+}} ymm3 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm5[4,5,6,7]
+; AVX2-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-NEXT: vpblendw {{.*#+}} ymm3 = mem[0],ymm2[1,2,3],mem[4],ymm2[5,6,7],mem[8],ymm2[9,10,11],mem[12],ymm2[13,14,15]
; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm4
; AVX2-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpackusdw %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
-; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
-; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpackusdw %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
-; AVX2-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm2[6,7]
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm4 = mem[0],ymm2[1,2,3],mem[4],ymm2[5,6,7],mem[8],ymm2[9,10,11],mem[12],ymm2[13,14,15]
+; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm5
+; AVX2-NEXT: vpackusdw %xmm5, %xmm4, %xmm4
+; AVX2-NEXT: vpackusdw %xmm4, %xmm4, %xmm4
+; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6,7]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7],ymm1[8],ymm2[9,10,11],ymm1[12],ymm2[13,14,15]
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
+; AVX2-NEXT: vpackusdw %xmm4, %xmm1, %xmm1
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3],ymm0[4],ymm2[5,6,7],ymm0[8],ymm2[9,10,11],ymm0[12],ymm2[13,14,15]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovdqa 256(%rdi), %xmm4
; AVX2-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -4858,86 +4922,102 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FP-LABEL: load_i16_stride4_vf64:
; AVX2-FP: # %bb.0:
; AVX2-FP-NEXT: subq $712, %rsp # imm = 0x2C8
-; AVX2-FP-NEXT: vpxor %xmm0, %xmm0, %xmm0
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm1 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-FP-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-FP-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-FP-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-FP-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-FP-NEXT: vpackusdw %xmm2, %xmm2, %xmm2
-; AVX2-FP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm3 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm3, %xmm4
-; AVX2-FP-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
-; AVX2-FP-NEXT: vpackusdw %xmm3, %xmm3, %xmm3
-; AVX2-FP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm1 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-FP-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-FP-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-FP-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-FP-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-FP-NEXT: vpackusdw %xmm2, %xmm2, %xmm2
-; AVX2-FP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm3 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm3, %xmm4
+; AVX2-FP-NEXT: vmovdqa 128(%rdi), %ymm0
+; AVX2-FP-NEXT: vmovdqa 160(%rdi), %ymm1
+; AVX2-FP-NEXT: vmovdqa 384(%rdi), %ymm3
+; AVX2-FP-NEXT: vmovdqa 416(%rdi), %ymm4
+; AVX2-FP-NEXT: vmovdqa 256(%rdi), %ymm5
+; AVX2-FP-NEXT: vmovdqa 288(%rdi), %ymm6
+; AVX2-FP-NEXT: vmovdqa (%rdi), %ymm7
+; AVX2-FP-NEXT: vmovdqa 32(%rdi), %ymm8
+; AVX2-FP-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm9 = mem[0],ymm2[1,2,3],mem[4],ymm2[5,6,7],mem[8],ymm2[9,10,11],mem[12],ymm2[13,14,15]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm9, %xmm10
+; AVX2-FP-NEXT: vpackusdw %xmm10, %xmm9, %xmm9
+; AVX2-FP-NEXT: vpackusdw %xmm9, %xmm9, %xmm9
+; AVX2-FP-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm10 = mem[0],ymm2[1,2,3],mem[4],ymm2[5,6,7],mem[8],ymm2[9,10,11],mem[12],ymm2[13,14,15]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm10, %xmm11
+; AVX2-FP-NEXT: vpackusdw %xmm11, %xmm10, %xmm10
+; AVX2-FP-NEXT: vpackusdw %xmm10, %xmm10, %xmm10
+; AVX2-FP-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3,4,5],ymm9[6,7]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5,6,7]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm8 = ymm8[0],ymm2[1,2,3],ymm8[4],ymm2[5,6,7],ymm8[8],ymm2[9,10,11],ymm8[12],ymm2[13,14,15]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm8, %xmm10
+; AVX2-FP-NEXT: vpackusdw %xmm10, %xmm8, %xmm8
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,6,7]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm7 = ymm7[0],ymm2[1,2,3],ymm7[4],ymm2[5,6,7],ymm7[8],ymm2[9,10,11],ymm7[12],ymm2[13,14,15]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm7, %xmm10
+; AVX2-FP-NEXT: vpackusdw %xmm10, %xmm7, %xmm7
+; AVX2-FP-NEXT: vpackusdw %xmm8, %xmm7, %xmm7
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm9[4,5,6,7]
+; AVX2-FP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm7 = mem[0],ymm2[1,2,3],mem[4],ymm2[5,6,7],mem[8],ymm2[9,10,11],mem[12],ymm2[13,14,15]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm7, %xmm8
+; AVX2-FP-NEXT: vpackusdw %xmm8, %xmm7, %xmm7
+; AVX2-FP-NEXT: vpackusdw %xmm7, %xmm7, %xmm7
+; AVX2-FP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm8 = mem[0],ymm2[1,2,3],mem[4],ymm2[5,6,7],mem[8],ymm2[9,10,11],mem[12],ymm2[13,14,15]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm8, %xmm9
+; AVX2-FP-NEXT: vpackusdw %xmm9, %xmm8, %xmm8
+; AVX2-FP-NEXT: vpackusdw %xmm8, %xmm8, %xmm8
+; AVX2-FP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3,4,5],ymm7[6,7]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,6,7]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm6 = ymm6[0],ymm2[1,2,3],ymm6[4],ymm2[5,6,7],ymm6[8],ymm2[9,10,11],ymm6[12],ymm2[13,14,15]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm6, %xmm8
+; AVX2-FP-NEXT: vpackusdw %xmm8, %xmm6, %xmm6
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5,6,7]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm5 = ymm5[0],ymm2[1,2,3],ymm5[4],ymm2[5,6,7],ymm5[8],ymm2[9,10,11],ymm5[12],ymm2[13,14,15]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm5, %xmm8
+; AVX2-FP-NEXT: vpackusdw %xmm8, %xmm5, %xmm5
+; AVX2-FP-NEXT: vpackusdw %xmm6, %xmm5, %xmm5
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5,6,7]
+; AVX2-FP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm5 = mem[0],ymm2[1,2,3],mem[4],ymm2[5,6,7],mem[8],ymm2[9,10,11],mem[12],ymm2[13,14,15]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm5, %xmm6
+; AVX2-FP-NEXT: vpackusdw %xmm6, %xmm5, %xmm5
+; AVX2-FP-NEXT: vpackusdw %xmm5, %xmm5, %xmm5
+; AVX2-FP-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm6 = mem[0],ymm2[1,2,3],mem[4],ymm2[5,6,7],mem[8],ymm2[9,10,11],mem[12],ymm2[13,14,15]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm6, %xmm7
+; AVX2-FP-NEXT: vpackusdw %xmm7, %xmm6, %xmm6
+; AVX2-FP-NEXT: vpackusdw %xmm6, %xmm6, %xmm6
+; AVX2-FP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3,4,5],ymm5[6,7]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6,7]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0],ymm2[1,2,3],ymm4[4],ymm2[5,6,7],ymm4[8],ymm2[9,10,11],ymm4[12],ymm2[13,14,15]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm4, %xmm6
+; AVX2-FP-NEXT: vpackusdw %xmm6, %xmm4, %xmm4
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6,7]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm2[1,2,3],ymm3[4],ymm2[5,6,7],ymm3[8],ymm2[9,10,11],ymm3[12],ymm2[13,14,15]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm3, %xmm6
+; AVX2-FP-NEXT: vpackusdw %xmm6, %xmm3, %xmm3
; AVX2-FP-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
-; AVX2-FP-NEXT: vpackusdw %xmm3, %xmm3, %xmm3
-; AVX2-FP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm1 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-FP-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-FP-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-FP-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-FP-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-FP-NEXT: vpackusdw %xmm2, %xmm2, %xmm2
-; AVX2-FP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm3 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm5[4,5,6,7]
+; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm3 = mem[0],ymm2[1,2,3],mem[4],ymm2[5,6,7],mem[8],ymm2[9,10,11],mem[12],ymm2[13,14,15]
; AVX2-FP-NEXT: vextracti128 $1, %ymm3, %xmm4
; AVX2-FP-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX2-FP-NEXT: vpackusdw %xmm3, %xmm3, %xmm3
; AVX2-FP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm1 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-FP-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-FP-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-FP-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-FP-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-FP-NEXT: vpackusdw %xmm2, %xmm2, %xmm2
-; AVX2-FP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1,2,3],mem[4],ymm0[5,6,7],mem[8],ymm0[9,10,11],mem[12],ymm0[13,14,15]
-; AVX2-FP-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX2-FP-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
-; AVX2-FP-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
-; AVX2-FP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm2[6,7]
-; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm4 = mem[0],ymm2[1,2,3],mem[4],ymm2[5,6,7],mem[8],ymm2[9,10,11],mem[12],ymm2[13,14,15]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm4, %xmm5
+; AVX2-FP-NEXT: vpackusdw %xmm5, %xmm4, %xmm4
+; AVX2-FP-NEXT: vpackusdw %xmm4, %xmm4, %xmm4
+; AVX2-FP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6,7]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7],ymm1[8],ymm2[9,10,11],ymm1[12],ymm2[13,14,15]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm1, %xmm4
+; AVX2-FP-NEXT: vpackusdw %xmm4, %xmm1, %xmm1
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7]
+; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3],ymm0[4],ymm2[5,6,7],ymm0[8],ymm2[9,10,11],ymm0[12],ymm2[13,14,15]
+; AVX2-FP-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-FP-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX2-FP-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovdqa 256(%rdi), %xmm5
; AVX2-FP-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -5333,246 +5413,262 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
;
; AVX2-FCP-LABEL: load_i16_stride4_vf64:
; AVX2-FCP: # %bb.0:
-; AVX2-FCP-NEXT: subq $680, %rsp # imm = 0x2A8
-; AVX2-FCP-NEXT: vmovdqa 320(%rdi), %ymm7
+; AVX2-FCP-NEXT: subq $664, %rsp # imm = 0x298
+; AVX2-FCP-NEXT: vmovdqa 384(%rdi), %ymm0
+; AVX2-FCP-NEXT: vmovdqa 320(%rdi), %ymm9
+; AVX2-FCP-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqa 352(%rdi), %ymm10
+; AVX2-FCP-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqa 256(%rdi), %ymm4
+; AVX2-FCP-NEXT: vmovdqa 288(%rdi), %ymm5
+; AVX2-FCP-NEXT: vmovdqa 64(%rdi), %ymm8
+; AVX2-FCP-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqa 96(%rdi), %ymm7
; AVX2-FCP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 352(%rdi), %ymm6
-; AVX2-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 64(%rdi), %ymm5
-; AVX2-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 96(%rdi), %ymm4
-; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqa (%rdi), %ymm2
+; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm1[0,1,2,3,4,5,6,7]
; AVX2-FCP-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm1[1,2,3],mem[4],ymm1[5,6,7],mem[8],ymm1[9,10,11],mem[12],ymm1[13,14,15]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX2-FCP-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm1[1,2,3],mem[4],ymm1[5,6,7],mem[8],ymm1[9,10,11],mem[12],ymm1[13,14,15]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-FCP-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-FCP-NEXT: vpackusdw %xmm0, %xmm2, %xmm2
-; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm0 = [0,2,2,3,0,2,4,6]
-; AVX2-FCP-NEXT: vpermd %ymm4, %ymm0, %ymm3
-; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,16,17,20,21,24,25,28,29]
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm3
-; AVX2-FCP-NEXT: vpermd %ymm5, %ymm0, %ymm5
-; AVX2-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm5, %ymm5
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm5[0,1,2,3,4,5],ymm3[6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm2 = mem[0],ymm1[1,2,3],mem[4],ymm1[5,6,7],mem[8],ymm1[9,10,11],mem[12],ymm1[13,14,15]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-FCP-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm3 = mem[0],ymm1[1,2,3],mem[4],ymm1[5,6,7],mem[8],ymm1[9,10,11],mem[12],ymm1[13,14,15]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm3, %xmm5
-; AVX2-FCP-NEXT: vpackusdw %xmm5, %xmm3, %xmm3
-; AVX2-FCP-NEXT: vpackusdw %xmm2, %xmm3, %xmm3
-; AVX2-FCP-NEXT: vpermd %ymm6, %ymm0, %ymm2
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm5
-; AVX2-FCP-NEXT: vpermd %ymm7, %ymm0, %ymm6
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm6, %ymm7
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm7[0,1,2,3,4,5],ymm5[6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm5[4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm3 = mem[0],ymm1[1,2,3],mem[4],ymm1[5,6,7],mem[8],ymm1[9,10,11],mem[12],ymm1[13,14,15]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm3, %xmm5
-; AVX2-FCP-NEXT: vpackusdw %xmm5, %xmm3, %xmm3
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm5 = mem[0],ymm1[1,2,3],mem[4],ymm1[5,6,7],mem[8],ymm1[9,10,11],mem[12],ymm1[13,14,15]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm5, %xmm7
-; AVX2-FCP-NEXT: vpackusdw %xmm7, %xmm5, %xmm5
-; AVX2-FCP-NEXT: vmovdqa 448(%rdi), %ymm7
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm1[1,2,3],ymm3[4],ymm1[5,6,7],ymm3[8],ymm1[9,10,11],ymm3[12],ymm1[13,14,15]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm3, %xmm6
+; AVX2-FCP-NEXT: vpackusdw %xmm6, %xmm3, %xmm3
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm1[1,2,3],ymm2[4],ymm1[5,6,7],ymm2[8],ymm1[9,10,11],ymm2[12],ymm1[13,14,15]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm2, %xmm6
+; AVX2-FCP-NEXT: vpackusdw %xmm6, %xmm2, %xmm2
+; AVX2-FCP-NEXT: vpackusdw %xmm3, %xmm2, %xmm6
+; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,2,2,3,0,2,4,6]
+; AVX2-FCP-NEXT: vpermd %ymm7, %ymm2, %ymm7
; AVX2-FCP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpackusdw %xmm3, %xmm5, %xmm3
-; AVX2-FCP-NEXT: vmovdqa 480(%rdi), %ymm5
-; AVX2-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpermd %ymm5, %ymm0, %ymm5
-; AVX2-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm5, %ymm5
-; AVX2-FCP-NEXT: vpermd %ymm7, %ymm0, %ymm14
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm14, %ymm7
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm7[0,1,2,3,4,5],ymm5[6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm5[4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm3 = mem[0],ymm1[1,2,3],mem[4],ymm1[5,6,7],mem[8],ymm1[9,10,11],mem[12],ymm1[13,14,15]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm3, %xmm5
-; AVX2-FCP-NEXT: vpackusdw %xmm5, %xmm3, %xmm3
-; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = mem[0],ymm1[1,2,3],mem[4],ymm1[5,6,7],mem[8],ymm1[9,10,11],mem[12],ymm1[13,14,15]
-; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm5
-; AVX2-FCP-NEXT: vpackusdw %xmm5, %xmm1, %xmm1
-; AVX2-FCP-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
-; AVX2-FCP-NEXT: vmovdqa 192(%rdi), %ymm5
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,16,17,20,21,24,25,28,29]
+; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm7, %ymm7
+; AVX2-FCP-NEXT: vpermd %ymm8, %ymm2, %ymm8
+; AVX2-FCP-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm8, %ymm8
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3,4,5],ymm7[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm5[0],ymm1[1,2,3],ymm5[4],ymm1[5,6,7],ymm5[8],ymm1[9,10,11],ymm5[12],ymm1[13,14,15]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm5, %xmm6
+; AVX2-FCP-NEXT: vpackusdw %xmm6, %xmm5, %xmm5
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0],ymm1[1,2,3],ymm4[4],ymm1[5,6,7],ymm4[8],ymm1[9,10,11],ymm4[12],ymm1[13,14,15]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm4, %xmm6
+; AVX2-FCP-NEXT: vpackusdw %xmm6, %xmm4, %xmm4
+; AVX2-FCP-NEXT: vpackusdw %xmm5, %xmm4, %xmm4
+; AVX2-FCP-NEXT: vpermd %ymm10, %ymm2, %ymm13
+; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm13, %ymm5
+; AVX2-FCP-NEXT: vpermd %ymm9, %ymm2, %ymm10
+; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm10, %ymm6
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3,4,5],ymm5[6,7]
+; AVX2-FCP-NEXT: vmovdqa 416(%rdi), %ymm6
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm6[0,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0],ymm1[1,2,3],ymm4[4],ymm1[5,6,7],ymm4[8],ymm1[9,10,11],ymm4[12],ymm1[13,14,15]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm4, %xmm5
+; AVX2-FCP-NEXT: vpackusdw %xmm5, %xmm4, %xmm4
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7],ymm0[8],ymm1[9,10,11],ymm0[12],ymm1[13,14,15]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm5
+; AVX2-FCP-NEXT: vpackusdw %xmm5, %xmm0, %xmm0
+; AVX2-FCP-NEXT: vmovdqa 448(%rdi), %ymm5
; AVX2-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 224(%rdi), %ymm3
-; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpermd %ymm3, %ymm0, %ymm9
-; AVX2-FCP-NEXT: vpermd %ymm5, %ymm0, %ymm3
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm9, %ymm0
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm5
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3,4,5],ymm0[6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-FCP-NEXT: vpackusdw %xmm4, %xmm0, %xmm0
+; AVX2-FCP-NEXT: vmovdqa 480(%rdi), %ymm4
+; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpermd %ymm4, %ymm2, %ymm4
+; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm4, %ymm4
+; AVX2-FCP-NEXT: vpermd %ymm5, %ymm2, %ymm12
+; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm12, %ymm5
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 288(%rdi), %xmm1
-; AVX2-FCP-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 304(%rdi), %xmm12
+; AVX2-FCP-NEXT: vmovdqa 160(%rdi), %ymm0
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7],ymm0[8],ymm1[9,10,11],ymm0[12],ymm1[13,14,15]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm4
+; AVX2-FCP-NEXT: vpackusdw %xmm4, %xmm0, %xmm0
+; AVX2-FCP-NEXT: vmovdqa 128(%rdi), %ymm4
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm4[0],ymm1[1,2,3],ymm4[4],ymm1[5,6,7],ymm4[8],ymm1[9,10,11],ymm4[12],ymm1[13,14,15]
+; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm4
+; AVX2-FCP-NEXT: vpackusdw %xmm4, %xmm1, %xmm1
+; AVX2-FCP-NEXT: vpackusdw %xmm0, %xmm1, %xmm0
+; AVX2-FCP-NEXT: vmovdqa 192(%rdi), %ymm1
+; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqa 224(%rdi), %ymm4
+; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpermd %ymm4, %ymm2, %ymm6
+; AVX2-FCP-NEXT: vpermd %ymm1, %ymm2, %ymm4
+; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm6, %ymm1
+; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm4, %ymm2
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqa 288(%rdi), %xmm2
+; AVX2-FCP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FCP-NEXT: vmovdqa 304(%rdi), %xmm0
+; AVX2-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [0,1,2,3,2,3,10,11,8,9,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm12, %xmm0
-; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm1, %xmm1
+; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm0, %xmm0
+; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm2, %xmm1
; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; AVX2-FCP-NEXT: vmovdqa 256(%rdi), %xmm10
-; AVX2-FCP-NEXT: vmovdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FCP-NEXT: vmovdqa 256(%rdi), %xmm0
+; AVX2-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FCP-NEXT: vmovdqa 272(%rdi), %xmm7
-; AVX2-FCP-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [2,3,10,11,8,9,10,11,8,9,10,11,12,13,14,15]
-; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm7, %xmm7
-; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm10, %xmm8
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm7 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm7[0,1],xmm1[2,3]
-; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm2, %ymm2
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm6, %ymm6
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0,1,2,3,4,5],ymm2[6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %xmm2
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [2,3,10,11,8,9,10,11,8,9,10,11,12,13,14,15]
+; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm7, %xmm8
+; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm0, %xmm14
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm8 = xmm14[0],xmm8[0],xmm14[1],xmm8[1]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm8[0,1],xmm1[2,3]
+; AVX2-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15,18,19,22,23,26,27,30,31,18,19,22,23,26,27,30,31]
+; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm13, %ymm13
+; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm10, %ymm10
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5],ymm13[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm10[4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %xmm13
; AVX2-FCP-NEXT: vmovdqa 48(%rdi), %xmm1
-; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm1, %xmm6
-; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm2, %xmm7
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
-; AVX2-FCP-NEXT: vmovdqa (%rdi), %xmm7
+; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm1, %xmm10
+; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm13, %xmm14
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm14 = xmm14[0],xmm10[0],xmm14[1],xmm10[1]
+; AVX2-FCP-NEXT: vmovdqa (%rdi), %xmm10
; AVX2-FCP-NEXT: vmovdqa 16(%rdi), %xmm15
-; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm15, %xmm11
-; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm7, %xmm13
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm11 = xmm13[0],xmm11[0],xmm13[1],xmm11[1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm6 = xmm11[0,1],xmm6[2,3]
+; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm15, %xmm0
+; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm10, %xmm9
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm9[0],xmm0[0],xmm9[1],xmm0[1]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm14[2,3]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm8, %ymm11
+; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm8, %ymm9
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm8, %ymm13
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm13[0,1,2,3,4,5],ymm11[6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm11[4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm8, %ymm14
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm14[0,1,2,3,4,5],ymm9[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 160(%rdi), %xmm8
; AVX2-FCP-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 176(%rdi), %xmm6
-; AVX2-FCP-NEXT: vmovdqa %xmm6, (%rsp) # 16-byte Spill
-; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm6, %xmm6
-; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm8, %xmm11
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm11[0],xmm6[0],xmm11[1],xmm6[1]
+; AVX2-FCP-NEXT: vmovdqa 176(%rdi), %xmm0
+; AVX2-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm0, %xmm0
+; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm8, %xmm9
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm9[0],xmm0[0],xmm9[1],xmm0[1]
; AVX2-FCP-NEXT: vmovdqa 128(%rdi), %xmm8
; AVX2-FCP-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 144(%rdi), %xmm11
-; AVX2-FCP-NEXT: vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm11, %xmm11
-; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm8, %xmm13
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm11 = xmm13[0],xmm11[0],xmm13[1],xmm11[1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm6 = xmm11[0,1],xmm6[2,3]
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm9, %ymm9
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm3, %ymm3
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm9[6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
-; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vmovdqa 416(%rdi), %xmm6
-; AVX2-FCP-NEXT: vmovdqa 432(%rdi), %xmm13
-; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm13, %xmm3
-; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm6, %xmm5
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm9 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
+; AVX2-FCP-NEXT: vmovdqa 144(%rdi), %xmm9
+; AVX2-FCP-NEXT: vmovdqa %xmm9, (%rsp) # 16-byte Spill
+; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm9, %xmm9
+; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm8, %xmm14
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm9 = xmm14[0],xmm9[0],xmm14[1],xmm9[1]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm9[0,1],xmm0[2,3]
+; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm6, %ymm9
+; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm4, %ymm4
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm9[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vmovdqa 416(%rdi), %xmm9
+; AVX2-FCP-NEXT: vmovdqa 432(%rdi), %xmm8
+; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm8, %xmm0
+; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm9, %xmm4
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
; AVX2-FCP-NEXT: vmovdqa 384(%rdi), %xmm5
-; AVX2-FCP-NEXT: vmovdqa 400(%rdi), %xmm3
-; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm3, %xmm11
-; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm5, %xmm0
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm11[0],xmm0[1],xmm11[1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm9[2,3]
-; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm8, %ymm9
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm14, %ymm11
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm11[0,1,2,3,4,5],ymm9[6,7]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5,6,7]
+; AVX2-FCP-NEXT: vmovdqa 400(%rdi), %xmm4
+; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm4, %xmm14
+; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm5, %xmm2
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm14[0],xmm2[1],xmm14[1]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
+; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm12, %ymm12
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm12[0,1,2,3,4,5],ymm2[6,7]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm0 = [1,3,2,3,1,3,5,7]
-; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm8, %ymm9
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm11, %ymm11
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm11[0,1,2,3,4,5],ymm9[6,7]
+; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
+; AVX2-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm6, %ymm12
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm12[0,1,2,3,4,5],ymm2[6,7]
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX2-FCP-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[3,1,2,3]
-; AVX2-FCP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm13[3,1,2,3]
+; AVX2-FCP-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,2,0,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,2,0,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm2 = xmm15[3,1,2,3]
-; AVX2-FCP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[3,1,2,3]
-; AVX2-FCP-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[2,0,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm7[2,0,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm7[0],xmm2[0],xmm7[1],xmm2[1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm9[4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm12 = xmm6[0,1,2,0,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm12[0],xmm1[0],xmm12[1],xmm1[1]
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm15[3,1,2,3]
+; AVX2-FCP-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm12 = xmm10[3,1,2,3]
+; AVX2-FCP-NEXT: vmovdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm10 = xmm6[2,0,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm12 = xmm12[2,0,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm10 = xmm12[0],xmm10[0],xmm12[1],xmm10[1]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm10[0,1],xmm1[2,3]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm1, %ymm1
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm1, %ymm1
+; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm2 = xmm12[3,1,2,3]
+; AVX2-FCP-NEXT: vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
+; AVX2-FCP-NEXT: # xmm2 = mem[3,1,2,3]
; AVX2-FCP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FCP-NEXT: vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
; AVX2-FCP-NEXT: # xmm14 = mem[3,1,2,3]
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,2,0,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm14[0,1,2,0,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm7[0],xmm2[0],xmm7[1],xmm2[1]
-; AVX2-FCP-NEXT: vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
-; AVX2-FCP-NEXT: # xmm11 = mem[3,1,2,3]
-; AVX2-FCP-NEXT: vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
-; AVX2-FCP-NEXT: # xmm9 = mem[3,1,2,3]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm12 = xmm11[2,0,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm15 = xmm9[2,0,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm12 = xmm15[0],xmm12[0],xmm15[1],xmm12[1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm12[0,1],xmm2[2,3]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm6 = xmm14[0,1,2,0,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm6[0],xmm2[0],xmm6[1],xmm2[1]
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm12 = xmm7[3,1,2,3]
+; AVX2-FCP-NEXT: vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
+; AVX2-FCP-NEXT: # xmm10 = mem[3,1,2,3]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm13 = xmm12[2,0,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm15 = xmm10[2,0,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm13 = xmm15[0],xmm13[0],xmm15[1],xmm13[1]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm13[0,1],xmm2[2,3]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm1, %ymm1
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm1, %ymm1
+; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm2 = xmm13[3,1,2,3]
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm2 = xmm8[3,1,2,3]
; AVX2-FCP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[3,1,2,3]
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm9[3,1,2,3]
; AVX2-FCP-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,2,0,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm6 = xmm6[0,1,2,0,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm6[0],xmm2[0],xmm6[1],xmm2[1]
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm13 = xmm3[3,1,2,3]
-; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm12 = xmm5[3,1,2,3]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm13[2,0,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm12[2,0,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
-; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm9 = xmm6[0,1,2,0,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm9[0],xmm2[0],xmm9[1],xmm2[1]
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm13 = xmm4[3,1,2,3]
+; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm9 = xmm5[3,1,2,3]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm13[2,0,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm9[2,0,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
+; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm4[0,1],xmm2[2,3]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
; AVX2-FCP-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 32-byte Folded Reload
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm8, %ymm0
-; AVX2-FCP-NEXT: vpshufb %ymm4, %ymm7, %ymm1
+; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm8, %ymm0
+; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm7, %ymm1
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX2-FCP-NEXT: vpshufd $231, (%rsp), %xmm6 # 16-byte Folded Reload
+; AVX2-FCP-NEXT: vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
; AVX2-FCP-NEXT: # xmm6 = mem[3,1,2,3]
; AVX2-FCP-NEXT: vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
; AVX2-FCP-NEXT: # xmm5 = mem[3,1,2,3]
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm1 = xmm6[0,1,2,0,4,5,6,7]
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm2 = xmm5[0,1,2,0,4,5,6,7]
; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; AVX2-FCP-NEXT: vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
+; AVX2-FCP-NEXT: vpshufd $231, (%rsp), %xmm4 # 16-byte Folded Reload
; AVX2-FCP-NEXT: # xmm4 = mem[3,1,2,3]
; AVX2-FCP-NEXT: vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
; AVX2-FCP-NEXT: # xmm2 = mem[3,1,2,3]
@@ -5583,9 +5679,9 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm0, %ymm0
+; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm0, %ymm0
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm1, %ymm1
+; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm1, %ymm1
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT: vpshuflw $116, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; AVX2-FCP-NEXT: # xmm1 = mem[0,1,3,1,4,5,6,7]
@@ -5600,21 +5696,21 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm15 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm0, %ymm0
+; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm0, %ymm0
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm1, %ymm1
+; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm1, %ymm1
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT: vpshuflw $116, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; AVX2-FCP-NEXT: # xmm1 = mem[0,1,3,1,4,5,6,7]
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm14[0,1,3,1,4,5,6,7]
; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm11[3,1,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm9 = xmm9[3,1,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm9[0],xmm3[0],xmm9[1],xmm3[1]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm12[3,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm10 = xmm10[3,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm10[0],xmm3[0],xmm10[1],xmm3[1]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm8, %ymm1
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm7, %ymm3
+; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm8, %ymm1
+; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm7, %ymm3
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm3 = xmm6[0,1,3,1,4,5,6,7]
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm5[0,1,3,1,4,5,6,7]
@@ -5625,9 +5721,9 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm2, %ymm2
+; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm2, %ymm2
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm3, %ymm3
+; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm3
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
; AVX2-FCP-NEXT: vpshuflw $116, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
; AVX2-FCP-NEXT: # xmm3 = mem[0,1,3,1,4,5,6,7]
@@ -5635,7 +5731,7 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: # xmm4 = mem[0,1,3,1,4,5,6,7]
; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm4 = xmm13[3,1,2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm12[3,1,2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vpshuflw {{.*#+}} xmm5 = xmm9[3,1,2,3,4,5,6,7]
; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
@@ -5667,7 +5763,7 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FCP-NEXT: vmovdqa %ymm1, 32(%r8)
; AVX2-FCP-NEXT: vmovdqa %ymm0, 64(%r8)
; AVX2-FCP-NEXT: vmovdqa %ymm15, (%r8)
-; AVX2-FCP-NEXT: addq $680, %rsp # imm = 0x2A8
+; AVX2-FCP-NEXT: addq $664, %rsp # imm = 0x298
; AVX2-FCP-NEXT: vzeroupper
; AVX2-FCP-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
index 418c987ab9a30..d282e051d35b3 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
@@ -444,6 +444,7 @@ define void @store_i16_stride5_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpshufb %ymm4, %ymm2, %ymm2
; AVX512-NEXT: vpblendw {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6],ymm2[7],ymm5[8,9],ymm2[10,11],ymm5[12,13,14],ymm2[15]
; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm3 ^ (mem & (ymm2 ^ ymm3))
+; AVX512-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm2
; AVX512-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX512-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,3,1,3,4,5,6,7]
@@ -471,6 +472,7 @@ define void @store_i16_stride5_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm2
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6],ymm2[7],ymm5[8,9],ymm2[10,11],ymm5[12,13,14],ymm2[15]
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm3 ^ (mem & (ymm2 ^ ymm3))
+; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm2
; AVX512-FCP-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,6,7,14,15,u,u,8,9,10,11,12,13,14,15]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
@@ -497,6 +499,7 @@ define void @store_i16_stride5_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpshufb %ymm4, %ymm2, %ymm2
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6],ymm2[7],ymm5[8,9],ymm2[10,11],ymm5[12,13,14],ymm2[15]
; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = ymm3 ^ (mem & (ymm2 ^ ymm3))
+; AVX512DQ-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm2
; AVX512DQ-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,3,1,3,4,5,6,7]
@@ -524,6 +527,7 @@ define void @store_i16_stride5_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm2
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6],ymm2[7],ymm5[8,9],ymm2[10,11],ymm5[12,13,14],ymm2[15]
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm3 ^ (mem & (ymm2 ^ ymm3))
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm2
; AVX512DQ-FCP-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,6,7,14,15,u,u,8,9,10,11,12,13,14,15]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
index 6f50d61f4d1f4..fc24976d651db 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
@@ -1390,18 +1390,19 @@ define void @store_i16_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vmovdqa {{.*#+}} ymm8 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
; AVX512-NEXT: vpternlogd {{.*#+}} ymm8 = (mem & ~ymm8) | ymm7
; AVX512-NEXT: vpternlogq {{.*#+}} ymm8 = ymm2 ^ (mem & (ymm8 ^ ymm2))
-; AVX512-NEXT: vpsrlq $48, %xmm4, %xmm2
-; AVX512-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm3[1],xmm2[1]
+; AVX512-NEXT: vinserti32x4 $2, %xmm0, %zmm8, %zmm2
+; AVX512-NEXT: vpsrlq $48, %xmm4, %xmm4
+; AVX512-NEXT: vpunpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm4[1]
; AVX512-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX512-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3,4],xmm0[5,6,7]
+; AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[3,4],xmm0[5,6,7]
; AVX512-NEXT: vpsrld $16, %xmm6, %xmm1
; AVX512-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm5[2],xmm1[2],xmm5[3],xmm1[3]
-; AVX512-NEXT: vpbroadcastd 12(%r10), %xmm2
-; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4],xmm1[5,6],xmm2[7]
+; AVX512-NEXT: vpbroadcastd 12(%r10), %xmm3
+; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3,4],xmm1[5,6],xmm3[7]
; AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4],xmm1[5,6,7]
; AVX512-NEXT: vmovdqa %xmm0, 96(%rax)
-; AVX512-NEXT: vmovdqa %ymm8, 64(%rax)
+; AVX512-NEXT: vmovdqa %ymm2, 64(%rax)
; AVX512-NEXT: vmovdqa64 %zmm11, (%rax)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -1450,6 +1451,7 @@ define void @store_i16_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
; AVX512-FCP-NEXT: vpternlogd {{.*#+}} ymm8 = (mem & ~ymm8) | ymm7
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm8 = ymm6 ^ (mem & (ymm8 ^ ymm6))
+; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm8, %zmm6
; AVX512-FCP-NEXT: vpsrlq $48, %xmm3, %xmm3
; AVX512-FCP-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
@@ -1461,8 +1463,8 @@ define void @store_i16_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4],xmm1[5,6],xmm2[7]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4],xmm1[5,6,7]
; AVX512-FCP-NEXT: vmovdqa %xmm0, 96(%rax)
-; AVX512-FCP-NEXT: vmovdqa %ymm8, 64(%rax)
; AVX512-FCP-NEXT: vmovdqa64 %zmm11, (%rax)
+; AVX512-FCP-NEXT: vmovdqa %ymm6, 64(%rax)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
@@ -1513,18 +1515,19 @@ define void @store_i16_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm8 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
; AVX512DQ-NEXT: vpternlogd {{.*#+}} ymm8 = (mem & ~ymm8) | ymm7
; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm8 = ymm2 ^ (mem & (ymm8 ^ ymm2))
-; AVX512DQ-NEXT: vpsrlq $48, %xmm4, %xmm2
-; AVX512DQ-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm3[1],xmm2[1]
+; AVX512DQ-NEXT: vinserti32x4 $2, %xmm0, %zmm8, %zmm2
+; AVX512DQ-NEXT: vpsrlq $48, %xmm4, %xmm4
+; AVX512DQ-NEXT: vpunpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm4[1]
; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX512DQ-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3,4],xmm0[5,6,7]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[3,4],xmm0[5,6,7]
; AVX512DQ-NEXT: vpsrld $16, %xmm6, %xmm1
; AVX512DQ-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm5[2],xmm1[2],xmm5[3],xmm1[3]
-; AVX512DQ-NEXT: vpbroadcastd 12(%r10), %xmm2
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4],xmm1[5,6],xmm2[7]
+; AVX512DQ-NEXT: vpbroadcastd 12(%r10), %xmm3
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3,4],xmm1[5,6],xmm3[7]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4],xmm1[5,6,7]
; AVX512DQ-NEXT: vmovdqa %xmm0, 96(%rax)
-; AVX512DQ-NEXT: vmovdqa %ymm8, 64(%rax)
+; AVX512DQ-NEXT: vmovdqa %ymm2, 64(%rax)
; AVX512DQ-NEXT: vmovdqa64 %zmm11, (%rax)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
@@ -1573,6 +1576,7 @@ define void @store_i16_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} ymm8 = (mem & ~ymm8) | ymm7
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm8 = ymm6 ^ (mem & (ymm8 ^ ymm6))
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm8, %zmm6
; AVX512DQ-FCP-NEXT: vpsrlq $48, %xmm3, %xmm3
; AVX512DQ-FCP-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
@@ -1584,8 +1588,8 @@ define void @store_i16_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4],xmm1[5,6],xmm2[7]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4],xmm1[5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa %xmm0, 96(%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm8, 64(%rax)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm11, (%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm6, 64(%rax)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
index 86efcf9c57616..7bbb082f07741 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
@@ -718,6 +718,7 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,0,1,1]
; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1]
; AVX512-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 ^ (mem & (ymm3 ^ ymm2))
+; AVX512-NEXT: vinserti32x4 $2, %xmm0, %zmm3, %zmm2
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,7,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,14,u],zero,zero,xmm1[7,15,u,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
@@ -726,7 +727,7 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX512-NEXT: vpternlogq {{.*#+}} xmm1 = xmm1 ^ (mem & (xmm1 ^ xmm0))
; AVX512-NEXT: vmovq %xmm1, 32(%r9)
-; AVX512-NEXT: vmovdqa %ymm3, (%r9)
+; AVX512-NEXT: vmovdqa %ymm2, (%r9)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
@@ -748,6 +749,7 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,0,0,1,1]
; AVX512-FCP-NEXT: vpermd %ymm3, %ymm4, %ymm3
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 ^ (mem & (ymm3 ^ ymm2))
+; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm3, %zmm2
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,7,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,14,u],zero,zero,xmm1[7,15,u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm0, %xmm1, %xmm0
@@ -756,7 +758,7 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512-FCP-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} xmm1 = xmm1 ^ (mem & (xmm1 ^ xmm0))
; AVX512-FCP-NEXT: vmovq %xmm1, 32(%r9)
-; AVX512-FCP-NEXT: vmovdqa %ymm3, (%r9)
+; AVX512-FCP-NEXT: vmovdqa %ymm2, (%r9)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
@@ -778,6 +780,7 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,0,1,1]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1]
; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 ^ (mem & (ymm3 ^ ymm2))
+; AVX512DQ-NEXT: vinserti32x4 $2, %xmm0, %zmm3, %zmm2
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,7,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,14,u],zero,zero,xmm1[7,15,u,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpor %xmm0, %xmm1, %xmm0
@@ -786,7 +789,7 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512DQ-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX512DQ-NEXT: vpternlogq {{.*#+}} xmm1 = xmm1 ^ (mem & (xmm1 ^ xmm0))
; AVX512DQ-NEXT: vmovq %xmm1, 32(%r9)
-; AVX512DQ-NEXT: vmovdqa %ymm3, (%r9)
+; AVX512DQ-NEXT: vmovdqa %ymm2, (%r9)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -808,6 +811,7 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,0,0,1,1]
; AVX512DQ-FCP-NEXT: vpermd %ymm3, %ymm4, %ymm3
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 ^ (mem & (ymm3 ^ ymm2))
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm3, %zmm2
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,7,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,14,u],zero,zero,xmm1[7,15,u,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm0, %xmm1, %xmm0
@@ -816,7 +820,7 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512DQ-FCP-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} xmm1 = xmm1 ^ (mem & (xmm1 ^ xmm0))
; AVX512DQ-FCP-NEXT: vmovq %xmm1, 32(%r9)
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm3, (%r9)
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm2, (%r9)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
@@ -840,6 +844,7 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512BW-NEXT: movl $554189328, %ecx # imm = 0x21084210
; AVX512BW-NEXT: kmovd %ecx, %k1
; AVX512BW-NEXT: vmovdqu8 %ymm3, %ymm2 {%k1}
+; AVX512BW-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm2
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,7,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,14,u],zero,zero,xmm1[7,15,u,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vpor %xmm0, %xmm1, %xmm0
@@ -873,6 +878,7 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512BW-FCP-NEXT: movl $554189328, %ecx # imm = 0x21084210
; AVX512BW-FCP-NEXT: kmovd %ecx, %k1
; AVX512BW-FCP-NEXT: vmovdqu8 %ymm3, %ymm2 {%k1}
+; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm2
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,7,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,14,u],zero,zero,xmm1[7,15,u,u,u,u,u,u,u,u,u]
; AVX512BW-FCP-NEXT: vpor %xmm0, %xmm1, %xmm0
@@ -906,6 +912,7 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512DQ-BW-NEXT: movl $554189328, %ecx # imm = 0x21084210
; AVX512DQ-BW-NEXT: kmovd %ecx, %k1
; AVX512DQ-BW-NEXT: vmovdqu8 %ymm3, %ymm2 {%k1}
+; AVX512DQ-BW-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm2
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,7,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,14,u],zero,zero,xmm1[7,15,u,u,u,u,u,u,u,u,u]
; AVX512DQ-BW-NEXT: vpor %xmm0, %xmm1, %xmm0
@@ -939,6 +946,7 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512DQ-BW-FCP-NEXT: movl $554189328, %ecx # imm = 0x21084210
; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %ymm3, %ymm2 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm2
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,7,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,14,u],zero,zero,xmm1[7,15,u,u,u,u,u,u,u,u,u]
; AVX512DQ-BW-FCP-NEXT: vpor %xmm0, %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
index f4055a953badd..524161408bc0f 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
@@ -2088,6 +2088,7 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,1,9],zero,ymm6[u,u,u,u,2,10],zero,ymm6[u,u,u,u,19,27],zero,ymm6[u,u,u,u,20,28],zero,ymm6[u,u,u,u,21]
; AVX512-NEXT: vpternlogq {{.*#+}} ymm6 = ymm6 | (ymm7 & ~mem)
; AVX512-NEXT: vpternlogq {{.*#+}} ymm6 = ymm6 ^ (mem & (ymm6 ^ ymm5))
+; AVX512-NEXT: vinserti32x4 $2, %xmm0, %zmm6, %zmm5
; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm3[1,3,2,3]
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u],zero,zero,xmm3[6,14,u,u,u],zero,zero,xmm3[7,15,u,u,u]
; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm4[1,3,2,3]
@@ -2098,9 +2099,9 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = zero,xmm0[13,u,u,u,u],zero,zero,xmm0[14,u,u,u,u],zero,zero,xmm0[15]
; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vpternlogq {{.*#+}} xmm0 = xmm0 ^ (mem & (xmm0 ^ xmm3))
-; AVX512-NEXT: vmovdqa %ymm6, 64(%rax)
; AVX512-NEXT: vmovdqa %xmm0, 96(%rax)
; AVX512-NEXT: vmovdqa64 %zmm8, (%rax)
+; AVX512-NEXT: vmovdqa %ymm5, 64(%rax)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
@@ -2155,6 +2156,7 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,1,9],zero,ymm7[u,u,u,u,2,10],zero,ymm7[u,u,u,u,19,27],zero,ymm7[u,u,u,u,20,28],zero,ymm7[u,u,u,u,21]
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm7 = ymm7 | (ymm8 & ~mem)
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm7 = ymm7 ^ (mem & (ymm7 ^ ymm5))
+; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm7, %zmm5
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[1,3,2,3]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u],zero,zero,xmm4[6,14,u,u,u],zero,zero,xmm4[7,15,u,u,u]
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[1,3,2,3]
@@ -2165,9 +2167,9 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = zero,xmm0[13,u,u,u,u],zero,zero,xmm0[14,u,u,u,u],zero,zero,xmm0[15]
; AVX512-FCP-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} xmm0 = xmm0 ^ (mem & (xmm0 ^ xmm3))
-; AVX512-FCP-NEXT: vmovdqa %ymm7, 64(%rax)
; AVX512-FCP-NEXT: vmovdqa %xmm0, 96(%rax)
; AVX512-FCP-NEXT: vmovdqa64 %zmm6, (%rax)
+; AVX512-FCP-NEXT: vmovdqa %ymm5, 64(%rax)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
@@ -2225,6 +2227,7 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,1,9],zero,ymm6[u,u,u,u,2,10],zero,ymm6[u,u,u,u,19,27],zero,ymm6[u,u,u,u,20,28],zero,ymm6[u,u,u,u,21]
; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm6 = ymm6 | (ymm7 & ~mem)
; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm6 = ymm6 ^ (mem & (ymm6 ^ ymm5))
+; AVX512DQ-NEXT: vinserti32x4 $2, %xmm0, %zmm6, %zmm5
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm3[1,3,2,3]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u],zero,zero,xmm3[6,14,u,u,u],zero,zero,xmm3[7,15,u,u,u]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm4[1,3,2,3]
@@ -2235,9 +2238,9 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = zero,xmm0[13,u,u,u,u],zero,zero,xmm0[14,u,u,u,u],zero,zero,xmm0[15]
; AVX512DQ-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512DQ-NEXT: vpternlogq {{.*#+}} xmm0 = xmm0 ^ (mem & (xmm0 ^ xmm3))
-; AVX512DQ-NEXT: vmovdqa %ymm6, 64(%rax)
; AVX512DQ-NEXT: vmovdqa %xmm0, 96(%rax)
; AVX512DQ-NEXT: vmovdqa64 %zmm8, (%rax)
+; AVX512DQ-NEXT: vmovdqa %ymm5, 64(%rax)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -2292,6 +2295,7 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,1,9],zero,ymm7[u,u,u,u,2,10],zero,ymm7[u,u,u,u,19,27],zero,ymm7[u,u,u,u,20,28],zero,ymm7[u,u,u,u,21]
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm7 = ymm7 | (ymm8 & ~mem)
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm7 = ymm7 ^ (mem & (ymm7 ^ ymm5))
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm7, %zmm5
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[1,3,2,3]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u],zero,zero,xmm4[6,14,u,u,u],zero,zero,xmm4[7,15,u,u,u]
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[1,3,2,3]
@@ -2302,9 +2306,9 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = zero,xmm0[13,u,u,u,u],zero,zero,xmm0[14,u,u,u,u],zero,zero,xmm0[15]
; AVX512DQ-FCP-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} xmm0 = xmm0 ^ (mem & (xmm0 ^ xmm3))
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm7, 64(%rax)
; AVX512DQ-FCP-NEXT: vmovdqa %xmm0, 96(%rax)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm6, (%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm5, 64(%rax)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
@@ -2312,80 +2316,81 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-NEXT: vmovdqa (%rdi), %xmm1
-; AVX512BW-NEXT: vmovdqa (%rdx), %xmm0
-; AVX512BW-NEXT: vmovdqa (%r8), %xmm3
-; AVX512BW-NEXT: vmovdqa (%r9), %xmm4
-; AVX512BW-NEXT: vmovdqa (%r10), %xmm2
-; AVX512BW-NEXT: vinserti128 $1, (%rcx), %ymm0, %ymm0
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,ymm0[5],zero,zero,zero,zero,zero,zero,ymm0[6],zero,zero,zero,zero,zero,zero,zero,ymm0[23],zero,zero,zero,zero,zero,zero,ymm0[24],zero,zero,zero,zero
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm6 = ymm0[2,3,0,1]
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,ymm6[5],zero,zero,zero,zero,zero,zero,ymm6[6],zero,zero,zero,zero,zero,ymm6[23],zero,zero,zero,zero,zero,zero,ymm6[24],zero,zero,zero,zero,zero
-; AVX512BW-NEXT: vpor %ymm5, %ymm6, %ymm5
-; AVX512BW-NEXT: vinserti128 $1, (%rsi), %ymm1, %ymm1
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,ymm1[5],zero,zero,zero,zero,zero,zero,ymm1[6],zero,zero,zero,zero,zero,zero,zero,ymm1[23],zero,zero,zero,zero,zero,zero,ymm1[24],zero,zero,zero,zero,zero,zero
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm7 = ymm1[2,3,0,1]
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,ymm7[5],zero,zero,zero,zero,zero,zero,ymm7[6],zero,zero,zero,zero,zero,ymm7[23],zero,zero,zero,zero,zero,zero,ymm7[24],zero,zero,zero,zero,zero,zero,ymm7[25]
-; AVX512BW-NEXT: vpor %ymm7, %ymm6, %ymm6
-; AVX512BW-NEXT: movl $202911840, %ecx # imm = 0xC183060
+; AVX512BW-NEXT: vmovdqa (%rdi), %xmm3
+; AVX512BW-NEXT: vmovdqa (%rdx), %xmm4
+; AVX512BW-NEXT: vmovdqa (%r8), %xmm0
+; AVX512BW-NEXT: vmovdqa (%r9), %xmm2
+; AVX512BW-NEXT: vmovdqa (%r10), %xmm1
+; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
+; AVX512BW-NEXT: # ymm5 = mem[0,1,0,1]
+; AVX512BW-NEXT: vpermw %ymm1, %ymm5, %ymm5
+; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm6
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm7 = ymm6[1,3,1,3]
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,1,9,u,u,u,u,u,2,10,u,u,u,u,u,19,27,u,u,u,u,u,20,28,u,u,u,u,u,21]
+; AVX512BW-NEXT: movl $67637280, %edx # imm = 0x4081020
+; AVX512BW-NEXT: kmovd %edx, %k1
+; AVX512BW-NEXT: vmovdqu8 %ymm5, %ymm7 {%k1}
+; AVX512BW-NEXT: vinserti128 $1, (%rcx), %ymm4, %ymm4
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm5 = ymm4[1,3,3,1]
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm5 = zero,ymm5[1,9],zero,zero,zero,zero,zero,ymm5[2,10],zero,zero,zero,zero,zero,ymm5[3,19],zero,zero,zero,zero,zero,ymm5[28,20],zero,zero,zero,zero,zero,ymm5[29,21],zero
+; AVX512BW-NEXT: vinserti128 $1, (%rsi), %ymm3, %ymm3
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm8 = ymm3[3,1,1,3]
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[1],zero,zero,zero,zero,zero,ymm8[10,2],zero,zero,zero,zero,zero,ymm8[11,3],zero,zero,zero,zero,zero,ymm8[20,28],zero,zero,zero,zero,zero,ymm8[21,29],zero,zero,zero
+; AVX512BW-NEXT: vpor %ymm5, %ymm8, %ymm5
+; AVX512BW-NEXT: movl $-2029118408, %ecx # imm = 0x870E1C38
; AVX512BW-NEXT: kmovd %ecx, %k1
-; AVX512BW-NEXT: vmovdqu8 %ymm5, %ymm6 {%k1}
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm5 = ymm1[0,2,0,2]
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[0,8],zero,zero,zero,zero,zero,ymm5[1,9],zero,zero,zero,zero,zero,ymm5[2,10],zero,zero,zero,zero,zero,ymm5[19,27],zero,zero,zero,zero,zero,ymm5[20,28],zero,zero
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm7 = ymm0[0,2,0,2]
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,ymm7[0,8],zero,zero,zero,zero,zero,ymm7[1,9],zero,zero,zero,zero,zero,ymm7[18,26],zero,zero,zero,zero,zero,ymm7[19,27],zero,zero,zero,zero,zero,ymm7[20,28]
-; AVX512BW-NEXT: vpor %ymm5, %ymm7, %ymm5
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm6, %zmm5, %zmm5
-; AVX512BW-NEXT: vinserti128 $1, %xmm4, %ymm3, %ymm6
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[4],zero,zero,zero,zero,zero,zero,ymm6[5],zero,zero,zero,zero,zero,zero,ymm6[6],zero,zero,zero,zero,zero,zero,zero,ymm6[23],zero,zero,zero,zero,zero,zero,ymm6[24],zero,zero
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm8 = ymm6[2,3,0,1]
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm8 = zero,ymm8[4],zero,zero,zero,zero,zero,zero,ymm8[5],zero,zero,zero,zero,zero,zero,ymm8[6],zero,zero,zero,zero,zero,ymm8[23],zero,zero,zero,zero,zero,zero,ymm8[24],zero,zero,zero
+; AVX512BW-NEXT: vmovdqu8 %ymm7, %ymm5 {%k1}
+; AVX512BW-NEXT: vinserti32x4 $2, %xmm0, %zmm5, %zmm5
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,ymm4[5],zero,zero,zero,zero,zero,zero,ymm4[6],zero,zero,zero,zero,zero,zero,zero,ymm4[23],zero,zero,zero,zero,zero,zero,ymm4[24],zero,zero,zero,zero
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm8 = ymm4[2,3,0,1]
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,ymm8[5],zero,zero,zero,zero,zero,zero,ymm8[6],zero,zero,zero,zero,zero,ymm8[23],zero,zero,zero,zero,zero,zero,ymm8[24],zero,zero,zero,zero,zero
; AVX512BW-NEXT: vpor %ymm7, %ymm8, %ymm7
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm8 = ymm6[0,2,0,2]
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[u,u,u,u,0,8,u,u,u,u,u,1,9,u,u,u,u,u,18,26,u,u,u,u,u,19,27,u,u,u,u,u]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm7, %zmm8, %zmm7
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,ymm3[5],zero,zero,zero,zero,zero,zero,ymm3[6],zero,zero,zero,zero,zero,zero,zero,ymm3[23],zero,zero,zero,zero,zero,zero,ymm3[24],zero,zero,zero,zero,zero,zero
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm9 = ymm3[2,3,0,1]
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,ymm9[5],zero,zero,zero,zero,zero,zero,ymm9[6],zero,zero,zero,zero,zero,ymm9[23],zero,zero,zero,zero,zero,zero,ymm9[24],zero,zero,zero,zero,zero,zero,ymm9[25]
+; AVX512BW-NEXT: vpor %ymm9, %ymm8, %ymm8
+; AVX512BW-NEXT: movl $202911840, %ecx # imm = 0xC183060
+; AVX512BW-NEXT: kmovd %ecx, %k1
+; AVX512BW-NEXT: vmovdqu8 %ymm7, %ymm8 {%k1}
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm7 = ymm3[0,2,0,2]
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[0,8],zero,zero,zero,zero,zero,ymm7[1,9],zero,zero,zero,zero,zero,ymm7[2,10],zero,zero,zero,zero,zero,ymm7[19,27],zero,zero,zero,zero,zero,ymm7[20,28],zero,zero
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm9 = ymm4[0,2,0,2]
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm9 = zero,zero,ymm9[0,8],zero,zero,zero,zero,zero,ymm9[1,9],zero,zero,zero,zero,zero,ymm9[18,26],zero,zero,zero,zero,zero,ymm9[19,27],zero,zero,zero,zero,zero,ymm9[20,28]
+; AVX512BW-NEXT: vpor %ymm7, %ymm9, %ymm7
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm8, %zmm7, %zmm7
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm8 = ymm6[4],zero,zero,zero,zero,zero,zero,ymm6[5],zero,zero,zero,zero,zero,zero,ymm6[6],zero,zero,zero,zero,zero,zero,zero,ymm6[23],zero,zero,zero,zero,zero,zero,ymm6[24],zero,zero
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm9 = ymm6[2,3,0,1]
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm9 = zero,ymm9[4],zero,zero,zero,zero,zero,zero,ymm9[5],zero,zero,zero,zero,zero,zero,ymm9[6],zero,zero,zero,zero,zero,ymm9[23],zero,zero,zero,zero,zero,zero,ymm9[24],zero,zero,zero
+; AVX512BW-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,2,0,2]
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,0,8,u,u,u,u,u,1,9,u,u,u,u,u,18,26,u,u,u,u,u,19,27,u,u,u,u,u]
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm8, %zmm6, %zmm6
; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm8 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4]
-; AVX512BW-NEXT: vpermw %zmm2, %zmm8, %zmm8
+; AVX512BW-NEXT: vpermw %zmm1, %zmm8, %zmm8
; AVX512BW-NEXT: movabsq $4647998506761461824, %rcx # imm = 0x4081020408102040
; AVX512BW-NEXT: kmovq %rcx, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm8, %zmm7 {%k1}
+; AVX512BW-NEXT: vmovdqu8 %zmm8, %zmm6 {%k1}
; AVX512BW-NEXT: movabsq $8133997386832558192, %rcx # imm = 0x70E1C3870E1C3870
; AVX512BW-NEXT: kmovq %rcx, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm7, %zmm5 {%k1}
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[10,u,u,u,u,u,13,12,u,u,u,u,u,15,14,u]
-; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm2[0,1,2,3,6,7,7,7]
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[2,1,3,2]
+; AVX512BW-NEXT: vmovdqu8 %zmm6, %zmm7 {%k1}
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[10,u,u,u,u,u,13,12,u,u,u,u,u,15,14,u]
+; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,7,7,7]
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,1,3,2]
; AVX512BW-NEXT: movw $-32510, %cx # imm = 0x8102
; AVX512BW-NEXT: kmovd %ecx, %k1
-; AVX512BW-NEXT: vmovdqu8 %xmm4, %xmm3 {%k1}
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm4 = ymm1[1,3,2,3]
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[6,14],zero,zero,zero,zero,zero,xmm4[7,15],zero,zero,zero,zero,zero
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm7 = ymm0[1,3,2,3]
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,zero,zero,xmm7[6,14],zero,zero,zero,zero,zero,xmm7[7,15],zero,zero,zero
-; AVX512BW-NEXT: vpor %xmm4, %xmm7, %xmm4
+; AVX512BW-NEXT: vmovdqu8 %xmm1, %xmm0 {%k1}
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm1 = ymm3[1,3,2,3]
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[6,14],zero,zero,zero,zero,zero,xmm1[7,15],zero,zero,zero,zero,zero
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm2 = ymm4[1,3,2,3]
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,xmm2[6,14],zero,zero,zero,zero,zero,xmm2[7,15],zero,zero,zero
+; AVX512BW-NEXT: vpor %xmm1, %xmm2, %xmm1
; AVX512BW-NEXT: movw $-7741, %cx # imm = 0xE1C3
; AVX512BW-NEXT: kmovd %ecx, %k1
-; AVX512BW-NEXT: vmovdqu8 %xmm3, %xmm4 {%k1}
-; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
-; AVX512BW-NEXT: # ymm3 = mem[0,1,0,1]
-; AVX512BW-NEXT: vpermw %ymm2, %ymm3, %ymm2
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm3 = ymm6[1,3,1,3]
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,1,9,u,u,u,u,u,2,10,u,u,u,u,u,19,27,u,u,u,u,u,20,28,u,u,u,u,u,21]
-; AVX512BW-NEXT: movl $67637280, %ecx # imm = 0x4081020
-; AVX512BW-NEXT: kmovd %ecx, %k1
-; AVX512BW-NEXT: vmovdqu8 %ymm2, %ymm3 {%k1}
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,3,3,1]
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm0 = zero,ymm0[1,9],zero,zero,zero,zero,zero,ymm0[2,10],zero,zero,zero,zero,zero,ymm0[3,19],zero,zero,zero,zero,zero,ymm0[28,20],zero,zero,zero,zero,zero,ymm0[29,21],zero
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[3,1,1,3]
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[1],zero,zero,zero,zero,zero,ymm1[10,2],zero,zero,zero,zero,zero,ymm1[11,3],zero,zero,zero,zero,zero,ymm1[20,28],zero,zero,zero,zero,zero,ymm1[21,29],zero,zero,zero
-; AVX512BW-NEXT: vpor %ymm0, %ymm1, %ymm0
-; AVX512BW-NEXT: movl $-2029118408, %ecx # imm = 0x870E1C38
-; AVX512BW-NEXT: kmovd %ecx, %k1
-; AVX512BW-NEXT: vmovdqu8 %ymm3, %ymm0 {%k1}
-; AVX512BW-NEXT: vmovdqa %ymm0, 64(%rax)
-; AVX512BW-NEXT: vmovdqa %xmm4, 96(%rax)
-; AVX512BW-NEXT: vmovdqa64 %zmm5, (%rax)
+; AVX512BW-NEXT: vmovdqu8 %xmm0, %xmm1 {%k1}
+; AVX512BW-NEXT: vmovdqa %xmm1, 96(%rax)
+; AVX512BW-NEXT: vmovdqa64 %zmm7, (%rax)
+; AVX512BW-NEXT: vmovdqa %ymm5, 64(%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -2398,45 +2403,46 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP-NEXT: vmovdqa (%r8), %xmm2
; AVX512BW-FCP-NEXT: vmovdqa (%r9), %xmm3
; AVX512BW-FCP-NEXT: vmovdqa (%r10), %xmm4
-; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [1,9,2,10,1,9,2,10]
+; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
; AVX512BW-FCP-NEXT: # ymm5 = mem[0,1,0,1]
-; AVX512BW-FCP-NEXT: vpermi2d %ymm3, %ymm2, %ymm5
+; AVX512BW-FCP-NEXT: vpermw %ymm4, %ymm5, %ymm5
; AVX512BW-FCP-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm6
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm6[0,2,0,2]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm7, %zmm5
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm5 = zmm5[u,u,u,u,0,8,u,u,u,u,u,1,9,u,u,u,u,u,18,26,u,u,u,u,u,19,27,u,u,u,u,u,32,36,u,u,u,u,u,33,37,u,u,u,u,u,34,38,u,u,u,u,u,51,55,u,u,u,u,u,56,60,u,u]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm6[1,3,1,3]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,1,9,u,u,u,u,u,2,10,u,u,u,u,u,19,27,u,u,u,u,u,20,28,u,u,u,u,u,21]
+; AVX512BW-FCP-NEXT: movl $67637280, %edx # imm = 0x4081020
+; AVX512BW-FCP-NEXT: kmovd %edx, %k1
+; AVX512BW-FCP-NEXT: vmovdqu8 %ymm5, %ymm7 {%k1}
+; AVX512BW-FCP-NEXT: vinserti128 $1, (%rcx), %ymm1, %ymm1
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm1[1,3,3,1]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm5 = zero,ymm5[1,9],zero,zero,zero,zero,zero,ymm5[2,10],zero,zero,zero,zero,zero,ymm5[3,19],zero,zero,zero,zero,zero,ymm5[28,20],zero,zero,zero,zero,zero,ymm5[29,21],zero
+; AVX512BW-FCP-NEXT: vinserti128 $1, (%rsi), %ymm0, %ymm0
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm0[3,1,1,3]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[1],zero,zero,zero,zero,zero,ymm8[10,2],zero,zero,zero,zero,zero,ymm8[11,3],zero,zero,zero,zero,zero,ymm8[20,28],zero,zero,zero,zero,zero,ymm8[21,29],zero,zero,zero
+; AVX512BW-FCP-NEXT: vpor %ymm5, %ymm8, %ymm5
+; AVX512BW-FCP-NEXT: movl $-2029118408, %ecx # imm = 0x870E1C38
+; AVX512BW-FCP-NEXT: kmovd %ecx, %k1
+; AVX512BW-FCP-NEXT: vmovdqu8 %ymm7, %ymm5 {%k1}
+; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm5, %zmm5
+; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [1,9,2,10,1,9,2,10]
+; AVX512BW-FCP-NEXT: # ymm7 = mem[0,1,0,1]
+; AVX512BW-FCP-NEXT: vpermi2d %ymm3, %ymm2, %ymm7
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,2,0,2]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm6
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm6 = zmm6[u,u,u,u,0,8,u,u,u,u,u,1,9,u,u,u,u,u,18,26,u,u,u,u,u,19,27,u,u,u,u,u,32,36,u,u,u,u,u,33,37,u,u,u,u,u,34,38,u,u,u,u,u,51,55,u,u,u,u,u,56,60,u,u]
; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm7 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4]
; AVX512BW-FCP-NEXT: vpermw %zmm4, %zmm7, %zmm7
-; AVX512BW-FCP-NEXT: movabsq $4647998506761461824, %rdx # imm = 0x4081020408102040
-; AVX512BW-FCP-NEXT: kmovq %rdx, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm7, %zmm5 {%k1}
-; AVX512BW-FCP-NEXT: vinserti128 $1, (%rsi), %ymm0, %ymm0
+; AVX512BW-FCP-NEXT: movabsq $4647998506761461824, %rcx # imm = 0x4081020408102040
+; AVX512BW-FCP-NEXT: kmovq %rcx, %k1
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm7, %zmm6 {%k1}
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [0,0,4,0,0,1,4,5,1,5,0,0,1,5,2,6]
; AVX512BW-FCP-NEXT: vpermd %zmm0, %zmm7, %zmm8
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm8 = zmm8[0,8],zero,zero,zero,zero,zero,zmm8[1,9],zero,zero,zero,zero,zero,zmm8[2,10],zero,zero,zero,zero,zero,zmm8[19,27],zero,zero,zero,zero,zero,zmm8[20,28],zero,zero,zero,zero,zero,zmm8[33,37],zero,zero,zero,zero,zero,zmm8[34,38],zero,zero,zero,zero,zero,zmm8[51,55],zero,zero,zero,zero,zero,zmm8[56,60],zero,zero,zero,zero,zero,zmm8[57]
-; AVX512BW-FCP-NEXT: vinserti128 $1, (%rcx), %ymm1, %ymm1
; AVX512BW-FCP-NEXT: vpermd %zmm1, %zmm7, %zmm7
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm7 = zero,zero,zmm7[0,8],zero,zero,zero,zero,zero,zmm7[1,9],zero,zero,zero,zero,zero,zmm7[18,26],zero,zero,zero,zero,zero,zmm7[19,27],zero,zero,zero,zero,zero,zmm7[20,28],zero,zero,zero,zero,zero,zmm7[33,37],zero,zero,zero,zero,zero,zmm7[34,38],zero,zero,zero,zero,zero,zmm7[51,55],zero,zero,zero,zero,zero,zmm7[56,60],zero,zero,zero,zero
; AVX512BW-FCP-NEXT: vporq %zmm8, %zmm7, %zmm7
; AVX512BW-FCP-NEXT: movabsq $8133997386832558192, %rcx # imm = 0x70E1C3870E1C3870
; AVX512BW-FCP-NEXT: kmovq %rcx, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm5, %zmm7 {%k1}
-; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
-; AVX512BW-FCP-NEXT: # ymm5 = mem[0,1,0,1]
-; AVX512BW-FCP-NEXT: vpermw %ymm4, %ymm5, %ymm5
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[1,3,1,3]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,1,9,u,u,u,u,u,2,10,u,u,u,u,u,19,27,u,u,u,u,u,20,28,u,u,u,u,u,21]
-; AVX512BW-FCP-NEXT: movl $67637280, %ecx # imm = 0x4081020
-; AVX512BW-FCP-NEXT: kmovd %ecx, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %ymm5, %ymm6 {%k1}
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm1[1,3,3,1]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm5 = zero,ymm5[1,9],zero,zero,zero,zero,zero,ymm5[2,10],zero,zero,zero,zero,zero,ymm5[3,19],zero,zero,zero,zero,zero,ymm5[28,20],zero,zero,zero,zero,zero,ymm5[29,21],zero
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm0[3,1,1,3]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[1],zero,zero,zero,zero,zero,ymm8[10,2],zero,zero,zero,zero,zero,ymm8[11,3],zero,zero,zero,zero,zero,ymm8[20,28],zero,zero,zero,zero,zero,ymm8[21,29],zero,zero,zero
-; AVX512BW-FCP-NEXT: vpor %ymm5, %ymm8, %ymm5
-; AVX512BW-FCP-NEXT: movl $-2029118408, %ecx # imm = 0x870E1C38
-; AVX512BW-FCP-NEXT: kmovd %ecx, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %ymm6, %ymm5 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm7 {%k1}
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,3,2,3]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[6,14],zero,zero,zero,zero,zero,xmm0[7,15],zero,zero,zero,zero,zero
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[1,3,2,3]
@@ -2449,9 +2455,9 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP-NEXT: movw $-7741, %cx # imm = 0xE1C3
; AVX512BW-FCP-NEXT: kmovd %ecx, %k1
; AVX512BW-FCP-NEXT: vmovdqu8 %xmm1, %xmm0 {%k1}
-; AVX512BW-FCP-NEXT: vmovdqa %ymm5, 64(%rax)
; AVX512BW-FCP-NEXT: vmovdqa %xmm0, 96(%rax)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm7, (%rax)
+; AVX512BW-FCP-NEXT: vmovdqa %ymm5, 64(%rax)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
@@ -2459,80 +2465,81 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW: # %bb.0:
; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %xmm1
-; AVX512DQ-BW-NEXT: vmovdqa (%rdx), %xmm0
-; AVX512DQ-BW-NEXT: vmovdqa (%r8), %xmm3
-; AVX512DQ-BW-NEXT: vmovdqa (%r9), %xmm4
-; AVX512DQ-BW-NEXT: vmovdqa (%r10), %xmm2
-; AVX512DQ-BW-NEXT: vinserti128 $1, (%rcx), %ymm0, %ymm0
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,ymm0[5],zero,zero,zero,zero,zero,zero,ymm0[6],zero,zero,zero,zero,zero,zero,zero,ymm0[23],zero,zero,zero,zero,zero,zero,ymm0[24],zero,zero,zero,zero
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm6 = ymm0[2,3,0,1]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,ymm6[5],zero,zero,zero,zero,zero,zero,ymm6[6],zero,zero,zero,zero,zero,ymm6[23],zero,zero,zero,zero,zero,zero,ymm6[24],zero,zero,zero,zero,zero
-; AVX512DQ-BW-NEXT: vpor %ymm5, %ymm6, %ymm5
-; AVX512DQ-BW-NEXT: vinserti128 $1, (%rsi), %ymm1, %ymm1
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,ymm1[5],zero,zero,zero,zero,zero,zero,ymm1[6],zero,zero,zero,zero,zero,zero,zero,ymm1[23],zero,zero,zero,zero,zero,zero,ymm1[24],zero,zero,zero,zero,zero,zero
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm7 = ymm1[2,3,0,1]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,ymm7[5],zero,zero,zero,zero,zero,zero,ymm7[6],zero,zero,zero,zero,zero,ymm7[23],zero,zero,zero,zero,zero,zero,ymm7[24],zero,zero,zero,zero,zero,zero,ymm7[25]
-; AVX512DQ-BW-NEXT: vpor %ymm7, %ymm6, %ymm6
-; AVX512DQ-BW-NEXT: movl $202911840, %ecx # imm = 0xC183060
+; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %xmm3
+; AVX512DQ-BW-NEXT: vmovdqa (%rdx), %xmm4
+; AVX512DQ-BW-NEXT: vmovdqa (%r8), %xmm0
+; AVX512DQ-BW-NEXT: vmovdqa (%r9), %xmm2
+; AVX512DQ-BW-NEXT: vmovdqa (%r10), %xmm1
+; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
+; AVX512DQ-BW-NEXT: # ymm5 = mem[0,1,0,1]
+; AVX512DQ-BW-NEXT: vpermw %ymm1, %ymm5, %ymm5
+; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm6
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm7 = ymm6[1,3,1,3]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,1,9,u,u,u,u,u,2,10,u,u,u,u,u,19,27,u,u,u,u,u,20,28,u,u,u,u,u,21]
+; AVX512DQ-BW-NEXT: movl $67637280, %edx # imm = 0x4081020
+; AVX512DQ-BW-NEXT: kmovd %edx, %k1
+; AVX512DQ-BW-NEXT: vmovdqu8 %ymm5, %ymm7 {%k1}
+; AVX512DQ-BW-NEXT: vinserti128 $1, (%rcx), %ymm4, %ymm4
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm5 = ymm4[1,3,3,1]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm5 = zero,ymm5[1,9],zero,zero,zero,zero,zero,ymm5[2,10],zero,zero,zero,zero,zero,ymm5[3,19],zero,zero,zero,zero,zero,ymm5[28,20],zero,zero,zero,zero,zero,ymm5[29,21],zero
+; AVX512DQ-BW-NEXT: vinserti128 $1, (%rsi), %ymm3, %ymm3
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm8 = ymm3[3,1,1,3]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[1],zero,zero,zero,zero,zero,ymm8[10,2],zero,zero,zero,zero,zero,ymm8[11,3],zero,zero,zero,zero,zero,ymm8[20,28],zero,zero,zero,zero,zero,ymm8[21,29],zero,zero,zero
+; AVX512DQ-BW-NEXT: vpor %ymm5, %ymm8, %ymm5
+; AVX512DQ-BW-NEXT: movl $-2029118408, %ecx # imm = 0x870E1C38
; AVX512DQ-BW-NEXT: kmovd %ecx, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %ymm5, %ymm6 {%k1}
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm5 = ymm1[0,2,0,2]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[0,8],zero,zero,zero,zero,zero,ymm5[1,9],zero,zero,zero,zero,zero,ymm5[2,10],zero,zero,zero,zero,zero,ymm5[19,27],zero,zero,zero,zero,zero,ymm5[20,28],zero,zero
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm7 = ymm0[0,2,0,2]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,ymm7[0,8],zero,zero,zero,zero,zero,ymm7[1,9],zero,zero,zero,zero,zero,ymm7[18,26],zero,zero,zero,zero,zero,ymm7[19,27],zero,zero,zero,zero,zero,ymm7[20,28]
-; AVX512DQ-BW-NEXT: vpor %ymm5, %ymm7, %ymm5
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm6, %zmm5, %zmm5
-; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm4, %ymm3, %ymm6
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[4],zero,zero,zero,zero,zero,zero,ymm6[5],zero,zero,zero,zero,zero,zero,ymm6[6],zero,zero,zero,zero,zero,zero,zero,ymm6[23],zero,zero,zero,zero,zero,zero,ymm6[24],zero,zero
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm8 = ymm6[2,3,0,1]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm8 = zero,ymm8[4],zero,zero,zero,zero,zero,zero,ymm8[5],zero,zero,zero,zero,zero,zero,ymm8[6],zero,zero,zero,zero,zero,ymm8[23],zero,zero,zero,zero,zero,zero,ymm8[24],zero,zero,zero
+; AVX512DQ-BW-NEXT: vmovdqu8 %ymm7, %ymm5 {%k1}
+; AVX512DQ-BW-NEXT: vinserti32x4 $2, %xmm0, %zmm5, %zmm5
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,ymm4[5],zero,zero,zero,zero,zero,zero,ymm4[6],zero,zero,zero,zero,zero,zero,zero,ymm4[23],zero,zero,zero,zero,zero,zero,ymm4[24],zero,zero,zero,zero
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm8 = ymm4[2,3,0,1]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,ymm8[5],zero,zero,zero,zero,zero,zero,ymm8[6],zero,zero,zero,zero,zero,ymm8[23],zero,zero,zero,zero,zero,zero,ymm8[24],zero,zero,zero,zero,zero
; AVX512DQ-BW-NEXT: vpor %ymm7, %ymm8, %ymm7
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm8 = ymm6[0,2,0,2]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[u,u,u,u,0,8,u,u,u,u,u,1,9,u,u,u,u,u,18,26,u,u,u,u,u,19,27,u,u,u,u,u]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm7, %zmm8, %zmm7
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,ymm3[5],zero,zero,zero,zero,zero,zero,ymm3[6],zero,zero,zero,zero,zero,zero,zero,ymm3[23],zero,zero,zero,zero,zero,zero,ymm3[24],zero,zero,zero,zero,zero,zero
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm9 = ymm3[2,3,0,1]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,ymm9[5],zero,zero,zero,zero,zero,zero,ymm9[6],zero,zero,zero,zero,zero,ymm9[23],zero,zero,zero,zero,zero,zero,ymm9[24],zero,zero,zero,zero,zero,zero,ymm9[25]
+; AVX512DQ-BW-NEXT: vpor %ymm9, %ymm8, %ymm8
+; AVX512DQ-BW-NEXT: movl $202911840, %ecx # imm = 0xC183060
+; AVX512DQ-BW-NEXT: kmovd %ecx, %k1
+; AVX512DQ-BW-NEXT: vmovdqu8 %ymm7, %ymm8 {%k1}
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm7 = ymm3[0,2,0,2]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[0,8],zero,zero,zero,zero,zero,ymm7[1,9],zero,zero,zero,zero,zero,ymm7[2,10],zero,zero,zero,zero,zero,ymm7[19,27],zero,zero,zero,zero,zero,ymm7[20,28],zero,zero
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm9 = ymm4[0,2,0,2]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm9 = zero,zero,ymm9[0,8],zero,zero,zero,zero,zero,ymm9[1,9],zero,zero,zero,zero,zero,ymm9[18,26],zero,zero,zero,zero,zero,ymm9[19,27],zero,zero,zero,zero,zero,ymm9[20,28]
+; AVX512DQ-BW-NEXT: vpor %ymm7, %ymm9, %ymm7
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm8, %zmm7, %zmm7
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm8 = ymm6[4],zero,zero,zero,zero,zero,zero,ymm6[5],zero,zero,zero,zero,zero,zero,ymm6[6],zero,zero,zero,zero,zero,zero,zero,ymm6[23],zero,zero,zero,zero,zero,zero,ymm6[24],zero,zero
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm9 = ymm6[2,3,0,1]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm9 = zero,ymm9[4],zero,zero,zero,zero,zero,zero,ymm9[5],zero,zero,zero,zero,zero,zero,ymm9[6],zero,zero,zero,zero,zero,ymm9[23],zero,zero,zero,zero,zero,zero,ymm9[24],zero,zero,zero
+; AVX512DQ-BW-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,2,0,2]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,0,8,u,u,u,u,u,1,9,u,u,u,u,u,18,26,u,u,u,u,u,19,27,u,u,u,u,u]
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm8, %zmm6, %zmm6
; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm8 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4]
-; AVX512DQ-BW-NEXT: vpermw %zmm2, %zmm8, %zmm8
+; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm8, %zmm8
; AVX512DQ-BW-NEXT: movabsq $4647998506761461824, %rcx # imm = 0x4081020408102040
; AVX512DQ-BW-NEXT: kmovq %rcx, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm8, %zmm7 {%k1}
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm8, %zmm6 {%k1}
; AVX512DQ-BW-NEXT: movabsq $8133997386832558192, %rcx # imm = 0x70E1C3870E1C3870
; AVX512DQ-BW-NEXT: kmovq %rcx, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm7, %zmm5 {%k1}
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[10,u,u,u,u,u,13,12,u,u,u,u,u,15,14,u]
-; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm2[0,1,2,3,6,7,7,7]
-; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[2,1,3,2]
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm6, %zmm7 {%k1}
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[10,u,u,u,u,u,13,12,u,u,u,u,u,15,14,u]
+; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,7,7,7]
+; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,1,3,2]
; AVX512DQ-BW-NEXT: movw $-32510, %cx # imm = 0x8102
; AVX512DQ-BW-NEXT: kmovd %ecx, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %xmm4, %xmm3 {%k1}
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm4 = ymm1[1,3,2,3]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[6,14],zero,zero,zero,zero,zero,xmm4[7,15],zero,zero,zero,zero,zero
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm7 = ymm0[1,3,2,3]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,zero,zero,xmm7[6,14],zero,zero,zero,zero,zero,xmm7[7,15],zero,zero,zero
-; AVX512DQ-BW-NEXT: vpor %xmm4, %xmm7, %xmm4
+; AVX512DQ-BW-NEXT: vmovdqu8 %xmm1, %xmm0 {%k1}
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm1 = ymm3[1,3,2,3]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[6,14],zero,zero,zero,zero,zero,xmm1[7,15],zero,zero,zero,zero,zero
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm2 = ymm4[1,3,2,3]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,xmm2[6,14],zero,zero,zero,zero,zero,xmm2[7,15],zero,zero,zero
+; AVX512DQ-BW-NEXT: vpor %xmm1, %xmm2, %xmm1
; AVX512DQ-BW-NEXT: movw $-7741, %cx # imm = 0xE1C3
; AVX512DQ-BW-NEXT: kmovd %ecx, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %xmm3, %xmm4 {%k1}
-; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
-; AVX512DQ-BW-NEXT: # ymm3 = mem[0,1,0,1]
-; AVX512DQ-BW-NEXT: vpermw %ymm2, %ymm3, %ymm2
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm3 = ymm6[1,3,1,3]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,1,9,u,u,u,u,u,2,10,u,u,u,u,u,19,27,u,u,u,u,u,20,28,u,u,u,u,u,21]
-; AVX512DQ-BW-NEXT: movl $67637280, %ecx # imm = 0x4081020
-; AVX512DQ-BW-NEXT: kmovd %ecx, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %ymm2, %ymm3 {%k1}
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,3,3,1]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm0 = zero,ymm0[1,9],zero,zero,zero,zero,zero,ymm0[2,10],zero,zero,zero,zero,zero,ymm0[3,19],zero,zero,zero,zero,zero,ymm0[28,20],zero,zero,zero,zero,zero,ymm0[29,21],zero
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[3,1,1,3]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[1],zero,zero,zero,zero,zero,ymm1[10,2],zero,zero,zero,zero,zero,ymm1[11,3],zero,zero,zero,zero,zero,ymm1[20,28],zero,zero,zero,zero,zero,ymm1[21,29],zero,zero,zero
-; AVX512DQ-BW-NEXT: vpor %ymm0, %ymm1, %ymm0
-; AVX512DQ-BW-NEXT: movl $-2029118408, %ecx # imm = 0x870E1C38
-; AVX512DQ-BW-NEXT: kmovd %ecx, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %ymm3, %ymm0 {%k1}
-; AVX512DQ-BW-NEXT: vmovdqa %ymm0, 64(%rax)
-; AVX512DQ-BW-NEXT: vmovdqa %xmm4, 96(%rax)
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm5, (%rax)
+; AVX512DQ-BW-NEXT: vmovdqu8 %xmm0, %xmm1 {%k1}
+; AVX512DQ-BW-NEXT: vmovdqa %xmm1, 96(%rax)
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm7, (%rax)
+; AVX512DQ-BW-NEXT: vmovdqa %ymm5, 64(%rax)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
@@ -2545,45 +2552,46 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r8), %xmm2
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r9), %xmm3
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r10), %xmm4
-; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [1,9,2,10,1,9,2,10]
+; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
; AVX512DQ-BW-FCP-NEXT: # ymm5 = mem[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm3, %ymm2, %ymm5
+; AVX512DQ-BW-FCP-NEXT: vpermw %ymm4, %ymm5, %ymm5
; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm6
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm6[0,2,0,2]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm7, %zmm5
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm5 = zmm5[u,u,u,u,0,8,u,u,u,u,u,1,9,u,u,u,u,u,18,26,u,u,u,u,u,19,27,u,u,u,u,u,32,36,u,u,u,u,u,33,37,u,u,u,u,u,34,38,u,u,u,u,u,51,55,u,u,u,u,u,56,60,u,u]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm6[1,3,1,3]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,1,9,u,u,u,u,u,2,10,u,u,u,u,u,19,27,u,u,u,u,u,20,28,u,u,u,u,u,21]
+; AVX512DQ-BW-FCP-NEXT: movl $67637280, %edx # imm = 0x4081020
+; AVX512DQ-BW-FCP-NEXT: kmovd %edx, %k1
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %ymm5, %ymm7 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, (%rcx), %ymm1, %ymm1
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm1[1,3,3,1]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm5 = zero,ymm5[1,9],zero,zero,zero,zero,zero,ymm5[2,10],zero,zero,zero,zero,zero,ymm5[3,19],zero,zero,zero,zero,zero,ymm5[28,20],zero,zero,zero,zero,zero,ymm5[29,21],zero
+; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, (%rsi), %ymm0, %ymm0
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm0[3,1,1,3]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[1],zero,zero,zero,zero,zero,ymm8[10,2],zero,zero,zero,zero,zero,ymm8[11,3],zero,zero,zero,zero,zero,ymm8[20,28],zero,zero,zero,zero,zero,ymm8[21,29],zero,zero,zero
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm5, %ymm8, %ymm5
+; AVX512DQ-BW-FCP-NEXT: movl $-2029118408, %ecx # imm = 0x870E1C38
+; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k1
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %ymm7, %ymm5 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm5, %zmm5
+; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [1,9,2,10,1,9,2,10]
+; AVX512DQ-BW-FCP-NEXT: # ymm7 = mem[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm3, %ymm2, %ymm7
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,2,0,2]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm6
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm6 = zmm6[u,u,u,u,0,8,u,u,u,u,u,1,9,u,u,u,u,u,18,26,u,u,u,u,u,19,27,u,u,u,u,u,32,36,u,u,u,u,u,33,37,u,u,u,u,u,34,38,u,u,u,u,u,51,55,u,u,u,u,u,56,60,u,u]
; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm7 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4]
; AVX512DQ-BW-FCP-NEXT: vpermw %zmm4, %zmm7, %zmm7
-; AVX512DQ-BW-FCP-NEXT: movabsq $4647998506761461824, %rdx # imm = 0x4081020408102040
-; AVX512DQ-BW-FCP-NEXT: kmovq %rdx, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm7, %zmm5 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, (%rsi), %ymm0, %ymm0
+; AVX512DQ-BW-FCP-NEXT: movabsq $4647998506761461824, %rcx # imm = 0x4081020408102040
+; AVX512DQ-BW-FCP-NEXT: kmovq %rcx, %k1
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm7, %zmm6 {%k1}
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [0,0,4,0,0,1,4,5,1,5,0,0,1,5,2,6]
; AVX512DQ-BW-FCP-NEXT: vpermd %zmm0, %zmm7, %zmm8
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm8 = zmm8[0,8],zero,zero,zero,zero,zero,zmm8[1,9],zero,zero,zero,zero,zero,zmm8[2,10],zero,zero,zero,zero,zero,zmm8[19,27],zero,zero,zero,zero,zero,zmm8[20,28],zero,zero,zero,zero,zero,zmm8[33,37],zero,zero,zero,zero,zero,zmm8[34,38],zero,zero,zero,zero,zero,zmm8[51,55],zero,zero,zero,zero,zero,zmm8[56,60],zero,zero,zero,zero,zero,zmm8[57]
-; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, (%rcx), %ymm1, %ymm1
; AVX512DQ-BW-FCP-NEXT: vpermd %zmm1, %zmm7, %zmm7
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm7 = zero,zero,zmm7[0,8],zero,zero,zero,zero,zero,zmm7[1,9],zero,zero,zero,zero,zero,zmm7[18,26],zero,zero,zero,zero,zero,zmm7[19,27],zero,zero,zero,zero,zero,zmm7[20,28],zero,zero,zero,zero,zero,zmm7[33,37],zero,zero,zero,zero,zero,zmm7[34,38],zero,zero,zero,zero,zero,zmm7[51,55],zero,zero,zero,zero,zero,zmm7[56,60],zero,zero,zero,zero
; AVX512DQ-BW-FCP-NEXT: vporq %zmm8, %zmm7, %zmm7
; AVX512DQ-BW-FCP-NEXT: movabsq $8133997386832558192, %rcx # imm = 0x70E1C3870E1C3870
; AVX512DQ-BW-FCP-NEXT: kmovq %rcx, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm5, %zmm7 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
-; AVX512DQ-BW-FCP-NEXT: # ymm5 = mem[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm4, %ymm5, %ymm5
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[1,3,1,3]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,1,9,u,u,u,u,u,2,10,u,u,u,u,u,19,27,u,u,u,u,u,20,28,u,u,u,u,u,21]
-; AVX512DQ-BW-FCP-NEXT: movl $67637280, %ecx # imm = 0x4081020
-; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %ymm5, %ymm6 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm1[1,3,3,1]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm5 = zero,ymm5[1,9],zero,zero,zero,zero,zero,ymm5[2,10],zero,zero,zero,zero,zero,ymm5[3,19],zero,zero,zero,zero,zero,ymm5[28,20],zero,zero,zero,zero,zero,ymm5[29,21],zero
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm0[3,1,1,3]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[1],zero,zero,zero,zero,zero,ymm8[10,2],zero,zero,zero,zero,zero,ymm8[11,3],zero,zero,zero,zero,zero,ymm8[20,28],zero,zero,zero,zero,zero,ymm8[21,29],zero,zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm5, %ymm8, %ymm5
-; AVX512DQ-BW-FCP-NEXT: movl $-2029118408, %ecx # imm = 0x870E1C38
-; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %ymm6, %ymm5 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm7 {%k1}
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,3,2,3]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[6,14],zero,zero,zero,zero,zero,xmm0[7,15],zero,zero,zero,zero,zero
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[1,3,2,3]
@@ -2596,9 +2604,9 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP-NEXT: movw $-7741, %cx # imm = 0xE1C3
; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %xmm1, %xmm0 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm5, 64(%rax)
; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm0, 96(%rax)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm7, (%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm5, 64(%rax)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%in.vec0 = load <16 x i8>, ptr %in.vecptr0, align 64
From 49330db770a61cd55694679e3506bcaeecad86a8 Mon Sep 17 00:00:00 2001
From: Bjorn Pettersson <bjorn.a.pettersson at ericsson.com>
Date: Tue, 3 Jun 2025 10:01:01 +0200
Subject: [PATCH 3/4] [SelectionDAG] Deal with POISON for
INSERT_VECTOR_ELT/INSERT_SUBVECTOR (part 2)
Add support for INSERT_SUBVECTOR, INSERT_VECTOR_ELT and
SCALAR_TO_VECTOR in isGuaranteedNotToBeUndefOrPoison, and teach
SimplifyDemandedVectorElts to bypass an INSERT_SUBVECTOR when none of
the inserted subvector's elements are demanded. This compensates for,
or avoids, regressions seen after a previous commit fixing #141034.
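As a reference for reviewers, here is a minimal standalone sketch (not
part of the patch; the helper name is made up for illustration) of how
the demanded-element mask is partitioned for INSERT_SUBVECTOR: the
NumSubElts elements starting at Idx are checked against the Sub
operand, and all remaining demanded elements against the Src operand:

  #include "llvm/ADT/APInt.h"
  #include <utility>
  using llvm::APInt;

  // For insert_subvector(Src, Sub, Idx): redirect the NumSubElts
  // demanded bits starting at Idx to Sub; the rest stay with Src.
  static std::pair<APInt, APInt>
  splitDemandedForInsertSubvector(const APInt &DemandedElts,
                                  unsigned Idx, unsigned NumSubElts) {
    APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
    APInt DemandedSrcElts = DemandedElts;
    DemandedSrcElts.clearBits(Idx, Idx + NumSubElts);
    return {DemandedSubElts, DemandedSrcElts};
  }

For example, DemandedElts = 0b0011 with Idx = 2 and NumSubElts = 2
gives DemandedSubElts = 0, so none of the inserted elements matter and
the insert can be bypassed in favor of Src; that is exactly the new
early-out added to SimplifyDemandedVectorElts below.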
---
.../lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 54 +++
.../CodeGen/SelectionDAG/TargetLowering.cpp | 4 +
.../aarch64-neon-vector-insert-uaddlv.ll | 3 +-
.../AMDGPU/load-local-redundant-copies.ll | 45 +--
llvm/test/CodeGen/Thumb2/active_lane_mask.ll | 3 +-
llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll | 36 +-
llvm/test/CodeGen/X86/pr62286.ll | 14 +-
.../vector-interleaved-store-i16-stride-5.ll | 4 -
.../vector-interleaved-store-i16-stride-7.ll | 32 +-
.../vector-interleaved-store-i8-stride-5.ll | 16 +-
.../vector-interleaved-store-i8-stride-7.ll | 380 +++++++++---------
11 files changed, 310 insertions(+), 281 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 6bff2d90b91ac..fadfd35489e54 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -5454,6 +5454,60 @@ bool SelectionDAG::isGuaranteedNotToBeUndefOrPoison(SDValue Op,
}
return true;
+ case ISD::INSERT_SUBVECTOR: {
+ if (Op.getValueType().isScalableVector())
+ break;
+ SDValue Src = Op.getOperand(0);
+ SDValue Sub = Op.getOperand(1);
+ uint64_t Idx = Op.getConstantOperandVal(2);
+ unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
+ APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
+ APInt DemandedSrcElts = DemandedElts;
+ DemandedSrcElts.clearBits(Idx, Idx + NumSubElts);
+
+ if (!!DemandedSubElts &&
+ !isGuaranteedNotToBeUndefOrPoison(Sub, DemandedSubElts,
+ PoisonOnly, Depth + 1))
+ return false;
+ if (!!DemandedSrcElts &&
+ !isGuaranteedNotToBeUndefOrPoison(Src, DemandedSrcElts,
+ PoisonOnly, Depth + 1))
+ return false;
+ return true;
+ }
+
+ case ISD::INSERT_VECTOR_ELT: {
+ SDValue InVec = Op.getOperand(0);
+ SDValue InVal = Op.getOperand(1);
+ SDValue EltNo = Op.getOperand(2);
+ EVT VT = InVec.getValueType();
+ auto *IndexC = dyn_cast<ConstantSDNode>(EltNo);
+ if (IndexC && VT.isFixedLengthVector() &&
+ IndexC->getZExtValue() < VT.getVectorNumElements()) {
+ if (DemandedElts[IndexC->getZExtValue()] &&
+ !isGuaranteedNotToBeUndefOrPoison(InVal, PoisonOnly, Depth + 1))
+ return false;
+ APInt InVecDemandedElts = DemandedElts;
+ InVecDemandedElts.clearBit(IndexC->getZExtValue());
+ if (!!InVecDemandedElts &&
+ !isGuaranteedNotToBeUndefOrPoison(InVec, InVecDemandedElts,
+ PoisonOnly, Depth + 1))
+ return false;
+ return true;
+ }
+ break;
+ }
+
+ case ISD::SCALAR_TO_VECTOR:
+ // If only demanding upper (undef) elements.
+ if (DemandedElts.ugt(1))
+ return PoisonOnly;
+ // If only demanding element 0, or only considering poison.
+ if (PoisonOnly || DemandedElts == 0)
+ return isGuaranteedNotToBeUndefOrPoison(Op.getOperand(0), PoisonOnly,
+ Depth + 1);
+ return false;
+
case ISD::SPLAT_VECTOR:
return isGuaranteedNotToBeUndefOrPoison(Op.getOperand(0), PoisonOnly,
Depth + 1);
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index e8e820ac1f695..643ec26bba3f7 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -3359,6 +3359,10 @@ bool TargetLowering::SimplifyDemandedVectorElts(
APInt DemandedSrcElts = DemandedElts;
DemandedSrcElts.clearBits(Idx, Idx + NumSubElts);
+ // If none of the sub operand elements are demanded, bypass the insert.
+ if (!DemandedSubElts)
+ return TLO.CombineTo(Op, Src);
+
APInt SubUndef, SubZero;
if (SimplifyDemandedVectorElts(Sub, DemandedSubElts, SubUndef, SubZero, TLO,
Depth + 1))
diff --git a/llvm/test/CodeGen/AArch64/aarch64-neon-vector-insert-uaddlv.ll b/llvm/test/CodeGen/AArch64/aarch64-neon-vector-insert-uaddlv.ll
index 83e35599c4e9e..91eda8d552397 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-neon-vector-insert-uaddlv.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-neon-vector-insert-uaddlv.ll
@@ -229,10 +229,9 @@ define void @insert_vec_v3i16_uaddlv_from_v8i16(ptr %0) {
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: movi.2d v0, #0000000000000000
; CHECK-NEXT: movi.2d v1, #0000000000000000
+; CHECK-NEXT: add x8, x0, #8
; CHECK-NEXT: uaddlv.8h s0, v0
; CHECK-NEXT: mov.h v1[0], v0[0]
-; CHECK-NEXT: mov.h v1[3], w8
-; CHECK-NEXT: add x8, x0, #8
; CHECK-NEXT: ushll.4s v1, v1, #0
; CHECK-NEXT: ucvtf.4s v1, v1
; CHECK-NEXT: st1.s { v1 }[2], [x8]
diff --git a/llvm/test/CodeGen/AMDGPU/load-local-redundant-copies.ll b/llvm/test/CodeGen/AMDGPU/load-local-redundant-copies.ll
index 2269c68b941e7..8a3cc57e08579 100644
--- a/llvm/test/CodeGen/AMDGPU/load-local-redundant-copies.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-local-redundant-copies.ll
@@ -66,39 +66,38 @@ define amdgpu_vs void @test_3(i32 inreg %arg1, i32 inreg %arg2, ptr addrspace(8)
; CHECK-NEXT: s_mov_b32 s6, s4
; CHECK-NEXT: s_mov_b32 s5, s3
; CHECK-NEXT: s_mov_b32 s4, s2
-; CHECK-NEXT: v_add_i32_e32 v0, vcc, 20, v1
-; CHECK-NEXT: v_add_i32_e32 v3, vcc, 16, v1
-; CHECK-NEXT: v_add_i32_e32 v4, vcc, 12, v1
-; CHECK-NEXT: v_add_i32_e32 v5, vcc, 8, v1
-; CHECK-NEXT: v_add_i32_e32 v9, vcc, 4, v1
+; CHECK-NEXT: v_add_i32_e32 v0, vcc, 12, v1
+; CHECK-NEXT: v_add_i32_e32 v3, vcc, 8, v1
+; CHECK-NEXT: v_add_i32_e32 v4, vcc, 4, v1
+; CHECK-NEXT: v_add_i32_e32 v7, vcc, 20, v1
+; CHECK-NEXT: v_add_i32_e32 v9, vcc, 16, v1
; CHECK-NEXT: v_mov_b32_e32 v10, s0
-; CHECK-NEXT: v_add_i32_e32 v11, vcc, 20, v2
-; CHECK-NEXT: v_add_i32_e32 v12, vcc, 16, v2
+; CHECK-NEXT: v_add_i32_e32 v11, vcc, 12, v2
+; CHECK-NEXT: v_add_i32_e32 v12, vcc, 8, v2
; CHECK-NEXT: s_mov_b32 m0, -1
-; CHECK-NEXT: ds_read_b32 v8, v0
-; CHECK-NEXT: ds_read_b32 v7, v3
-; CHECK-NEXT: ds_read_b32 v6, v4
-; CHECK-NEXT: ds_read_b32 v5, v5
-; CHECK-NEXT: ds_read_b32 v4, v9
+; CHECK-NEXT: ds_read_b32 v6, v0
+; CHECK-NEXT: ds_read_b32 v5, v3
+; CHECK-NEXT: ds_read_b32 v4, v4
+; CHECK-NEXT: ds_read_b32 v8, v7
+; CHECK-NEXT: ds_read_b32 v7, v9
; CHECK-NEXT: ds_read_b32 v3, v1
-; CHECK-NEXT: v_add_i32_e32 v1, vcc, 12, v2
-; CHECK-NEXT: v_add_i32_e32 v9, vcc, 8, v2
-; CHECK-NEXT: v_add_i32_e32 v13, vcc, 4, v2
+; CHECK-NEXT: v_add_i32_e32 v0, vcc, 4, v2
+; CHECK-NEXT: v_add_i32_e32 v1, vcc, 20, v2
+; CHECK-NEXT: v_add_i32_e32 v9, vcc, 16, v2
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: tbuffer_store_format_xyzw v[3:6], v10, s[4:7], s1 format:[BUF_DATA_FORMAT_32_32_32,BUF_NUM_FORMAT_UINT] idxen offset:264 glc slc
; CHECK-NEXT: tbuffer_store_format_xy v[7:8], v10, s[4:7], s1 format:[BUF_DATA_FORMAT_INVALID,BUF_NUM_FORMAT_UINT] idxen offset:280 glc slc
-; CHECK-NEXT: ds_read_b32 v0, v12
; CHECK-NEXT: s_waitcnt expcnt(1)
-; CHECK-NEXT: ds_read_b32 v5, v1
-; CHECK-NEXT: ds_read_b32 v4, v9
-; CHECK-NEXT: ds_read_b32 v3, v13
+; CHECK-NEXT: ds_read_b32 v5, v11
+; CHECK-NEXT: ds_read_b32 v4, v12
+; CHECK-NEXT: ds_read_b32 v3, v0
+; CHECK-NEXT: ds_read_b32 v1, v1
+; CHECK-NEXT: ds_read_b32 v0, v9
; CHECK-NEXT: ds_read_b32 v2, v2
-; CHECK-NEXT: ds_read_b32 v1, v11
-; CHECK-NEXT: s_waitcnt lgkmcnt(5)
-; CHECK-NEXT: exp mrt0 off, off, off, off
; CHECK-NEXT: s_waitcnt lgkmcnt(1)
-; CHECK-NEXT: tbuffer_store_format_xyzw v[2:5], v10, s[4:7], s1 format:[BUF_DATA_FORMAT_32_32_32,BUF_NUM_FORMAT_UINT] idxen offset:240 glc slc
+; CHECK-NEXT: exp mrt0 off, off, off, off
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: tbuffer_store_format_xyzw v[2:5], v10, s[4:7], s1 format:[BUF_DATA_FORMAT_32_32_32,BUF_NUM_FORMAT_UINT] idxen offset:240 glc slc
; CHECK-NEXT: tbuffer_store_format_xy v[0:1], v10, s[4:7], s1 format:[BUF_DATA_FORMAT_INVALID,BUF_NUM_FORMAT_UINT] idxen offset:256 glc slc
; CHECK-NEXT: s_endpgm
%load1 = load <6 x float>, ptr addrspace(3) %arg5, align 4
diff --git a/llvm/test/CodeGen/Thumb2/active_lane_mask.ll b/llvm/test/CodeGen/Thumb2/active_lane_mask.ll
index da13d07abf09b..bcd92f81911b2 100644
--- a/llvm/test/CodeGen/Thumb2/active_lane_mask.ll
+++ b/llvm/test/CodeGen/Thumb2/active_lane_mask.ll
@@ -306,10 +306,9 @@ define void @test_width2(ptr nocapture readnone %x, ptr nocapture %y, i8 zeroext
; CHECK-NEXT: ldrne.w r3, [r12]
; CHECK-NEXT: vmovne.32 q0[0], r3
; CHECK-NEXT: lsls r0, r0, #30
-; CHECK-NEXT: ittt mi
+; CHECK-NEXT: itt mi
; CHECK-NEXT: ldrmi.w r0, [r12, #4]
; CHECK-NEXT: vmovmi.32 q0[2], r0
-; CHECK-NEXT: vmovmi.32 q0[3], r0
; CHECK-NEXT: vmrs r3, p0
; CHECK-NEXT: and r0, r3, #1
; CHECK-NEXT: ubfx r3, r3, #8, #1
diff --git a/llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll b/llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll
index 53595bc5a5595..67910e40e7705 100644
--- a/llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll
@@ -110,10 +110,9 @@ define void @foo_sext_v2i64_v2i32(ptr %dest, ptr %mask, ptr %src) {
; CHECK-LE-NEXT: ldrne r3, [r2]
; CHECK-LE-NEXT: vmovne.32 q1[0], r3
; CHECK-LE-NEXT: lsls r1, r1, #30
-; CHECK-LE-NEXT: ittt mi
+; CHECK-LE-NEXT: itt mi
; CHECK-LE-NEXT: ldrmi r1, [r2, #4]
; CHECK-LE-NEXT: vmovmi.32 q1[2], r1
-; CHECK-LE-NEXT: vmovmi.32 q1[3], r0
; CHECK-LE-NEXT: vmov r2, s6
; CHECK-LE-NEXT: movs r1, #0
; CHECK-LE-NEXT: vmov r3, s0
@@ -220,9 +219,9 @@ define void @foo_sext_v2i64_v2i32_unaligned(ptr %dest, ptr %mask, ptr %src) {
; CHECK-LE-NEXT: sub sp, #4
; CHECK-LE-NEXT: ldrd r12, lr, [r1]
; CHECK-LE-NEXT: movs r1, #0
-; CHECK-LE-NEXT: @ implicit-def: $q1
+; CHECK-LE-NEXT: @ implicit-def: $q0
; CHECK-LE-NEXT: rsbs.w r3, r12, #0
-; CHECK-LE-NEXT: vmov q0[2], q0[0], r12, lr
+; CHECK-LE-NEXT: vmov q1[2], q1[0], r12, lr
; CHECK-LE-NEXT: sbcs.w r3, r1, r12, asr #31
; CHECK-LE-NEXT: csetm r3, lt
; CHECK-LE-NEXT: rsbs.w r4, lr, #0
@@ -233,24 +232,23 @@ define void @foo_sext_v2i64_v2i32_unaligned(ptr %dest, ptr %mask, ptr %src) {
; CHECK-LE-NEXT: lsls r3, r1, #31
; CHECK-LE-NEXT: itt ne
; CHECK-LE-NEXT: ldrne r3, [r2]
-; CHECK-LE-NEXT: vmovne.32 q1[0], r3
+; CHECK-LE-NEXT: vmovne.32 q0[0], r3
; CHECK-LE-NEXT: lsls r1, r1, #30
-; CHECK-LE-NEXT: ittt mi
+; CHECK-LE-NEXT: itt mi
; CHECK-LE-NEXT: ldrmi r1, [r2, #4]
-; CHECK-LE-NEXT: vmovmi.32 q1[2], r1
-; CHECK-LE-NEXT: vmovmi.32 q1[3], r0
-; CHECK-LE-NEXT: vmov r2, s6
+; CHECK-LE-NEXT: vmovmi.32 q0[2], r1
+; CHECK-LE-NEXT: vmov r2, s2
; CHECK-LE-NEXT: movs r1, #0
-; CHECK-LE-NEXT: vmov r3, s0
-; CHECK-LE-NEXT: vmov r4, s4
-; CHECK-LE-NEXT: vmov q1[2], q1[0], r4, r2
+; CHECK-LE-NEXT: vmov r3, s4
+; CHECK-LE-NEXT: vmov r4, s0
+; CHECK-LE-NEXT: vmov q0[2], q0[0], r4, r2
; CHECK-LE-NEXT: rsbs r5, r3, #0
; CHECK-LE-NEXT: asr.w r12, r2, #31
; CHECK-LE-NEXT: sbcs.w r2, r1, r3, asr #31
-; CHECK-LE-NEXT: vmov r3, s2
+; CHECK-LE-NEXT: vmov r3, s6
; CHECK-LE-NEXT: csetm r2, lt
; CHECK-LE-NEXT: asr.w lr, r4, #31
-; CHECK-LE-NEXT: vmov q1[3], q1[1], lr, r12
+; CHECK-LE-NEXT: vmov q0[3], q0[1], lr, r12
; CHECK-LE-NEXT: rsbs r5, r3, #0
; CHECK-LE-NEXT: sbcs.w r3, r1, r3, asr #31
; CHECK-LE-NEXT: bfi r1, r2, #0, #1
@@ -258,11 +256,11 @@ define void @foo_sext_v2i64_v2i32_unaligned(ptr %dest, ptr %mask, ptr %src) {
; CHECK-LE-NEXT: bfi r1, r2, #1, #1
; CHECK-LE-NEXT: lsls r2, r1, #31
; CHECK-LE-NEXT: itt ne
-; CHECK-LE-NEXT: vmovne r2, r3, d2
+; CHECK-LE-NEXT: vmovne r2, r3, d0
; CHECK-LE-NEXT: strdne r2, r3, [r0]
; CHECK-LE-NEXT: lsls r1, r1, #30
; CHECK-LE-NEXT: itt mi
-; CHECK-LE-NEXT: vmovmi r1, r2, d3
+; CHECK-LE-NEXT: vmovmi r1, r2, d1
; CHECK-LE-NEXT: strdmi r1, r2, [r0, #8]
; CHECK-LE-NEXT: add sp, #4
; CHECK-LE-NEXT: pop {r4, r5, r7, pc}
@@ -365,10 +363,9 @@ define void @foo_zext_v2i64_v2i32(ptr %dest, ptr %mask, ptr %src) {
; CHECK-LE-NEXT: ldrne r3, [r2]
; CHECK-LE-NEXT: vmovne.32 q0[0], r3
; CHECK-LE-NEXT: lsls r1, r1, #30
-; CHECK-LE-NEXT: ittt mi
+; CHECK-LE-NEXT: itt mi
; CHECK-LE-NEXT: ldrmi r1, [r2, #4]
; CHECK-LE-NEXT: vmovmi.32 q0[2], r1
-; CHECK-LE-NEXT: vmovmi.32 q0[3], r0
; CHECK-LE-NEXT: vmov r2, s4
; CHECK-LE-NEXT: movs r1, #0
; CHECK-LE-NEXT: vand q0, q0, q2
@@ -481,10 +478,9 @@ define void @foo_zext_v2i64_v2i32_unaligned(ptr %dest, ptr %mask, ptr %src) {
; CHECK-LE-NEXT: ldrne r3, [r2]
; CHECK-LE-NEXT: vmovne.32 q0[0], r3
; CHECK-LE-NEXT: lsls r1, r1, #30
-; CHECK-LE-NEXT: ittt mi
+; CHECK-LE-NEXT: itt mi
; CHECK-LE-NEXT: ldrmi r1, [r2, #4]
; CHECK-LE-NEXT: vmovmi.32 q0[2], r1
-; CHECK-LE-NEXT: vmovmi.32 q0[3], r0
; CHECK-LE-NEXT: vmov r2, s4
; CHECK-LE-NEXT: movs r1, #0
; CHECK-LE-NEXT: vand q0, q0, q2
diff --git a/llvm/test/CodeGen/X86/pr62286.ll b/llvm/test/CodeGen/X86/pr62286.ll
index 2d1b7fcbf0239..ce03f8fad4a19 100644
--- a/llvm/test/CodeGen/X86/pr62286.ll
+++ b/llvm/test/CodeGen/X86/pr62286.ll
@@ -28,8 +28,9 @@ define i64 @PR62286(i32 %a) {
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3]
; AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
@@ -58,12 +59,13 @@ define i64 @PR62286(i32 %a) {
; AVX512-LABEL: PR62286:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovd %edi, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,0]
-; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
-; AVX512-NEXT: movw $4369, %ax # imm = 0x1111
+; AVX512-NEXT: movb $8, %al
; AVX512-NEXT: kmovd %eax, %k1
-; AVX512-NEXT: vpaddd %zmm0, %zmm0, %zmm1 {%k1}
-; AVX512-NEXT: vpmovsxdq %ymm1, %zmm0
+; AVX512-NEXT: vpexpandd %ymm0, %ymm1 {%k1} {z}
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX512-NEXT: vpaddd %ymm0, %ymm0, %ymm0
+; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
+; AVX512-NEXT: vpmovsxdq %ymm0, %zmm0
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
index d282e051d35b3..418c987ab9a30 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
@@ -444,7 +444,6 @@ define void @store_i16_stride5_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpshufb %ymm4, %ymm2, %ymm2
; AVX512-NEXT: vpblendw {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6],ymm2[7],ymm5[8,9],ymm2[10,11],ymm5[12,13,14],ymm2[15]
; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm3 ^ (mem & (ymm2 ^ ymm3))
-; AVX512-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm2
; AVX512-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX512-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,3,1,3,4,5,6,7]
@@ -472,7 +471,6 @@ define void @store_i16_stride5_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm2
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6],ymm2[7],ymm5[8,9],ymm2[10,11],ymm5[12,13,14],ymm2[15]
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm3 ^ (mem & (ymm2 ^ ymm3))
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm2
; AVX512-FCP-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,6,7,14,15,u,u,8,9,10,11,12,13,14,15]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
@@ -499,7 +497,6 @@ define void @store_i16_stride5_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpshufb %ymm4, %ymm2, %ymm2
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6],ymm2[7],ymm5[8,9],ymm2[10,11],ymm5[12,13,14],ymm2[15]
; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = ymm3 ^ (mem & (ymm2 ^ ymm3))
-; AVX512DQ-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm2
; AVX512DQ-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,3,1,3,4,5,6,7]
@@ -527,7 +524,6 @@ define void @store_i16_stride5_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm2
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6],ymm2[7],ymm5[8,9],ymm2[10,11],ymm5[12,13,14],ymm2[15]
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm3 ^ (mem & (ymm2 ^ ymm3))
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm2
; AVX512DQ-FCP-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,6,7,14,15,u,u,8,9,10,11,12,13,14,15]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
index fc24976d651db..6f50d61f4d1f4 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
@@ -1390,19 +1390,18 @@ define void @store_i16_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vmovdqa {{.*#+}} ymm8 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
; AVX512-NEXT: vpternlogd {{.*#+}} ymm8 = (mem & ~ymm8) | ymm7
; AVX512-NEXT: vpternlogq {{.*#+}} ymm8 = ymm2 ^ (mem & (ymm8 ^ ymm2))
-; AVX512-NEXT: vinserti32x4 $2, %xmm0, %zmm8, %zmm2
-; AVX512-NEXT: vpsrlq $48, %xmm4, %xmm4
-; AVX512-NEXT: vpunpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm4[1]
+; AVX512-NEXT: vpsrlq $48, %xmm4, %xmm2
+; AVX512-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm3[1],xmm2[1]
; AVX512-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX512-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[3,4],xmm0[5,6,7]
+; AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3,4],xmm0[5,6,7]
; AVX512-NEXT: vpsrld $16, %xmm6, %xmm1
; AVX512-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm5[2],xmm1[2],xmm5[3],xmm1[3]
-; AVX512-NEXT: vpbroadcastd 12(%r10), %xmm3
-; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3,4],xmm1[5,6],xmm3[7]
+; AVX512-NEXT: vpbroadcastd 12(%r10), %xmm2
+; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4],xmm1[5,6],xmm2[7]
; AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4],xmm1[5,6,7]
; AVX512-NEXT: vmovdqa %xmm0, 96(%rax)
-; AVX512-NEXT: vmovdqa %ymm2, 64(%rax)
+; AVX512-NEXT: vmovdqa %ymm8, 64(%rax)
; AVX512-NEXT: vmovdqa64 %zmm11, (%rax)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -1451,7 +1450,6 @@ define void @store_i16_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
; AVX512-FCP-NEXT: vpternlogd {{.*#+}} ymm8 = (mem & ~ymm8) | ymm7
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm8 = ymm6 ^ (mem & (ymm8 ^ ymm6))
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm8, %zmm6
; AVX512-FCP-NEXT: vpsrlq $48, %xmm3, %xmm3
; AVX512-FCP-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
@@ -1463,8 +1461,8 @@ define void @store_i16_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4],xmm1[5,6],xmm2[7]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4],xmm1[5,6,7]
; AVX512-FCP-NEXT: vmovdqa %xmm0, 96(%rax)
+; AVX512-FCP-NEXT: vmovdqa %ymm8, 64(%rax)
; AVX512-FCP-NEXT: vmovdqa64 %zmm11, (%rax)
-; AVX512-FCP-NEXT: vmovdqa %ymm6, 64(%rax)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
@@ -1515,19 +1513,18 @@ define void @store_i16_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm8 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
; AVX512DQ-NEXT: vpternlogd {{.*#+}} ymm8 = (mem & ~ymm8) | ymm7
; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm8 = ymm2 ^ (mem & (ymm8 ^ ymm2))
-; AVX512DQ-NEXT: vinserti32x4 $2, %xmm0, %zmm8, %zmm2
-; AVX512DQ-NEXT: vpsrlq $48, %xmm4, %xmm4
-; AVX512DQ-NEXT: vpunpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm4[1]
+; AVX512DQ-NEXT: vpsrlq $48, %xmm4, %xmm2
+; AVX512DQ-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm3[1],xmm2[1]
; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX512DQ-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[3,4],xmm0[5,6,7]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3,4],xmm0[5,6,7]
; AVX512DQ-NEXT: vpsrld $16, %xmm6, %xmm1
; AVX512DQ-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm5[2],xmm1[2],xmm5[3],xmm1[3]
-; AVX512DQ-NEXT: vpbroadcastd 12(%r10), %xmm3
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3,4],xmm1[5,6],xmm3[7]
+; AVX512DQ-NEXT: vpbroadcastd 12(%r10), %xmm2
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4],xmm1[5,6],xmm2[7]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4],xmm1[5,6,7]
; AVX512DQ-NEXT: vmovdqa %xmm0, 96(%rax)
-; AVX512DQ-NEXT: vmovdqa %ymm2, 64(%rax)
+; AVX512DQ-NEXT: vmovdqa %ymm8, 64(%rax)
; AVX512DQ-NEXT: vmovdqa64 %zmm11, (%rax)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
@@ -1576,7 +1573,6 @@ define void @store_i16_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} ymm8 = (mem & ~ymm8) | ymm7
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm8 = ymm6 ^ (mem & (ymm8 ^ ymm6))
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm8, %zmm6
; AVX512DQ-FCP-NEXT: vpsrlq $48, %xmm3, %xmm3
; AVX512DQ-FCP-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
@@ -1588,8 +1584,8 @@ define void @store_i16_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4],xmm1[5,6],xmm2[7]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4],xmm1[5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa %xmm0, 96(%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm8, 64(%rax)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm11, (%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm6, 64(%rax)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
index 7bbb082f07741..86efcf9c57616 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
@@ -718,7 +718,6 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,0,1,1]
; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1]
; AVX512-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 ^ (mem & (ymm3 ^ ymm2))
-; AVX512-NEXT: vinserti32x4 $2, %xmm0, %zmm3, %zmm2
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,7,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,14,u],zero,zero,xmm1[7,15,u,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
@@ -727,7 +726,7 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX512-NEXT: vpternlogq {{.*#+}} xmm1 = xmm1 ^ (mem & (xmm1 ^ xmm0))
; AVX512-NEXT: vmovq %xmm1, 32(%r9)
-; AVX512-NEXT: vmovdqa %ymm2, (%r9)
+; AVX512-NEXT: vmovdqa %ymm3, (%r9)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
@@ -749,7 +748,6 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,0,0,1,1]
; AVX512-FCP-NEXT: vpermd %ymm3, %ymm4, %ymm3
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 ^ (mem & (ymm3 ^ ymm2))
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm3, %zmm2
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,7,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,14,u],zero,zero,xmm1[7,15,u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm0, %xmm1, %xmm0
@@ -758,7 +756,7 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512-FCP-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} xmm1 = xmm1 ^ (mem & (xmm1 ^ xmm0))
; AVX512-FCP-NEXT: vmovq %xmm1, 32(%r9)
-; AVX512-FCP-NEXT: vmovdqa %ymm2, (%r9)
+; AVX512-FCP-NEXT: vmovdqa %ymm3, (%r9)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
@@ -780,7 +778,6 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,0,1,1]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1]
; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 ^ (mem & (ymm3 ^ ymm2))
-; AVX512DQ-NEXT: vinserti32x4 $2, %xmm0, %zmm3, %zmm2
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,7,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,14,u],zero,zero,xmm1[7,15,u,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpor %xmm0, %xmm1, %xmm0
@@ -789,7 +786,7 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512DQ-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX512DQ-NEXT: vpternlogq {{.*#+}} xmm1 = xmm1 ^ (mem & (xmm1 ^ xmm0))
; AVX512DQ-NEXT: vmovq %xmm1, 32(%r9)
-; AVX512DQ-NEXT: vmovdqa %ymm2, (%r9)
+; AVX512DQ-NEXT: vmovdqa %ymm3, (%r9)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -811,7 +808,6 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,0,0,1,1]
; AVX512DQ-FCP-NEXT: vpermd %ymm3, %ymm4, %ymm3
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 ^ (mem & (ymm3 ^ ymm2))
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm3, %zmm2
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,7,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,14,u],zero,zero,xmm1[7,15,u,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm0, %xmm1, %xmm0
@@ -820,7 +816,7 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512DQ-FCP-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} xmm1 = xmm1 ^ (mem & (xmm1 ^ xmm0))
; AVX512DQ-FCP-NEXT: vmovq %xmm1, 32(%r9)
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm2, (%r9)
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm3, (%r9)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
@@ -844,7 +840,6 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512BW-NEXT: movl $554189328, %ecx # imm = 0x21084210
; AVX512BW-NEXT: kmovd %ecx, %k1
; AVX512BW-NEXT: vmovdqu8 %ymm3, %ymm2 {%k1}
-; AVX512BW-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm2
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,7,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,14,u],zero,zero,xmm1[7,15,u,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vpor %xmm0, %xmm1, %xmm0
@@ -878,7 +873,6 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512BW-FCP-NEXT: movl $554189328, %ecx # imm = 0x21084210
; AVX512BW-FCP-NEXT: kmovd %ecx, %k1
; AVX512BW-FCP-NEXT: vmovdqu8 %ymm3, %ymm2 {%k1}
-; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm2
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,7,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,14,u],zero,zero,xmm1[7,15,u,u,u,u,u,u,u,u,u]
; AVX512BW-FCP-NEXT: vpor %xmm0, %xmm1, %xmm0
@@ -912,7 +906,6 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512DQ-BW-NEXT: movl $554189328, %ecx # imm = 0x21084210
; AVX512DQ-BW-NEXT: kmovd %ecx, %k1
; AVX512DQ-BW-NEXT: vmovdqu8 %ymm3, %ymm2 {%k1}
-; AVX512DQ-BW-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm2
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,7,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,14,u],zero,zero,xmm1[7,15,u,u,u,u,u,u,u,u,u]
; AVX512DQ-BW-NEXT: vpor %xmm0, %xmm1, %xmm0
@@ -946,7 +939,6 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512DQ-BW-FCP-NEXT: movl $554189328, %ecx # imm = 0x21084210
; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %ymm3, %ymm2 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm2
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,7,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,14,u],zero,zero,xmm1[7,15,u,u,u,u,u,u,u,u,u]
; AVX512DQ-BW-FCP-NEXT: vpor %xmm0, %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
index 524161408bc0f..f4055a953badd 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
@@ -2088,7 +2088,6 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,1,9],zero,ymm6[u,u,u,u,2,10],zero,ymm6[u,u,u,u,19,27],zero,ymm6[u,u,u,u,20,28],zero,ymm6[u,u,u,u,21]
; AVX512-NEXT: vpternlogq {{.*#+}} ymm6 = ymm6 | (ymm7 & ~mem)
; AVX512-NEXT: vpternlogq {{.*#+}} ymm6 = ymm6 ^ (mem & (ymm6 ^ ymm5))
-; AVX512-NEXT: vinserti32x4 $2, %xmm0, %zmm6, %zmm5
; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm3[1,3,2,3]
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u],zero,zero,xmm3[6,14,u,u,u],zero,zero,xmm3[7,15,u,u,u]
; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm4[1,3,2,3]
@@ -2099,9 +2098,9 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = zero,xmm0[13,u,u,u,u],zero,zero,xmm0[14,u,u,u,u],zero,zero,xmm0[15]
; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vpternlogq {{.*#+}} xmm0 = xmm0 ^ (mem & (xmm0 ^ xmm3))
+; AVX512-NEXT: vmovdqa %ymm6, 64(%rax)
; AVX512-NEXT: vmovdqa %xmm0, 96(%rax)
; AVX512-NEXT: vmovdqa64 %zmm8, (%rax)
-; AVX512-NEXT: vmovdqa %ymm5, 64(%rax)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
@@ -2156,7 +2155,6 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,1,9],zero,ymm7[u,u,u,u,2,10],zero,ymm7[u,u,u,u,19,27],zero,ymm7[u,u,u,u,20,28],zero,ymm7[u,u,u,u,21]
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm7 = ymm7 | (ymm8 & ~mem)
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm7 = ymm7 ^ (mem & (ymm7 ^ ymm5))
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm7, %zmm5
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[1,3,2,3]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u],zero,zero,xmm4[6,14,u,u,u],zero,zero,xmm4[7,15,u,u,u]
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[1,3,2,3]
@@ -2167,9 +2165,9 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = zero,xmm0[13,u,u,u,u],zero,zero,xmm0[14,u,u,u,u],zero,zero,xmm0[15]
; AVX512-FCP-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} xmm0 = xmm0 ^ (mem & (xmm0 ^ xmm3))
+; AVX512-FCP-NEXT: vmovdqa %ymm7, 64(%rax)
; AVX512-FCP-NEXT: vmovdqa %xmm0, 96(%rax)
; AVX512-FCP-NEXT: vmovdqa64 %zmm6, (%rax)
-; AVX512-FCP-NEXT: vmovdqa %ymm5, 64(%rax)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
@@ -2227,7 +2225,6 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,1,9],zero,ymm6[u,u,u,u,2,10],zero,ymm6[u,u,u,u,19,27],zero,ymm6[u,u,u,u,20,28],zero,ymm6[u,u,u,u,21]
; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm6 = ymm6 | (ymm7 & ~mem)
; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm6 = ymm6 ^ (mem & (ymm6 ^ ymm5))
-; AVX512DQ-NEXT: vinserti32x4 $2, %xmm0, %zmm6, %zmm5
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm3[1,3,2,3]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u],zero,zero,xmm3[6,14,u,u,u],zero,zero,xmm3[7,15,u,u,u]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm4[1,3,2,3]
@@ -2238,9 +2235,9 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = zero,xmm0[13,u,u,u,u],zero,zero,xmm0[14,u,u,u,u],zero,zero,xmm0[15]
; AVX512DQ-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512DQ-NEXT: vpternlogq {{.*#+}} xmm0 = xmm0 ^ (mem & (xmm0 ^ xmm3))
+; AVX512DQ-NEXT: vmovdqa %ymm6, 64(%rax)
; AVX512DQ-NEXT: vmovdqa %xmm0, 96(%rax)
; AVX512DQ-NEXT: vmovdqa64 %zmm8, (%rax)
-; AVX512DQ-NEXT: vmovdqa %ymm5, 64(%rax)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -2295,7 +2292,6 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,1,9],zero,ymm7[u,u,u,u,2,10],zero,ymm7[u,u,u,u,19,27],zero,ymm7[u,u,u,u,20,28],zero,ymm7[u,u,u,u,21]
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm7 = ymm7 | (ymm8 & ~mem)
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm7 = ymm7 ^ (mem & (ymm7 ^ ymm5))
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm7, %zmm5
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[1,3,2,3]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u],zero,zero,xmm4[6,14,u,u,u],zero,zero,xmm4[7,15,u,u,u]
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[1,3,2,3]
@@ -2306,9 +2302,9 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = zero,xmm0[13,u,u,u,u],zero,zero,xmm0[14,u,u,u,u],zero,zero,xmm0[15]
; AVX512DQ-FCP-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} xmm0 = xmm0 ^ (mem & (xmm0 ^ xmm3))
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm7, 64(%rax)
; AVX512DQ-FCP-NEXT: vmovdqa %xmm0, 96(%rax)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm6, (%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm5, 64(%rax)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
@@ -2316,81 +2312,80 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-NEXT: vmovdqa (%rdi), %xmm3
-; AVX512BW-NEXT: vmovdqa (%rdx), %xmm4
-; AVX512BW-NEXT: vmovdqa (%r8), %xmm0
-; AVX512BW-NEXT: vmovdqa (%r9), %xmm2
-; AVX512BW-NEXT: vmovdqa (%r10), %xmm1
-; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
-; AVX512BW-NEXT: # ymm5 = mem[0,1,0,1]
-; AVX512BW-NEXT: vpermw %ymm1, %ymm5, %ymm5
-; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm6
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm7 = ymm6[1,3,1,3]
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,1,9,u,u,u,u,u,2,10,u,u,u,u,u,19,27,u,u,u,u,u,20,28,u,u,u,u,u,21]
-; AVX512BW-NEXT: movl $67637280, %edx # imm = 0x4081020
-; AVX512BW-NEXT: kmovd %edx, %k1
-; AVX512BW-NEXT: vmovdqu8 %ymm5, %ymm7 {%k1}
-; AVX512BW-NEXT: vinserti128 $1, (%rcx), %ymm4, %ymm4
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm5 = ymm4[1,3,3,1]
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm5 = zero,ymm5[1,9],zero,zero,zero,zero,zero,ymm5[2,10],zero,zero,zero,zero,zero,ymm5[3,19],zero,zero,zero,zero,zero,ymm5[28,20],zero,zero,zero,zero,zero,ymm5[29,21],zero
-; AVX512BW-NEXT: vinserti128 $1, (%rsi), %ymm3, %ymm3
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm8 = ymm3[3,1,1,3]
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[1],zero,zero,zero,zero,zero,ymm8[10,2],zero,zero,zero,zero,zero,ymm8[11,3],zero,zero,zero,zero,zero,ymm8[20,28],zero,zero,zero,zero,zero,ymm8[21,29],zero,zero,zero
-; AVX512BW-NEXT: vpor %ymm5, %ymm8, %ymm5
-; AVX512BW-NEXT: movl $-2029118408, %ecx # imm = 0x870E1C38
-; AVX512BW-NEXT: kmovd %ecx, %k1
-; AVX512BW-NEXT: vmovdqu8 %ymm7, %ymm5 {%k1}
-; AVX512BW-NEXT: vinserti32x4 $2, %xmm0, %zmm5, %zmm5
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,ymm4[5],zero,zero,zero,zero,zero,zero,ymm4[6],zero,zero,zero,zero,zero,zero,zero,ymm4[23],zero,zero,zero,zero,zero,zero,ymm4[24],zero,zero,zero,zero
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm8 = ymm4[2,3,0,1]
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,ymm8[5],zero,zero,zero,zero,zero,zero,ymm8[6],zero,zero,zero,zero,zero,ymm8[23],zero,zero,zero,zero,zero,zero,ymm8[24],zero,zero,zero,zero,zero
-; AVX512BW-NEXT: vpor %ymm7, %ymm8, %ymm7
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,ymm3[5],zero,zero,zero,zero,zero,zero,ymm3[6],zero,zero,zero,zero,zero,zero,zero,ymm3[23],zero,zero,zero,zero,zero,zero,ymm3[24],zero,zero,zero,zero,zero,zero
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm9 = ymm3[2,3,0,1]
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,ymm9[5],zero,zero,zero,zero,zero,zero,ymm9[6],zero,zero,zero,zero,zero,ymm9[23],zero,zero,zero,zero,zero,zero,ymm9[24],zero,zero,zero,zero,zero,zero,ymm9[25]
-; AVX512BW-NEXT: vpor %ymm9, %ymm8, %ymm8
+; AVX512BW-NEXT: vmovdqa (%rdi), %xmm1
+; AVX512BW-NEXT: vmovdqa (%rdx), %xmm0
+; AVX512BW-NEXT: vmovdqa (%r8), %xmm3
+; AVX512BW-NEXT: vmovdqa (%r9), %xmm4
+; AVX512BW-NEXT: vmovdqa (%r10), %xmm2
+; AVX512BW-NEXT: vinserti128 $1, (%rcx), %ymm0, %ymm0
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,ymm0[5],zero,zero,zero,zero,zero,zero,ymm0[6],zero,zero,zero,zero,zero,zero,zero,ymm0[23],zero,zero,zero,zero,zero,zero,ymm0[24],zero,zero,zero,zero
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm6 = ymm0[2,3,0,1]
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,ymm6[5],zero,zero,zero,zero,zero,zero,ymm6[6],zero,zero,zero,zero,zero,ymm6[23],zero,zero,zero,zero,zero,zero,ymm6[24],zero,zero,zero,zero,zero
+; AVX512BW-NEXT: vpor %ymm5, %ymm6, %ymm5
+; AVX512BW-NEXT: vinserti128 $1, (%rsi), %ymm1, %ymm1
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,ymm1[5],zero,zero,zero,zero,zero,zero,ymm1[6],zero,zero,zero,zero,zero,zero,zero,ymm1[23],zero,zero,zero,zero,zero,zero,ymm1[24],zero,zero,zero,zero,zero,zero
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm7 = ymm1[2,3,0,1]
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,ymm7[5],zero,zero,zero,zero,zero,zero,ymm7[6],zero,zero,zero,zero,zero,ymm7[23],zero,zero,zero,zero,zero,zero,ymm7[24],zero,zero,zero,zero,zero,zero,ymm7[25]
+; AVX512BW-NEXT: vpor %ymm7, %ymm6, %ymm6
; AVX512BW-NEXT: movl $202911840, %ecx # imm = 0xC183060
; AVX512BW-NEXT: kmovd %ecx, %k1
-; AVX512BW-NEXT: vmovdqu8 %ymm7, %ymm8 {%k1}
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm7 = ymm3[0,2,0,2]
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[0,8],zero,zero,zero,zero,zero,ymm7[1,9],zero,zero,zero,zero,zero,ymm7[2,10],zero,zero,zero,zero,zero,ymm7[19,27],zero,zero,zero,zero,zero,ymm7[20,28],zero,zero
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm9 = ymm4[0,2,0,2]
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm9 = zero,zero,ymm9[0,8],zero,zero,zero,zero,zero,ymm9[1,9],zero,zero,zero,zero,zero,ymm9[18,26],zero,zero,zero,zero,zero,ymm9[19,27],zero,zero,zero,zero,zero,ymm9[20,28]
-; AVX512BW-NEXT: vpor %ymm7, %ymm9, %ymm7
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm8, %zmm7, %zmm7
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm8 = ymm6[4],zero,zero,zero,zero,zero,zero,ymm6[5],zero,zero,zero,zero,zero,zero,ymm6[6],zero,zero,zero,zero,zero,zero,zero,ymm6[23],zero,zero,zero,zero,zero,zero,ymm6[24],zero,zero
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm9 = ymm6[2,3,0,1]
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm9 = zero,ymm9[4],zero,zero,zero,zero,zero,zero,ymm9[5],zero,zero,zero,zero,zero,zero,ymm9[6],zero,zero,zero,zero,zero,ymm9[23],zero,zero,zero,zero,zero,zero,ymm9[24],zero,zero,zero
-; AVX512BW-NEXT: vpor %ymm8, %ymm9, %ymm8
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,2,0,2]
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,0,8,u,u,u,u,u,1,9,u,u,u,u,u,18,26,u,u,u,u,u,19,27,u,u,u,u,u]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm8, %zmm6, %zmm6
+; AVX512BW-NEXT: vmovdqu8 %ymm5, %ymm6 {%k1}
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm5 = ymm1[0,2,0,2]
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[0,8],zero,zero,zero,zero,zero,ymm5[1,9],zero,zero,zero,zero,zero,ymm5[2,10],zero,zero,zero,zero,zero,ymm5[19,27],zero,zero,zero,zero,zero,ymm5[20,28],zero,zero
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm7 = ymm0[0,2,0,2]
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,ymm7[0,8],zero,zero,zero,zero,zero,ymm7[1,9],zero,zero,zero,zero,zero,ymm7[18,26],zero,zero,zero,zero,zero,ymm7[19,27],zero,zero,zero,zero,zero,ymm7[20,28]
+; AVX512BW-NEXT: vpor %ymm5, %ymm7, %ymm5
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm6, %zmm5, %zmm5
+; AVX512BW-NEXT: vinserti128 $1, %xmm4, %ymm3, %ymm6
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[4],zero,zero,zero,zero,zero,zero,ymm6[5],zero,zero,zero,zero,zero,zero,ymm6[6],zero,zero,zero,zero,zero,zero,zero,ymm6[23],zero,zero,zero,zero,zero,zero,ymm6[24],zero,zero
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm8 = ymm6[2,3,0,1]
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm8 = zero,ymm8[4],zero,zero,zero,zero,zero,zero,ymm8[5],zero,zero,zero,zero,zero,zero,ymm8[6],zero,zero,zero,zero,zero,ymm8[23],zero,zero,zero,zero,zero,zero,ymm8[24],zero,zero,zero
+; AVX512BW-NEXT: vpor %ymm7, %ymm8, %ymm7
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm8 = ymm6[0,2,0,2]
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[u,u,u,u,0,8,u,u,u,u,u,1,9,u,u,u,u,u,18,26,u,u,u,u,u,19,27,u,u,u,u,u]
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm7, %zmm8, %zmm7
; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm8 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4]
-; AVX512BW-NEXT: vpermw %zmm1, %zmm8, %zmm8
+; AVX512BW-NEXT: vpermw %zmm2, %zmm8, %zmm8
; AVX512BW-NEXT: movabsq $4647998506761461824, %rcx # imm = 0x4081020408102040
; AVX512BW-NEXT: kmovq %rcx, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm8, %zmm6 {%k1}
+; AVX512BW-NEXT: vmovdqu8 %zmm8, %zmm7 {%k1}
; AVX512BW-NEXT: movabsq $8133997386832558192, %rcx # imm = 0x70E1C3870E1C3870
; AVX512BW-NEXT: kmovq %rcx, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm6, %zmm7 {%k1}
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[10,u,u,u,u,u,13,12,u,u,u,u,u,15,14,u]
-; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,7,7,7]
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,1,3,2]
+; AVX512BW-NEXT: vmovdqu8 %zmm7, %zmm5 {%k1}
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[10,u,u,u,u,u,13,12,u,u,u,u,u,15,14,u]
+; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm2[0,1,2,3,6,7,7,7]
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[2,1,3,2]
; AVX512BW-NEXT: movw $-32510, %cx # imm = 0x8102
; AVX512BW-NEXT: kmovd %ecx, %k1
-; AVX512BW-NEXT: vmovdqu8 %xmm1, %xmm0 {%k1}
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm1 = ymm3[1,3,2,3]
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[6,14],zero,zero,zero,zero,zero,xmm1[7,15],zero,zero,zero,zero,zero
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm2 = ymm4[1,3,2,3]
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,xmm2[6,14],zero,zero,zero,zero,zero,xmm2[7,15],zero,zero,zero
-; AVX512BW-NEXT: vpor %xmm1, %xmm2, %xmm1
+; AVX512BW-NEXT: vmovdqu8 %xmm4, %xmm3 {%k1}
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm4 = ymm1[1,3,2,3]
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[6,14],zero,zero,zero,zero,zero,xmm4[7,15],zero,zero,zero,zero,zero
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm7 = ymm0[1,3,2,3]
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,zero,zero,xmm7[6,14],zero,zero,zero,zero,zero,xmm7[7,15],zero,zero,zero
+; AVX512BW-NEXT: vpor %xmm4, %xmm7, %xmm4
; AVX512BW-NEXT: movw $-7741, %cx # imm = 0xE1C3
; AVX512BW-NEXT: kmovd %ecx, %k1
-; AVX512BW-NEXT: vmovdqu8 %xmm0, %xmm1 {%k1}
-; AVX512BW-NEXT: vmovdqa %xmm1, 96(%rax)
-; AVX512BW-NEXT: vmovdqa64 %zmm7, (%rax)
-; AVX512BW-NEXT: vmovdqa %ymm5, 64(%rax)
+; AVX512BW-NEXT: vmovdqu8 %xmm3, %xmm4 {%k1}
+; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
+; AVX512BW-NEXT: # ymm3 = mem[0,1,0,1]
+; AVX512BW-NEXT: vpermw %ymm2, %ymm3, %ymm2
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm3 = ymm6[1,3,1,3]
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,1,9,u,u,u,u,u,2,10,u,u,u,u,u,19,27,u,u,u,u,u,20,28,u,u,u,u,u,21]
+; AVX512BW-NEXT: movl $67637280, %ecx # imm = 0x4081020
+; AVX512BW-NEXT: kmovd %ecx, %k1
+; AVX512BW-NEXT: vmovdqu8 %ymm2, %ymm3 {%k1}
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,3,3,1]
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm0 = zero,ymm0[1,9],zero,zero,zero,zero,zero,ymm0[2,10],zero,zero,zero,zero,zero,ymm0[3,19],zero,zero,zero,zero,zero,ymm0[28,20],zero,zero,zero,zero,zero,ymm0[29,21],zero
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[3,1,1,3]
+; AVX512BW-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[1],zero,zero,zero,zero,zero,ymm1[10,2],zero,zero,zero,zero,zero,ymm1[11,3],zero,zero,zero,zero,zero,ymm1[20,28],zero,zero,zero,zero,zero,ymm1[21,29],zero,zero,zero
+; AVX512BW-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512BW-NEXT: movl $-2029118408, %ecx # imm = 0x870E1C38
+; AVX512BW-NEXT: kmovd %ecx, %k1
+; AVX512BW-NEXT: vmovdqu8 %ymm3, %ymm0 {%k1}
+; AVX512BW-NEXT: vmovdqa %ymm0, 64(%rax)
+; AVX512BW-NEXT: vmovdqa %xmm4, 96(%rax)
+; AVX512BW-NEXT: vmovdqa64 %zmm5, (%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -2403,46 +2398,45 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP-NEXT: vmovdqa (%r8), %xmm2
; AVX512BW-FCP-NEXT: vmovdqa (%r9), %xmm3
; AVX512BW-FCP-NEXT: vmovdqa (%r10), %xmm4
-; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
+; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [1,9,2,10,1,9,2,10]
; AVX512BW-FCP-NEXT: # ymm5 = mem[0,1,0,1]
-; AVX512BW-FCP-NEXT: vpermw %ymm4, %ymm5, %ymm5
+; AVX512BW-FCP-NEXT: vpermi2d %ymm3, %ymm2, %ymm5
; AVX512BW-FCP-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm6
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm6[1,3,1,3]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,1,9,u,u,u,u,u,2,10,u,u,u,u,u,19,27,u,u,u,u,u,20,28,u,u,u,u,u,21]
-; AVX512BW-FCP-NEXT: movl $67637280, %edx # imm = 0x4081020
-; AVX512BW-FCP-NEXT: kmovd %edx, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %ymm5, %ymm7 {%k1}
-; AVX512BW-FCP-NEXT: vinserti128 $1, (%rcx), %ymm1, %ymm1
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm1[1,3,3,1]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm5 = zero,ymm5[1,9],zero,zero,zero,zero,zero,ymm5[2,10],zero,zero,zero,zero,zero,ymm5[3,19],zero,zero,zero,zero,zero,ymm5[28,20],zero,zero,zero,zero,zero,ymm5[29,21],zero
-; AVX512BW-FCP-NEXT: vinserti128 $1, (%rsi), %ymm0, %ymm0
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm0[3,1,1,3]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[1],zero,zero,zero,zero,zero,ymm8[10,2],zero,zero,zero,zero,zero,ymm8[11,3],zero,zero,zero,zero,zero,ymm8[20,28],zero,zero,zero,zero,zero,ymm8[21,29],zero,zero,zero
-; AVX512BW-FCP-NEXT: vpor %ymm5, %ymm8, %ymm5
-; AVX512BW-FCP-NEXT: movl $-2029118408, %ecx # imm = 0x870E1C38
-; AVX512BW-FCP-NEXT: kmovd %ecx, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %ymm7, %ymm5 {%k1}
-; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm5, %zmm5
-; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [1,9,2,10,1,9,2,10]
-; AVX512BW-FCP-NEXT: # ymm7 = mem[0,1,0,1]
-; AVX512BW-FCP-NEXT: vpermi2d %ymm3, %ymm2, %ymm7
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,2,0,2]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm6
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm6 = zmm6[u,u,u,u,0,8,u,u,u,u,u,1,9,u,u,u,u,u,18,26,u,u,u,u,u,19,27,u,u,u,u,u,32,36,u,u,u,u,u,33,37,u,u,u,u,u,34,38,u,u,u,u,u,51,55,u,u,u,u,u,56,60,u,u]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm6[0,2,0,2]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm7, %zmm5
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm5 = zmm5[u,u,u,u,0,8,u,u,u,u,u,1,9,u,u,u,u,u,18,26,u,u,u,u,u,19,27,u,u,u,u,u,32,36,u,u,u,u,u,33,37,u,u,u,u,u,34,38,u,u,u,u,u,51,55,u,u,u,u,u,56,60,u,u]
; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm7 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4]
; AVX512BW-FCP-NEXT: vpermw %zmm4, %zmm7, %zmm7
-; AVX512BW-FCP-NEXT: movabsq $4647998506761461824, %rcx # imm = 0x4081020408102040
-; AVX512BW-FCP-NEXT: kmovq %rcx, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm7, %zmm6 {%k1}
+; AVX512BW-FCP-NEXT: movabsq $4647998506761461824, %rdx # imm = 0x4081020408102040
+; AVX512BW-FCP-NEXT: kmovq %rdx, %k1
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm7, %zmm5 {%k1}
+; AVX512BW-FCP-NEXT: vinserti128 $1, (%rsi), %ymm0, %ymm0
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [0,0,4,0,0,1,4,5,1,5,0,0,1,5,2,6]
; AVX512BW-FCP-NEXT: vpermd %zmm0, %zmm7, %zmm8
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm8 = zmm8[0,8],zero,zero,zero,zero,zero,zmm8[1,9],zero,zero,zero,zero,zero,zmm8[2,10],zero,zero,zero,zero,zero,zmm8[19,27],zero,zero,zero,zero,zero,zmm8[20,28],zero,zero,zero,zero,zero,zmm8[33,37],zero,zero,zero,zero,zero,zmm8[34,38],zero,zero,zero,zero,zero,zmm8[51,55],zero,zero,zero,zero,zero,zmm8[56,60],zero,zero,zero,zero,zero,zmm8[57]
+; AVX512BW-FCP-NEXT: vinserti128 $1, (%rcx), %ymm1, %ymm1
; AVX512BW-FCP-NEXT: vpermd %zmm1, %zmm7, %zmm7
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm7 = zero,zero,zmm7[0,8],zero,zero,zero,zero,zero,zmm7[1,9],zero,zero,zero,zero,zero,zmm7[18,26],zero,zero,zero,zero,zero,zmm7[19,27],zero,zero,zero,zero,zero,zmm7[20,28],zero,zero,zero,zero,zero,zmm7[33,37],zero,zero,zero,zero,zero,zmm7[34,38],zero,zero,zero,zero,zero,zmm7[51,55],zero,zero,zero,zero,zero,zmm7[56,60],zero,zero,zero,zero
; AVX512BW-FCP-NEXT: vporq %zmm8, %zmm7, %zmm7
; AVX512BW-FCP-NEXT: movabsq $8133997386832558192, %rcx # imm = 0x70E1C3870E1C3870
; AVX512BW-FCP-NEXT: kmovq %rcx, %k1
-; AVX512BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm7 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqu8 %zmm5, %zmm7 {%k1}
+; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
+; AVX512BW-FCP-NEXT: # ymm5 = mem[0,1,0,1]
+; AVX512BW-FCP-NEXT: vpermw %ymm4, %ymm5, %ymm5
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[1,3,1,3]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,1,9,u,u,u,u,u,2,10,u,u,u,u,u,19,27,u,u,u,u,u,20,28,u,u,u,u,u,21]
+; AVX512BW-FCP-NEXT: movl $67637280, %ecx # imm = 0x4081020
+; AVX512BW-FCP-NEXT: kmovd %ecx, %k1
+; AVX512BW-FCP-NEXT: vmovdqu8 %ymm5, %ymm6 {%k1}
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm1[1,3,3,1]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm5 = zero,ymm5[1,9],zero,zero,zero,zero,zero,ymm5[2,10],zero,zero,zero,zero,zero,ymm5[3,19],zero,zero,zero,zero,zero,ymm5[28,20],zero,zero,zero,zero,zero,ymm5[29,21],zero
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm0[3,1,1,3]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[1],zero,zero,zero,zero,zero,ymm8[10,2],zero,zero,zero,zero,zero,ymm8[11,3],zero,zero,zero,zero,zero,ymm8[20,28],zero,zero,zero,zero,zero,ymm8[21,29],zero,zero,zero
+; AVX512BW-FCP-NEXT: vpor %ymm5, %ymm8, %ymm5
+; AVX512BW-FCP-NEXT: movl $-2029118408, %ecx # imm = 0x870E1C38
+; AVX512BW-FCP-NEXT: kmovd %ecx, %k1
+; AVX512BW-FCP-NEXT: vmovdqu8 %ymm6, %ymm5 {%k1}
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,3,2,3]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[6,14],zero,zero,zero,zero,zero,xmm0[7,15],zero,zero,zero,zero,zero
; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[1,3,2,3]
@@ -2455,9 +2449,9 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP-NEXT: movw $-7741, %cx # imm = 0xE1C3
; AVX512BW-FCP-NEXT: kmovd %ecx, %k1
; AVX512BW-FCP-NEXT: vmovdqu8 %xmm1, %xmm0 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqa %ymm5, 64(%rax)
; AVX512BW-FCP-NEXT: vmovdqa %xmm0, 96(%rax)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm7, (%rax)
-; AVX512BW-FCP-NEXT: vmovdqa %ymm5, 64(%rax)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
@@ -2465,81 +2459,80 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW: # %bb.0:
; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %xmm3
-; AVX512DQ-BW-NEXT: vmovdqa (%rdx), %xmm4
-; AVX512DQ-BW-NEXT: vmovdqa (%r8), %xmm0
-; AVX512DQ-BW-NEXT: vmovdqa (%r9), %xmm2
-; AVX512DQ-BW-NEXT: vmovdqa (%r10), %xmm1
-; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
-; AVX512DQ-BW-NEXT: # ymm5 = mem[0,1,0,1]
-; AVX512DQ-BW-NEXT: vpermw %ymm1, %ymm5, %ymm5
-; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm6
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm7 = ymm6[1,3,1,3]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,1,9,u,u,u,u,u,2,10,u,u,u,u,u,19,27,u,u,u,u,u,20,28,u,u,u,u,u,21]
-; AVX512DQ-BW-NEXT: movl $67637280, %edx # imm = 0x4081020
-; AVX512DQ-BW-NEXT: kmovd %edx, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %ymm5, %ymm7 {%k1}
-; AVX512DQ-BW-NEXT: vinserti128 $1, (%rcx), %ymm4, %ymm4
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm5 = ymm4[1,3,3,1]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm5 = zero,ymm5[1,9],zero,zero,zero,zero,zero,ymm5[2,10],zero,zero,zero,zero,zero,ymm5[3,19],zero,zero,zero,zero,zero,ymm5[28,20],zero,zero,zero,zero,zero,ymm5[29,21],zero
-; AVX512DQ-BW-NEXT: vinserti128 $1, (%rsi), %ymm3, %ymm3
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm8 = ymm3[3,1,1,3]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[1],zero,zero,zero,zero,zero,ymm8[10,2],zero,zero,zero,zero,zero,ymm8[11,3],zero,zero,zero,zero,zero,ymm8[20,28],zero,zero,zero,zero,zero,ymm8[21,29],zero,zero,zero
-; AVX512DQ-BW-NEXT: vpor %ymm5, %ymm8, %ymm5
-; AVX512DQ-BW-NEXT: movl $-2029118408, %ecx # imm = 0x870E1C38
-; AVX512DQ-BW-NEXT: kmovd %ecx, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %ymm7, %ymm5 {%k1}
-; AVX512DQ-BW-NEXT: vinserti32x4 $2, %xmm0, %zmm5, %zmm5
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,ymm4[5],zero,zero,zero,zero,zero,zero,ymm4[6],zero,zero,zero,zero,zero,zero,zero,ymm4[23],zero,zero,zero,zero,zero,zero,ymm4[24],zero,zero,zero,zero
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm8 = ymm4[2,3,0,1]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,ymm8[5],zero,zero,zero,zero,zero,zero,ymm8[6],zero,zero,zero,zero,zero,ymm8[23],zero,zero,zero,zero,zero,zero,ymm8[24],zero,zero,zero,zero,zero
-; AVX512DQ-BW-NEXT: vpor %ymm7, %ymm8, %ymm7
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,ymm3[5],zero,zero,zero,zero,zero,zero,ymm3[6],zero,zero,zero,zero,zero,zero,zero,ymm3[23],zero,zero,zero,zero,zero,zero,ymm3[24],zero,zero,zero,zero,zero,zero
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm9 = ymm3[2,3,0,1]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,ymm9[5],zero,zero,zero,zero,zero,zero,ymm9[6],zero,zero,zero,zero,zero,ymm9[23],zero,zero,zero,zero,zero,zero,ymm9[24],zero,zero,zero,zero,zero,zero,ymm9[25]
-; AVX512DQ-BW-NEXT: vpor %ymm9, %ymm8, %ymm8
+; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %xmm1
+; AVX512DQ-BW-NEXT: vmovdqa (%rdx), %xmm0
+; AVX512DQ-BW-NEXT: vmovdqa (%r8), %xmm3
+; AVX512DQ-BW-NEXT: vmovdqa (%r9), %xmm4
+; AVX512DQ-BW-NEXT: vmovdqa (%r10), %xmm2
+; AVX512DQ-BW-NEXT: vinserti128 $1, (%rcx), %ymm0, %ymm0
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,ymm0[5],zero,zero,zero,zero,zero,zero,ymm0[6],zero,zero,zero,zero,zero,zero,zero,ymm0[23],zero,zero,zero,zero,zero,zero,ymm0[24],zero,zero,zero,zero
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm6 = ymm0[2,3,0,1]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,ymm6[5],zero,zero,zero,zero,zero,zero,ymm6[6],zero,zero,zero,zero,zero,ymm6[23],zero,zero,zero,zero,zero,zero,ymm6[24],zero,zero,zero,zero,zero
+; AVX512DQ-BW-NEXT: vpor %ymm5, %ymm6, %ymm5
+; AVX512DQ-BW-NEXT: vinserti128 $1, (%rsi), %ymm1, %ymm1
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,ymm1[5],zero,zero,zero,zero,zero,zero,ymm1[6],zero,zero,zero,zero,zero,zero,zero,ymm1[23],zero,zero,zero,zero,zero,zero,ymm1[24],zero,zero,zero,zero,zero,zero
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm7 = ymm1[2,3,0,1]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,ymm7[5],zero,zero,zero,zero,zero,zero,ymm7[6],zero,zero,zero,zero,zero,ymm7[23],zero,zero,zero,zero,zero,zero,ymm7[24],zero,zero,zero,zero,zero,zero,ymm7[25]
+; AVX512DQ-BW-NEXT: vpor %ymm7, %ymm6, %ymm6
; AVX512DQ-BW-NEXT: movl $202911840, %ecx # imm = 0xC183060
; AVX512DQ-BW-NEXT: kmovd %ecx, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %ymm7, %ymm8 {%k1}
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm7 = ymm3[0,2,0,2]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[0,8],zero,zero,zero,zero,zero,ymm7[1,9],zero,zero,zero,zero,zero,ymm7[2,10],zero,zero,zero,zero,zero,ymm7[19,27],zero,zero,zero,zero,zero,ymm7[20,28],zero,zero
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm9 = ymm4[0,2,0,2]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm9 = zero,zero,ymm9[0,8],zero,zero,zero,zero,zero,ymm9[1,9],zero,zero,zero,zero,zero,ymm9[18,26],zero,zero,zero,zero,zero,ymm9[19,27],zero,zero,zero,zero,zero,ymm9[20,28]
-; AVX512DQ-BW-NEXT: vpor %ymm7, %ymm9, %ymm7
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm8, %zmm7, %zmm7
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm8 = ymm6[4],zero,zero,zero,zero,zero,zero,ymm6[5],zero,zero,zero,zero,zero,zero,ymm6[6],zero,zero,zero,zero,zero,zero,zero,ymm6[23],zero,zero,zero,zero,zero,zero,ymm6[24],zero,zero
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm9 = ymm6[2,3,0,1]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm9 = zero,ymm9[4],zero,zero,zero,zero,zero,zero,ymm9[5],zero,zero,zero,zero,zero,zero,ymm9[6],zero,zero,zero,zero,zero,ymm9[23],zero,zero,zero,zero,zero,zero,ymm9[24],zero,zero,zero
-; AVX512DQ-BW-NEXT: vpor %ymm8, %ymm9, %ymm8
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,2,0,2]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,0,8,u,u,u,u,u,1,9,u,u,u,u,u,18,26,u,u,u,u,u,19,27,u,u,u,u,u]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm8, %zmm6, %zmm6
+; AVX512DQ-BW-NEXT: vmovdqu8 %ymm5, %ymm6 {%k1}
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm5 = ymm1[0,2,0,2]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[0,8],zero,zero,zero,zero,zero,ymm5[1,9],zero,zero,zero,zero,zero,ymm5[2,10],zero,zero,zero,zero,zero,ymm5[19,27],zero,zero,zero,zero,zero,ymm5[20,28],zero,zero
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm7 = ymm0[0,2,0,2]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,ymm7[0,8],zero,zero,zero,zero,zero,ymm7[1,9],zero,zero,zero,zero,zero,ymm7[18,26],zero,zero,zero,zero,zero,ymm7[19,27],zero,zero,zero,zero,zero,ymm7[20,28]
+; AVX512DQ-BW-NEXT: vpor %ymm5, %ymm7, %ymm5
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm6, %zmm5, %zmm5
+; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm4, %ymm3, %ymm6
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[4],zero,zero,zero,zero,zero,zero,ymm6[5],zero,zero,zero,zero,zero,zero,ymm6[6],zero,zero,zero,zero,zero,zero,zero,ymm6[23],zero,zero,zero,zero,zero,zero,ymm6[24],zero,zero
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm8 = ymm6[2,3,0,1]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm8 = zero,ymm8[4],zero,zero,zero,zero,zero,zero,ymm8[5],zero,zero,zero,zero,zero,zero,ymm8[6],zero,zero,zero,zero,zero,ymm8[23],zero,zero,zero,zero,zero,zero,ymm8[24],zero,zero,zero
+; AVX512DQ-BW-NEXT: vpor %ymm7, %ymm8, %ymm7
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm8 = ymm6[0,2,0,2]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[u,u,u,u,0,8,u,u,u,u,u,1,9,u,u,u,u,u,18,26,u,u,u,u,u,19,27,u,u,u,u,u]
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm7, %zmm8, %zmm7
; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm8 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4]
-; AVX512DQ-BW-NEXT: vpermw %zmm1, %zmm8, %zmm8
+; AVX512DQ-BW-NEXT: vpermw %zmm2, %zmm8, %zmm8
; AVX512DQ-BW-NEXT: movabsq $4647998506761461824, %rcx # imm = 0x4081020408102040
; AVX512DQ-BW-NEXT: kmovq %rcx, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm8, %zmm6 {%k1}
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm8, %zmm7 {%k1}
; AVX512DQ-BW-NEXT: movabsq $8133997386832558192, %rcx # imm = 0x70E1C3870E1C3870
; AVX512DQ-BW-NEXT: kmovq %rcx, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %zmm6, %zmm7 {%k1}
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[10,u,u,u,u,u,13,12,u,u,u,u,u,15,14,u]
-; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,7,7,7]
-; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,1,3,2]
+; AVX512DQ-BW-NEXT: vmovdqu8 %zmm7, %zmm5 {%k1}
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[10,u,u,u,u,u,13,12,u,u,u,u,u,15,14,u]
+; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm2[0,1,2,3,6,7,7,7]
+; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[2,1,3,2]
; AVX512DQ-BW-NEXT: movw $-32510, %cx # imm = 0x8102
; AVX512DQ-BW-NEXT: kmovd %ecx, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %xmm1, %xmm0 {%k1}
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm1 = ymm3[1,3,2,3]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[6,14],zero,zero,zero,zero,zero,xmm1[7,15],zero,zero,zero,zero,zero
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm2 = ymm4[1,3,2,3]
-; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,xmm2[6,14],zero,zero,zero,zero,zero,xmm2[7,15],zero,zero,zero
-; AVX512DQ-BW-NEXT: vpor %xmm1, %xmm2, %xmm1
+; AVX512DQ-BW-NEXT: vmovdqu8 %xmm4, %xmm3 {%k1}
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm4 = ymm1[1,3,2,3]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[6,14],zero,zero,zero,zero,zero,xmm4[7,15],zero,zero,zero,zero,zero
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm7 = ymm0[1,3,2,3]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,zero,zero,xmm7[6,14],zero,zero,zero,zero,zero,xmm7[7,15],zero,zero,zero
+; AVX512DQ-BW-NEXT: vpor %xmm4, %xmm7, %xmm4
; AVX512DQ-BW-NEXT: movw $-7741, %cx # imm = 0xE1C3
; AVX512DQ-BW-NEXT: kmovd %ecx, %k1
-; AVX512DQ-BW-NEXT: vmovdqu8 %xmm0, %xmm1 {%k1}
-; AVX512DQ-BW-NEXT: vmovdqa %xmm1, 96(%rax)
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm7, (%rax)
-; AVX512DQ-BW-NEXT: vmovdqa %ymm5, 64(%rax)
+; AVX512DQ-BW-NEXT: vmovdqu8 %xmm3, %xmm4 {%k1}
+; AVX512DQ-BW-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
+; AVX512DQ-BW-NEXT: # ymm3 = mem[0,1,0,1]
+; AVX512DQ-BW-NEXT: vpermw %ymm2, %ymm3, %ymm2
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm3 = ymm6[1,3,1,3]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,1,9,u,u,u,u,u,2,10,u,u,u,u,u,19,27,u,u,u,u,u,20,28,u,u,u,u,u,21]
+; AVX512DQ-BW-NEXT: movl $67637280, %ecx # imm = 0x4081020
+; AVX512DQ-BW-NEXT: kmovd %ecx, %k1
+; AVX512DQ-BW-NEXT: vmovdqu8 %ymm2, %ymm3 {%k1}
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,3,3,1]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm0 = zero,ymm0[1,9],zero,zero,zero,zero,zero,ymm0[2,10],zero,zero,zero,zero,zero,ymm0[3,19],zero,zero,zero,zero,zero,ymm0[28,20],zero,zero,zero,zero,zero,ymm0[29,21],zero
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[3,1,1,3]
+; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[1],zero,zero,zero,zero,zero,ymm1[10,2],zero,zero,zero,zero,zero,ymm1[11,3],zero,zero,zero,zero,zero,ymm1[20,28],zero,zero,zero,zero,zero,ymm1[21,29],zero,zero,zero
+; AVX512DQ-BW-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512DQ-BW-NEXT: movl $-2029118408, %ecx # imm = 0x870E1C38
+; AVX512DQ-BW-NEXT: kmovd %ecx, %k1
+; AVX512DQ-BW-NEXT: vmovdqu8 %ymm3, %ymm0 {%k1}
+; AVX512DQ-BW-NEXT: vmovdqa %ymm0, 64(%rax)
+; AVX512DQ-BW-NEXT: vmovdqa %xmm4, 96(%rax)
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm5, (%rax)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
@@ -2552,46 +2545,45 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r8), %xmm2
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r9), %xmm3
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r10), %xmm4
-; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
+; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [1,9,2,10,1,9,2,10]
; AVX512DQ-BW-FCP-NEXT: # ymm5 = mem[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm4, %ymm5, %ymm5
+; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm3, %ymm2, %ymm5
; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm6
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm6[1,3,1,3]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,1,9,u,u,u,u,u,2,10,u,u,u,u,u,19,27,u,u,u,u,u,20,28,u,u,u,u,u,21]
-; AVX512DQ-BW-FCP-NEXT: movl $67637280, %edx # imm = 0x4081020
-; AVX512DQ-BW-FCP-NEXT: kmovd %edx, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %ymm5, %ymm7 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, (%rcx), %ymm1, %ymm1
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm1[1,3,3,1]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm5 = zero,ymm5[1,9],zero,zero,zero,zero,zero,ymm5[2,10],zero,zero,zero,zero,zero,ymm5[3,19],zero,zero,zero,zero,zero,ymm5[28,20],zero,zero,zero,zero,zero,ymm5[29,21],zero
-; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, (%rsi), %ymm0, %ymm0
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm0[3,1,1,3]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[1],zero,zero,zero,zero,zero,ymm8[10,2],zero,zero,zero,zero,zero,ymm8[11,3],zero,zero,zero,zero,zero,ymm8[20,28],zero,zero,zero,zero,zero,ymm8[21,29],zero,zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm5, %ymm8, %ymm5
-; AVX512DQ-BW-FCP-NEXT: movl $-2029118408, %ecx # imm = 0x870E1C38
-; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %ymm7, %ymm5 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm5, %zmm5
-; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [1,9,2,10,1,9,2,10]
-; AVX512DQ-BW-FCP-NEXT: # ymm7 = mem[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm3, %ymm2, %ymm7
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,2,0,2]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm6
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm6 = zmm6[u,u,u,u,0,8,u,u,u,u,u,1,9,u,u,u,u,u,18,26,u,u,u,u,u,19,27,u,u,u,u,u,32,36,u,u,u,u,u,33,37,u,u,u,u,u,34,38,u,u,u,u,u,51,55,u,u,u,u,u,56,60,u,u]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm6[0,2,0,2]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm7, %zmm5
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm5 = zmm5[u,u,u,u,0,8,u,u,u,u,u,1,9,u,u,u,u,u,18,26,u,u,u,u,u,19,27,u,u,u,u,u,32,36,u,u,u,u,u,33,37,u,u,u,u,u,34,38,u,u,u,u,u,51,55,u,u,u,u,u,56,60,u,u]
; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm7 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4]
; AVX512DQ-BW-FCP-NEXT: vpermw %zmm4, %zmm7, %zmm7
-; AVX512DQ-BW-FCP-NEXT: movabsq $4647998506761461824, %rcx # imm = 0x4081020408102040
-; AVX512DQ-BW-FCP-NEXT: kmovq %rcx, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm7, %zmm6 {%k1}
+; AVX512DQ-BW-FCP-NEXT: movabsq $4647998506761461824, %rdx # imm = 0x4081020408102040
+; AVX512DQ-BW-FCP-NEXT: kmovq %rdx, %k1
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm7, %zmm5 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, (%rsi), %ymm0, %ymm0
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [0,0,4,0,0,1,4,5,1,5,0,0,1,5,2,6]
; AVX512DQ-BW-FCP-NEXT: vpermd %zmm0, %zmm7, %zmm8
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm8 = zmm8[0,8],zero,zero,zero,zero,zero,zmm8[1,9],zero,zero,zero,zero,zero,zmm8[2,10],zero,zero,zero,zero,zero,zmm8[19,27],zero,zero,zero,zero,zero,zmm8[20,28],zero,zero,zero,zero,zero,zmm8[33,37],zero,zero,zero,zero,zero,zmm8[34,38],zero,zero,zero,zero,zero,zmm8[51,55],zero,zero,zero,zero,zero,zmm8[56,60],zero,zero,zero,zero,zero,zmm8[57]
+; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, (%rcx), %ymm1, %ymm1
; AVX512DQ-BW-FCP-NEXT: vpermd %zmm1, %zmm7, %zmm7
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm7 = zero,zero,zmm7[0,8],zero,zero,zero,zero,zero,zmm7[1,9],zero,zero,zero,zero,zero,zmm7[18,26],zero,zero,zero,zero,zero,zmm7[19,27],zero,zero,zero,zero,zero,zmm7[20,28],zero,zero,zero,zero,zero,zmm7[33,37],zero,zero,zero,zero,zero,zmm7[34,38],zero,zero,zero,zero,zero,zmm7[51,55],zero,zero,zero,zero,zero,zmm7[56,60],zero,zero,zero,zero
; AVX512DQ-BW-FCP-NEXT: vporq %zmm8, %zmm7, %zmm7
; AVX512DQ-BW-FCP-NEXT: movabsq $8133997386832558192, %rcx # imm = 0x70E1C3870E1C3870
; AVX512DQ-BW-FCP-NEXT: kmovq %rcx, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm6, %zmm7 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm5, %zmm7 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
+; AVX512DQ-BW-FCP-NEXT: # ymm5 = mem[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT: vpermw %ymm4, %ymm5, %ymm5
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[1,3,1,3]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,1,9,u,u,u,u,u,2,10,u,u,u,u,u,19,27,u,u,u,u,u,20,28,u,u,u,u,u,21]
+; AVX512DQ-BW-FCP-NEXT: movl $67637280, %ecx # imm = 0x4081020
+; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k1
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %ymm5, %ymm6 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm1[1,3,3,1]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm5 = zero,ymm5[1,9],zero,zero,zero,zero,zero,ymm5[2,10],zero,zero,zero,zero,zero,ymm5[3,19],zero,zero,zero,zero,zero,ymm5[28,20],zero,zero,zero,zero,zero,ymm5[29,21],zero
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm0[3,1,1,3]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[1],zero,zero,zero,zero,zero,ymm8[10,2],zero,zero,zero,zero,zero,ymm8[11,3],zero,zero,zero,zero,zero,ymm8[20,28],zero,zero,zero,zero,zero,ymm8[21,29],zero,zero,zero
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm5, %ymm8, %ymm5
+; AVX512DQ-BW-FCP-NEXT: movl $-2029118408, %ecx # imm = 0x870E1C38
+; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k1
+; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %ymm6, %ymm5 {%k1}
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,3,2,3]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[6,14],zero,zero,zero,zero,zero,xmm0[7,15],zero,zero,zero,zero,zero
; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[1,3,2,3]
@@ -2604,9 +2596,9 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP-NEXT: movw $-7741, %cx # imm = 0xE1C3
; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %xmm1, %xmm0 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm5, 64(%rax)
; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm0, 96(%rax)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm7, (%rax)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm5, 64(%rax)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%in.vec0 = load <16 x i8>, ptr %in.vecptr0, align 64
From 7f93651bdc0268aa76f005da5906a4fe380cfa67 Mon Sep 17 00:00:00 2001
From: Bjorn Pettersson <bjorn.a.pettersson at ericsson.com>
Date: Sat, 31 May 2025 09:37:27 +0200
Subject: [PATCH 4/4] [SelectionDAG] Deal with POISON for
INSERT_VECTOR_ELT/INSERT_SUBVECTOR (part 3)
Target-specific patches to avoid regressions seen after "part 1",
which aims at fixing github issue #141034.
One perhaps controversial change here is that convertToScalableVector
now uses POISON instead of UNDEF for any additional elements added
when converting to the scalable vector. This avoids ending up with
chains like
t31: nxv1f32 =
t32: v2f32 = extract_subvector t31, Constant:i64<0>
t38: nxv1f32 = insert_subvector undef:nxv1f32, t32, Constant:i64<0>
since when we instead insert into poison, we can just use t31 in place
of t38 without any risk that t31 is more poisonous than t38.
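
To make the difference concrete, here is a minimal sketch of the two
forms, using the SelectionDAG API as in the AArch64/RISC-V hunks below
(the variable names are illustrative only):

// Old form: pad with UNDEF. A later extract_subvector/insert_subvector
// round-trip into undef cannot simply be replaced by the original node,
// since the original node might be more poisonous than undef.
SDValue Padded =
    DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
// New form: pad with POISON. Inserting a subvector that was just
// extracted from some node tN back into poison can fold to tN itself.
SDValue PaddedPoison =
    DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getPOISON(VT), V, Zero);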
---
llvm/include/llvm/CodeGen/SelectionDAG.h | 11 +-
.../Target/AArch64/AArch64ISelLowering.cpp | 9 +-
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 17 +--
.../CodeGen/AArch64/arm64-vector-insertion.ll | 2 -
.../AArch64/sve-fixed-length-fp-vselect.ll | 81 +------------
.../AArch64/sve-fixed-length-frame-offests.ll | 8 +-
.../AArch64/sve-fixed-length-int-vselect.ll | 108 ++----------------
.../AArch64/sve-fixed-length-masked-gather.ll | 6 +-
...-streaming-mode-fixed-length-fp-vselect.ll | 21 ----
...streaming-mode-fixed-length-int-vselect.ll | 28 -----
.../fixed-vectors-vfw-web-simplification.ll | 90 +++++----------
.../fixed-vectors-vw-web-simplification.ll | 55 +++------
12 files changed, 93 insertions(+), 343 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h
index a98e46c587273..3abdafac4b411 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -953,8 +953,17 @@ class SelectionDAG {
}
/// Insert \p SubVec at the \p Idx element of \p Vec.
+ /// If \p SkipUndef is true and \p SubVec is UNDEF/POISON, then \p Vec is
+ /// returned.
SDValue getInsertSubvector(const SDLoc &DL, SDValue Vec, SDValue SubVec,
- unsigned Idx) {
+ unsigned Idx, bool SkipUndef = false) {
+    // Skipping the insert of an UNDEF subvector can leave POISON elements in
+    // the resulting vector. SkipUndef is useful in situations where getNode
+    // can't reason well enough to ignore the insert on its own, e.g. with
+    // scalable vectors, when the user of this method knows that the
+    // subvector being replaced isn't POISON.
+ if (SkipUndef && SubVec.isUndef())
+ return Vec;
return getNode(ISD::INSERT_SUBVECTOR, DL, Vec.getValueType(), Vec, SubVec,
getVectorIdxConstant(Idx, DL));
}
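
As a usage sketch, condensed from the RISC-V CONCAT_VECTORS lowering
changed further down (NumOpElts and Op are as in that code):

// Build a concatenation by inserting each operand at its offset. With
// SkipUndef=true an UNDEF/POISON operand simply leaves Vec unchanged,
// rather than emitting an insert that getNode may be unable to fold
// away for scalable types.
SDValue Vec = DAG.getUNDEF(VT);
for (const auto &OpIdx : enumerate(Op->ops()))
  Vec = DAG.getInsertSubvector(DL, Vec, OpIdx.value(),
                               OpIdx.index() * NumOpElts,
                               /*SkipUndef=*/true);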
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index e1b22b3eaf7bc..8bbc620cdef63 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -14925,11 +14925,14 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
if (PreferDUPAndInsert) {
// First, build a constant vector with the common element.
- SmallVector<SDValue, 8> Ops(NumElts, Value);
+    // Make sure to freeze the common element first, since we will also use
+    // it for indices that should be UNDEF (so we want to avoid making those
+    // elements more poisonous).
+ SmallVector<SDValue, 8> Ops(NumElts, DAG.getFreeze(Value));
SDValue NewVector = LowerBUILD_VECTOR(DAG.getBuildVector(VT, dl, Ops), DAG);
// Next, insert the elements that do not match the common value.
for (unsigned I = 0; I < NumElts; ++I)
- if (Op.getOperand(I) != Value)
+ if (Op.getOperand(I) != Value && !Op.getOperand(I).isUndef())
NewVector =
DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, NewVector,
Op.getOperand(I), DAG.getConstant(I, dl, MVT::i64));
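
Why freeze? The dup writes the common element to every lane, including
lanes whose BUILD_VECTOR operand is undef and which now get no
corrective insert. A sketch of the safety argument (Frozen and Splat
are illustrative names):

// freeze(Value) is some fixed, well-defined value even when Value is
// poison, so leaving the undef lanes at the splatted value never makes
// the result more poisonous than the original BUILD_VECTOR.
SDValue Frozen = DAG.getFreeze(Value);
SmallVector<SDValue, 8> Ops(NumElts, Frozen);
SDValue Splat = DAG.getBuildVector(VT, dl, Ops);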
@@ -28486,7 +28489,7 @@ static SDValue convertToScalableVector(SelectionDAG &DAG, EVT VT, SDValue V) {
"Expected a fixed length vector operand!");
SDLoc DL(V);
SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
- return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
+ return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getPOISON(VT), V, Zero);
}
// Shrink V so it's just big enough to maintain a VT's worth of data.
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index aa8999755d560..8131a02b0dd8e 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -2844,7 +2844,7 @@ static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
assert(V.getValueType().isFixedLengthVector() &&
"Expected a fixed length vector operand!");
SDLoc DL(V);
- return DAG.getInsertSubvector(DL, DAG.getUNDEF(VT), V, 0);
+ return DAG.getInsertSubvector(DL, DAG.getPOISON(VT), V, 0);
}
// Shrink V so it's just big enough to maintain a VT's worth of data.
@@ -4327,7 +4327,8 @@ static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
DAG.getNode(ISD::BUILD_VECTOR, DL, OneRegVT, OneVRegOfOps);
SubBV = convertToScalableVector(M1VT, SubBV, DAG, Subtarget);
unsigned InsertIdx = (i / ElemsPerVReg) * NumOpElts;
- Vec = DAG.getInsertSubvector(DL, Vec, SubBV, InsertIdx);
+ Vec = DAG.getInsertSubvector(DL, Vec, SubBV, InsertIdx,
+ /*SkipUndef=*/true);
}
return convertFromScalableVector(VT, Vec, DAG, Subtarget);
}
@@ -7813,10 +7814,8 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
SDValue Vec = DAG.getUNDEF(VT);
for (const auto &OpIdx : enumerate(Op->ops())) {
SDValue SubVec = OpIdx.value();
- // Don't insert undef subvectors.
- if (SubVec.isUndef())
- continue;
- Vec = DAG.getInsertSubvector(DL, Vec, SubVec, OpIdx.index() * NumOpElts);
+ Vec = DAG.getInsertSubvector(DL, Vec, SubVec, OpIdx.index() * NumOpElts,
+ /*SkipUndef=*/true);
}
return Vec;
}
@@ -12143,9 +12142,11 @@ SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
// Reassemble the low and high pieces reversed.
// FIXME: This is a CONCAT_VECTORS.
- SDValue Res = DAG.getInsertSubvector(DL, DAG.getUNDEF(VecVT), Hi, 0);
+ SDValue Res = DAG.getInsertSubvector(DL, DAG.getUNDEF(VecVT), Hi, 0,
+ /*SkipUndef=*/true);
return DAG.getInsertSubvector(DL, Res, Lo,
- LoVT.getVectorMinNumElements());
+ LoVT.getVectorMinNumElements(),
+ /*SkipUndef=*/true);
}
// Just promote the int type to i16 which will double the LMUL.
diff --git a/llvm/test/CodeGen/AArch64/arm64-vector-insertion.ll b/llvm/test/CodeGen/AArch64/arm64-vector-insertion.ll
index 2c44f56316801..5962150ac9ffc 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vector-insertion.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vector-insertion.ll
@@ -84,7 +84,6 @@ define <16 x i8> @test_insert_v16i8_insert_2_undef_base_skip8(i32 %a0) {
; CHECK-NEXT: lsr w8, w0, #5
; CHECK-NEXT: dup.16b v0, w8
; CHECK-NEXT: mov.b v0[5], wzr
-; CHECK-NEXT: mov.b v0[8], w8
; CHECK-NEXT: mov.b v0[9], wzr
; CHECK-NEXT: ret
%a1 = lshr exact i32 %a0, 5
@@ -145,7 +144,6 @@ define <16 x i8> @test_insert_v16i8_insert_2_undef_base_different_valeus_skip8(i
; CHECK-NEXT: mov.b v0[2], w8
; CHECK-NEXT: mov.b v0[5], wzr
; CHECK-NEXT: mov.b v0[7], w8
-; CHECK-NEXT: mov.b v0[8], w8
; CHECK-NEXT: mov.b v0[9], wzr
; CHECK-NEXT: mov.b v0[12], w8
; CHECK-NEXT: mov.b v0[15], w8
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-vselect.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-vselect.ll
index 2905d707bdd09..9efe0b33910c8 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-vselect.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-vselect.ll
@@ -37,10 +37,6 @@ define void @select_v16f16(ptr %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1]
; CHECK-NEXT: fcmeq p1.h, p0/z, z0.h, z1.h
-; CHECK-NEXT: mov z2.h, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: ptrue p1.h
-; CHECK-NEXT: and z2.h, z2.h, #0x1
-; CHECK-NEXT: cmpne p1.h, p1/z, z2.h, #0
; CHECK-NEXT: sel z0.h, p1, z0.h, z1.h
; CHECK-NEXT: st1h { z0.h }, p0, [x0]
; CHECK-NEXT: ret
@@ -63,15 +59,8 @@ define void @select_v32f16(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: fcmeq p1.h, p0/z, z0.h, z1.h
; VBITS_GE_256-NEXT: fcmeq p2.h, p0/z, z2.h, z3.h
-; VBITS_GE_256-NEXT: mov z4.h, p1/z, #-1 // =0xffffffffffffffff
-; VBITS_GE_256-NEXT: ptrue p1.h
-; VBITS_GE_256-NEXT: mov z5.h, p2/z, #-1 // =0xffffffffffffffff
-; VBITS_GE_256-NEXT: and z4.h, z4.h, #0x1
-; VBITS_GE_256-NEXT: and z5.h, z5.h, #0x1
-; VBITS_GE_256-NEXT: cmpne p2.h, p1/z, z4.h, #0
-; VBITS_GE_256-NEXT: cmpne p1.h, p1/z, z5.h, #0
-; VBITS_GE_256-NEXT: sel z0.h, p2, z0.h, z1.h
-; VBITS_GE_256-NEXT: sel z1.h, p1, z2.h, z3.h
+; VBITS_GE_256-NEXT: sel z0.h, p1, z0.h, z1.h
+; VBITS_GE_256-NEXT: sel z1.h, p2, z2.h, z3.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
@@ -82,10 +71,6 @@ define void @select_v32f16(ptr %a, ptr %b) #0 {
; VBITS_GE_512-NEXT: ld1h { z0.h }, p0/z, [x0]
; VBITS_GE_512-NEXT: ld1h { z1.h }, p0/z, [x1]
; VBITS_GE_512-NEXT: fcmeq p1.h, p0/z, z0.h, z1.h
-; VBITS_GE_512-NEXT: mov z2.h, p1/z, #-1 // =0xffffffffffffffff
-; VBITS_GE_512-NEXT: ptrue p1.h
-; VBITS_GE_512-NEXT: and z2.h, z2.h, #0x1
-; VBITS_GE_512-NEXT: cmpne p1.h, p1/z, z2.h, #0
; VBITS_GE_512-NEXT: sel z0.h, p1, z0.h, z1.h
; VBITS_GE_512-NEXT: st1h { z0.h }, p0, [x0]
; VBITS_GE_512-NEXT: ret
@@ -104,10 +89,6 @@ define void @select_v64f16(ptr %a, ptr %b) vscale_range(8,0) #0 {
; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1]
; CHECK-NEXT: fcmeq p1.h, p0/z, z0.h, z1.h
-; CHECK-NEXT: mov z2.h, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: ptrue p1.h
-; CHECK-NEXT: and z2.h, z2.h, #0x1
-; CHECK-NEXT: cmpne p1.h, p1/z, z2.h, #0
; CHECK-NEXT: sel z0.h, p1, z0.h, z1.h
; CHECK-NEXT: st1h { z0.h }, p0, [x0]
; CHECK-NEXT: ret
@@ -126,10 +107,6 @@ define void @select_v128f16(ptr %a, ptr %b) vscale_range(16,0) #0 {
; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1]
; CHECK-NEXT: fcmeq p1.h, p0/z, z0.h, z1.h
-; CHECK-NEXT: mov z2.h, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: ptrue p1.h
-; CHECK-NEXT: and z2.h, z2.h, #0x1
-; CHECK-NEXT: cmpne p1.h, p1/z, z2.h, #0
; CHECK-NEXT: sel z0.h, p1, z0.h, z1.h
; CHECK-NEXT: st1h { z0.h }, p0, [x0]
; CHECK-NEXT: ret
@@ -173,10 +150,6 @@ define void @select_v8f32(ptr %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1]
; CHECK-NEXT: fcmeq p1.s, p0/z, z0.s, z1.s
-; CHECK-NEXT: mov z2.s, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: ptrue p1.s
-; CHECK-NEXT: and z2.s, z2.s, #0x1
-; CHECK-NEXT: cmpne p1.s, p1/z, z2.s, #0
; CHECK-NEXT: sel z0.s, p1, z0.s, z1.s
; CHECK-NEXT: st1w { z0.s }, p0, [x0]
; CHECK-NEXT: ret
@@ -199,15 +172,8 @@ define void @select_v16f32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: fcmeq p1.s, p0/z, z0.s, z1.s
; VBITS_GE_256-NEXT: fcmeq p2.s, p0/z, z2.s, z3.s
-; VBITS_GE_256-NEXT: mov z4.s, p1/z, #-1 // =0xffffffffffffffff
-; VBITS_GE_256-NEXT: ptrue p1.s
-; VBITS_GE_256-NEXT: mov z5.s, p2/z, #-1 // =0xffffffffffffffff
-; VBITS_GE_256-NEXT: and z4.s, z4.s, #0x1
-; VBITS_GE_256-NEXT: and z5.s, z5.s, #0x1
-; VBITS_GE_256-NEXT: cmpne p2.s, p1/z, z4.s, #0
-; VBITS_GE_256-NEXT: cmpne p1.s, p1/z, z5.s, #0
-; VBITS_GE_256-NEXT: sel z0.s, p2, z0.s, z1.s
-; VBITS_GE_256-NEXT: sel z1.s, p1, z2.s, z3.s
+; VBITS_GE_256-NEXT: sel z0.s, p1, z0.s, z1.s
+; VBITS_GE_256-NEXT: sel z1.s, p2, z2.s, z3.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
@@ -218,10 +184,6 @@ define void @select_v16f32(ptr %a, ptr %b) #0 {
; VBITS_GE_512-NEXT: ld1w { z0.s }, p0/z, [x0]
; VBITS_GE_512-NEXT: ld1w { z1.s }, p0/z, [x1]
; VBITS_GE_512-NEXT: fcmeq p1.s, p0/z, z0.s, z1.s
-; VBITS_GE_512-NEXT: mov z2.s, p1/z, #-1 // =0xffffffffffffffff
-; VBITS_GE_512-NEXT: ptrue p1.s
-; VBITS_GE_512-NEXT: and z2.s, z2.s, #0x1
-; VBITS_GE_512-NEXT: cmpne p1.s, p1/z, z2.s, #0
; VBITS_GE_512-NEXT: sel z0.s, p1, z0.s, z1.s
; VBITS_GE_512-NEXT: st1w { z0.s }, p0, [x0]
; VBITS_GE_512-NEXT: ret
@@ -240,10 +202,6 @@ define void @select_v32f32(ptr %a, ptr %b) vscale_range(8,0) #0 {
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1]
; CHECK-NEXT: fcmeq p1.s, p0/z, z0.s, z1.s
-; CHECK-NEXT: mov z2.s, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: ptrue p1.s
-; CHECK-NEXT: and z2.s, z2.s, #0x1
-; CHECK-NEXT: cmpne p1.s, p1/z, z2.s, #0
; CHECK-NEXT: sel z0.s, p1, z0.s, z1.s
; CHECK-NEXT: st1w { z0.s }, p0, [x0]
; CHECK-NEXT: ret
@@ -262,10 +220,6 @@ define void @select_v64f32(ptr %a, ptr %b) vscale_range(16,0) #0 {
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1]
; CHECK-NEXT: fcmeq p1.s, p0/z, z0.s, z1.s
-; CHECK-NEXT: mov z2.s, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: ptrue p1.s
-; CHECK-NEXT: and z2.s, z2.s, #0x1
-; CHECK-NEXT: cmpne p1.s, p1/z, z2.s, #0
; CHECK-NEXT: sel z0.s, p1, z0.s, z1.s
; CHECK-NEXT: st1w { z0.s }, p0, [x0]
; CHECK-NEXT: ret
@@ -310,10 +264,6 @@ define void @select_v4f64(ptr %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1]
; CHECK-NEXT: fcmeq p1.d, p0/z, z0.d, z1.d
-; CHECK-NEXT: mov z2.d, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: ptrue p1.d
-; CHECK-NEXT: and z2.d, z2.d, #0x1
-; CHECK-NEXT: cmpne p1.d, p1/z, z2.d, #0
; CHECK-NEXT: sel z0.d, p1, z0.d, z1.d
; CHECK-NEXT: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
@@ -336,15 +286,8 @@ define void @select_v8f64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: fcmeq p1.d, p0/z, z0.d, z1.d
; VBITS_GE_256-NEXT: fcmeq p2.d, p0/z, z2.d, z3.d
-; VBITS_GE_256-NEXT: mov z4.d, p1/z, #-1 // =0xffffffffffffffff
-; VBITS_GE_256-NEXT: ptrue p1.d
-; VBITS_GE_256-NEXT: mov z5.d, p2/z, #-1 // =0xffffffffffffffff
-; VBITS_GE_256-NEXT: and z4.d, z4.d, #0x1
-; VBITS_GE_256-NEXT: and z5.d, z5.d, #0x1
-; VBITS_GE_256-NEXT: cmpne p2.d, p1/z, z4.d, #0
-; VBITS_GE_256-NEXT: cmpne p1.d, p1/z, z5.d, #0
-; VBITS_GE_256-NEXT: sel z0.d, p2, z0.d, z1.d
-; VBITS_GE_256-NEXT: sel z1.d, p1, z2.d, z3.d
+; VBITS_GE_256-NEXT: sel z0.d, p1, z0.d, z1.d
+; VBITS_GE_256-NEXT: sel z1.d, p2, z2.d, z3.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
@@ -355,10 +298,6 @@ define void @select_v8f64(ptr %a, ptr %b) #0 {
; VBITS_GE_512-NEXT: ld1d { z0.d }, p0/z, [x0]
; VBITS_GE_512-NEXT: ld1d { z1.d }, p0/z, [x1]
; VBITS_GE_512-NEXT: fcmeq p1.d, p0/z, z0.d, z1.d
-; VBITS_GE_512-NEXT: mov z2.d, p1/z, #-1 // =0xffffffffffffffff
-; VBITS_GE_512-NEXT: ptrue p1.d
-; VBITS_GE_512-NEXT: and z2.d, z2.d, #0x1
-; VBITS_GE_512-NEXT: cmpne p1.d, p1/z, z2.d, #0
; VBITS_GE_512-NEXT: sel z0.d, p1, z0.d, z1.d
; VBITS_GE_512-NEXT: st1d { z0.d }, p0, [x0]
; VBITS_GE_512-NEXT: ret
@@ -377,10 +316,6 @@ define void @select_v16f64(ptr %a, ptr %b) vscale_range(8,0) #0 {
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1]
; CHECK-NEXT: fcmeq p1.d, p0/z, z0.d, z1.d
-; CHECK-NEXT: mov z2.d, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: ptrue p1.d
-; CHECK-NEXT: and z2.d, z2.d, #0x1
-; CHECK-NEXT: cmpne p1.d, p1/z, z2.d, #0
; CHECK-NEXT: sel z0.d, p1, z0.d, z1.d
; CHECK-NEXT: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
@@ -399,10 +334,6 @@ define void @select_v32f64(ptr %a, ptr %b) vscale_range(16,0) #0 {
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1]
; CHECK-NEXT: fcmeq p1.d, p0/z, z0.d, z1.d
-; CHECK-NEXT: mov z2.d, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: ptrue p1.d
-; CHECK-NEXT: and z2.d, z2.d, #0x1
-; CHECK-NEXT: cmpne p1.d, p1/z, z2.d, #0
; CHECK-NEXT: sel z0.d, p1, z0.d, z1.d
; CHECK-NEXT: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests.ll
index 2f76be61ae192..5e940078695f0 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests.ll
@@ -12,12 +12,12 @@ define void @foo(ptr %a) #0 {
; CHECK: SelectionDAG has 13 nodes:
; CHECK-NEXT: t0: ch,glue = EntryToken
; CHECK-NEXT: t2: i64,ch = CopyFromReg t0, Register:i64 %0
-; CHECK-NEXT: t21: nxv2i64,ch = LDR_ZXI<Mem:(volatile load (<vscale x 1 x s128>) from %ir.a, align 64)> t2, TargetConstant:i64<0>, t0
+; CHECK-NEXT: t22: nxv2i64,ch = LDR_ZXI<Mem:(volatile load (<vscale x 1 x s128>) from %ir.a, align 64)> t2, TargetConstant:i64<0>, t0
; CHECK-NEXT: t8: i64 = ADDXri TargetFrameIndex:i64<1>, TargetConstant:i32<0>, TargetConstant:i32<0>
; CHECK-NEXT: t6: i64 = ADDXri TargetFrameIndex:i64<0>, TargetConstant:i32<0>, TargetConstant:i32<0>
-; CHECK-NEXT: t22: ch = STR_ZXI<Mem:(volatile store (<vscale x 1 x s128>) into %ir.r0, align 64)> t21, t6, TargetConstant:i64<0>, t21:1
-; CHECK-NEXT: t23: ch = STR_ZXI<Mem:(volatile store (<vscale x 1 x s128>) into %ir.r1, align 64)> t21, t8, TargetConstant:i64<0>, t22
-; CHECK-NEXT: t10: ch = RET_ReallyLR t23
+; CHECK-NEXT: t23: ch = STR_ZXI<Mem:(volatile store (<vscale x 1 x s128>) into %ir.r0, align 64)> t22, t6, TargetConstant:i64<0>, t22:1
+; CHECK-NEXT: t24: ch = STR_ZXI<Mem:(volatile store (<vscale x 1 x s128>) into %ir.r1, align 64)> t22, t8, TargetConstant:i64<0>, t23
+; CHECK-NEXT: t10: ch = RET_ReallyLR t24
; CHECK-EMPTY:
entry:
%r0 = alloca <8 x i64>
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll
index 0e95da31c13cc..9cebbc4aab9b7 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll
@@ -36,10 +36,6 @@ define void @select_v32i8(ptr %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT: ld1b { z1.b }, p0/z, [x1]
; CHECK-NEXT: cmpeq p1.b, p0/z, z0.b, z1.b
-; CHECK-NEXT: mov z2.b, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: ptrue p1.b
-; CHECK-NEXT: and z2.b, z2.b, #0x1
-; CHECK-NEXT: cmpne p1.b, p1/z, z2.b, #0
; CHECK-NEXT: sel z0.b, p1, z0.b, z1.b
; CHECK-NEXT: st1b { z0.b }, p0, [x0]
; CHECK-NEXT: ret
@@ -62,15 +58,8 @@ define void @select_v64i8(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1]
; VBITS_GE_256-NEXT: cmpeq p1.b, p0/z, z0.b, z1.b
; VBITS_GE_256-NEXT: cmpeq p2.b, p0/z, z2.b, z3.b
-; VBITS_GE_256-NEXT: mov z4.b, p1/z, #-1 // =0xffffffffffffffff
-; VBITS_GE_256-NEXT: ptrue p1.b
-; VBITS_GE_256-NEXT: mov z5.b, p2/z, #-1 // =0xffffffffffffffff
-; VBITS_GE_256-NEXT: and z4.b, z4.b, #0x1
-; VBITS_GE_256-NEXT: and z5.b, z5.b, #0x1
-; VBITS_GE_256-NEXT: cmpne p2.b, p1/z, z4.b, #0
-; VBITS_GE_256-NEXT: cmpne p1.b, p1/z, z5.b, #0
-; VBITS_GE_256-NEXT: sel z0.b, p2, z0.b, z1.b
-; VBITS_GE_256-NEXT: sel z1.b, p1, z2.b, z3.b
+; VBITS_GE_256-NEXT: sel z0.b, p1, z0.b, z1.b
+; VBITS_GE_256-NEXT: sel z1.b, p2, z2.b, z3.b
; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8]
; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0]
; VBITS_GE_256-NEXT: ret
@@ -81,10 +70,6 @@ define void @select_v64i8(ptr %a, ptr %b) #0 {
; VBITS_GE_512-NEXT: ld1b { z0.b }, p0/z, [x0]
; VBITS_GE_512-NEXT: ld1b { z1.b }, p0/z, [x1]
; VBITS_GE_512-NEXT: cmpeq p1.b, p0/z, z0.b, z1.b
-; VBITS_GE_512-NEXT: mov z2.b, p1/z, #-1 // =0xffffffffffffffff
-; VBITS_GE_512-NEXT: ptrue p1.b
-; VBITS_GE_512-NEXT: and z2.b, z2.b, #0x1
-; VBITS_GE_512-NEXT: cmpne p1.b, p1/z, z2.b, #0
; VBITS_GE_512-NEXT: sel z0.b, p1, z0.b, z1.b
; VBITS_GE_512-NEXT: st1b { z0.b }, p0, [x0]
; VBITS_GE_512-NEXT: ret
@@ -103,10 +88,6 @@ define void @select_v128i8(ptr %a, ptr %b) vscale_range(8,0) #0 {
; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT: ld1b { z1.b }, p0/z, [x1]
; CHECK-NEXT: cmpeq p1.b, p0/z, z0.b, z1.b
-; CHECK-NEXT: mov z2.b, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: ptrue p1.b
-; CHECK-NEXT: and z2.b, z2.b, #0x1
-; CHECK-NEXT: cmpne p1.b, p1/z, z2.b, #0
; CHECK-NEXT: sel z0.b, p1, z0.b, z1.b
; CHECK-NEXT: st1b { z0.b }, p0, [x0]
; CHECK-NEXT: ret
@@ -125,10 +106,6 @@ define void @select_v256i8(ptr %a, ptr %b) vscale_range(16,0) #0 {
; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT: ld1b { z1.b }, p0/z, [x1]
; CHECK-NEXT: cmpeq p1.b, p0/z, z0.b, z1.b
-; CHECK-NEXT: mov z2.b, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: ptrue p1.b
-; CHECK-NEXT: and z2.b, z2.b, #0x1
-; CHECK-NEXT: cmpne p1.b, p1/z, z2.b, #0
; CHECK-NEXT: sel z0.b, p1, z0.b, z1.b
; CHECK-NEXT: st1b { z0.b }, p0, [x0]
; CHECK-NEXT: ret
@@ -172,10 +149,6 @@ define void @select_v16i16(ptr %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1]
; CHECK-NEXT: cmpeq p1.h, p0/z, z0.h, z1.h
-; CHECK-NEXT: mov z2.h, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: ptrue p1.h
-; CHECK-NEXT: and z2.h, z2.h, #0x1
-; CHECK-NEXT: cmpne p1.h, p1/z, z2.h, #0
; CHECK-NEXT: sel z0.h, p1, z0.h, z1.h
; CHECK-NEXT: st1h { z0.h }, p0, [x0]
; CHECK-NEXT: ret
@@ -198,15 +171,8 @@ define void @select_v32i16(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: cmpeq p1.h, p0/z, z0.h, z1.h
; VBITS_GE_256-NEXT: cmpeq p2.h, p0/z, z2.h, z3.h
-; VBITS_GE_256-NEXT: mov z4.h, p1/z, #-1 // =0xffffffffffffffff
-; VBITS_GE_256-NEXT: ptrue p1.h
-; VBITS_GE_256-NEXT: mov z5.h, p2/z, #-1 // =0xffffffffffffffff
-; VBITS_GE_256-NEXT: and z4.h, z4.h, #0x1
-; VBITS_GE_256-NEXT: and z5.h, z5.h, #0x1
-; VBITS_GE_256-NEXT: cmpne p2.h, p1/z, z4.h, #0
-; VBITS_GE_256-NEXT: cmpne p1.h, p1/z, z5.h, #0
-; VBITS_GE_256-NEXT: sel z0.h, p2, z0.h, z1.h
-; VBITS_GE_256-NEXT: sel z1.h, p1, z2.h, z3.h
+; VBITS_GE_256-NEXT: sel z0.h, p1, z0.h, z1.h
+; VBITS_GE_256-NEXT: sel z1.h, p2, z2.h, z3.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
@@ -217,10 +183,6 @@ define void @select_v32i16(ptr %a, ptr %b) #0 {
; VBITS_GE_512-NEXT: ld1h { z0.h }, p0/z, [x0]
; VBITS_GE_512-NEXT: ld1h { z1.h }, p0/z, [x1]
; VBITS_GE_512-NEXT: cmpeq p1.h, p0/z, z0.h, z1.h
-; VBITS_GE_512-NEXT: mov z2.h, p1/z, #-1 // =0xffffffffffffffff
-; VBITS_GE_512-NEXT: ptrue p1.h
-; VBITS_GE_512-NEXT: and z2.h, z2.h, #0x1
-; VBITS_GE_512-NEXT: cmpne p1.h, p1/z, z2.h, #0
; VBITS_GE_512-NEXT: sel z0.h, p1, z0.h, z1.h
; VBITS_GE_512-NEXT: st1h { z0.h }, p0, [x0]
; VBITS_GE_512-NEXT: ret
@@ -239,10 +201,6 @@ define void @select_v64i16(ptr %a, ptr %b) vscale_range(8,0) #0 {
; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1]
; CHECK-NEXT: cmpeq p1.h, p0/z, z0.h, z1.h
-; CHECK-NEXT: mov z2.h, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: ptrue p1.h
-; CHECK-NEXT: and z2.h, z2.h, #0x1
-; CHECK-NEXT: cmpne p1.h, p1/z, z2.h, #0
; CHECK-NEXT: sel z0.h, p1, z0.h, z1.h
; CHECK-NEXT: st1h { z0.h }, p0, [x0]
; CHECK-NEXT: ret
@@ -261,10 +219,6 @@ define void @select_v128i16(ptr %a, ptr %b) vscale_range(16,0) #0 {
; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1]
; CHECK-NEXT: cmpeq p1.h, p0/z, z0.h, z1.h
-; CHECK-NEXT: mov z2.h, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: ptrue p1.h
-; CHECK-NEXT: and z2.h, z2.h, #0x1
-; CHECK-NEXT: cmpne p1.h, p1/z, z2.h, #0
; CHECK-NEXT: sel z0.h, p1, z0.h, z1.h
; CHECK-NEXT: st1h { z0.h }, p0, [x0]
; CHECK-NEXT: ret
@@ -308,10 +262,6 @@ define void @select_v8i32(ptr %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1]
; CHECK-NEXT: cmpeq p1.s, p0/z, z0.s, z1.s
-; CHECK-NEXT: mov z2.s, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: ptrue p1.s
-; CHECK-NEXT: and z2.s, z2.s, #0x1
-; CHECK-NEXT: cmpne p1.s, p1/z, z2.s, #0
; CHECK-NEXT: sel z0.s, p1, z0.s, z1.s
; CHECK-NEXT: st1w { z0.s }, p0, [x0]
; CHECK-NEXT: ret
@@ -334,15 +284,8 @@ define void @select_v16i32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: cmpeq p1.s, p0/z, z0.s, z1.s
; VBITS_GE_256-NEXT: cmpeq p2.s, p0/z, z2.s, z3.s
-; VBITS_GE_256-NEXT: mov z4.s, p1/z, #-1 // =0xffffffffffffffff
-; VBITS_GE_256-NEXT: ptrue p1.s
-; VBITS_GE_256-NEXT: mov z5.s, p2/z, #-1 // =0xffffffffffffffff
-; VBITS_GE_256-NEXT: and z4.s, z4.s, #0x1
-; VBITS_GE_256-NEXT: and z5.s, z5.s, #0x1
-; VBITS_GE_256-NEXT: cmpne p2.s, p1/z, z4.s, #0
-; VBITS_GE_256-NEXT: cmpne p1.s, p1/z, z5.s, #0
-; VBITS_GE_256-NEXT: sel z0.s, p2, z0.s, z1.s
-; VBITS_GE_256-NEXT: sel z1.s, p1, z2.s, z3.s
+; VBITS_GE_256-NEXT: sel z0.s, p1, z0.s, z1.s
+; VBITS_GE_256-NEXT: sel z1.s, p2, z2.s, z3.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
@@ -353,10 +296,6 @@ define void @select_v16i32(ptr %a, ptr %b) #0 {
; VBITS_GE_512-NEXT: ld1w { z0.s }, p0/z, [x0]
; VBITS_GE_512-NEXT: ld1w { z1.s }, p0/z, [x1]
; VBITS_GE_512-NEXT: cmpeq p1.s, p0/z, z0.s, z1.s
-; VBITS_GE_512-NEXT: mov z2.s, p1/z, #-1 // =0xffffffffffffffff
-; VBITS_GE_512-NEXT: ptrue p1.s
-; VBITS_GE_512-NEXT: and z2.s, z2.s, #0x1
-; VBITS_GE_512-NEXT: cmpne p1.s, p1/z, z2.s, #0
; VBITS_GE_512-NEXT: sel z0.s, p1, z0.s, z1.s
; VBITS_GE_512-NEXT: st1w { z0.s }, p0, [x0]
; VBITS_GE_512-NEXT: ret
@@ -375,10 +314,6 @@ define void @select_v32i32(ptr %a, ptr %b) vscale_range(8,0) #0 {
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1]
; CHECK-NEXT: cmpeq p1.s, p0/z, z0.s, z1.s
-; CHECK-NEXT: mov z2.s, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: ptrue p1.s
-; CHECK-NEXT: and z2.s, z2.s, #0x1
-; CHECK-NEXT: cmpne p1.s, p1/z, z2.s, #0
; CHECK-NEXT: sel z0.s, p1, z0.s, z1.s
; CHECK-NEXT: st1w { z0.s }, p0, [x0]
; CHECK-NEXT: ret
@@ -397,10 +332,6 @@ define void @select_v64i32(ptr %a, ptr %b) vscale_range(16,0) #0 {
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1]
; CHECK-NEXT: cmpeq p1.s, p0/z, z0.s, z1.s
-; CHECK-NEXT: mov z2.s, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: ptrue p1.s
-; CHECK-NEXT: and z2.s, z2.s, #0x1
-; CHECK-NEXT: cmpne p1.s, p1/z, z2.s, #0
; CHECK-NEXT: sel z0.s, p1, z0.s, z1.s
; CHECK-NEXT: st1w { z0.s }, p0, [x0]
; CHECK-NEXT: ret
@@ -445,10 +376,6 @@ define void @select_v4i64(ptr %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1]
; CHECK-NEXT: cmpeq p1.d, p0/z, z0.d, z1.d
-; CHECK-NEXT: mov z2.d, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: ptrue p1.d
-; CHECK-NEXT: and z2.d, z2.d, #0x1
-; CHECK-NEXT: cmpne p1.d, p1/z, z2.d, #0
; CHECK-NEXT: sel z0.d, p1, z0.d, z1.d
; CHECK-NEXT: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
@@ -471,15 +398,8 @@ define void @select_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: cmpeq p1.d, p0/z, z0.d, z1.d
; VBITS_GE_256-NEXT: cmpeq p2.d, p0/z, z2.d, z3.d
-; VBITS_GE_256-NEXT: mov z4.d, p1/z, #-1 // =0xffffffffffffffff
-; VBITS_GE_256-NEXT: ptrue p1.d
-; VBITS_GE_256-NEXT: mov z5.d, p2/z, #-1 // =0xffffffffffffffff
-; VBITS_GE_256-NEXT: and z4.d, z4.d, #0x1
-; VBITS_GE_256-NEXT: and z5.d, z5.d, #0x1
-; VBITS_GE_256-NEXT: cmpne p2.d, p1/z, z4.d, #0
-; VBITS_GE_256-NEXT: cmpne p1.d, p1/z, z5.d, #0
-; VBITS_GE_256-NEXT: sel z0.d, p2, z0.d, z1.d
-; VBITS_GE_256-NEXT: sel z1.d, p1, z2.d, z3.d
+; VBITS_GE_256-NEXT: sel z0.d, p1, z0.d, z1.d
+; VBITS_GE_256-NEXT: sel z1.d, p2, z2.d, z3.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
@@ -490,10 +410,6 @@ define void @select_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_512-NEXT: ld1d { z0.d }, p0/z, [x0]
; VBITS_GE_512-NEXT: ld1d { z1.d }, p0/z, [x1]
; VBITS_GE_512-NEXT: cmpeq p1.d, p0/z, z0.d, z1.d
-; VBITS_GE_512-NEXT: mov z2.d, p1/z, #-1 // =0xffffffffffffffff
-; VBITS_GE_512-NEXT: ptrue p1.d
-; VBITS_GE_512-NEXT: and z2.d, z2.d, #0x1
-; VBITS_GE_512-NEXT: cmpne p1.d, p1/z, z2.d, #0
; VBITS_GE_512-NEXT: sel z0.d, p1, z0.d, z1.d
; VBITS_GE_512-NEXT: st1d { z0.d }, p0, [x0]
; VBITS_GE_512-NEXT: ret
@@ -512,10 +428,6 @@ define void @select_v16i64(ptr %a, ptr %b) vscale_range(8,0) #0 {
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1]
; CHECK-NEXT: cmpeq p1.d, p0/z, z0.d, z1.d
-; CHECK-NEXT: mov z2.d, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: ptrue p1.d
-; CHECK-NEXT: and z2.d, z2.d, #0x1
-; CHECK-NEXT: cmpne p1.d, p1/z, z2.d, #0
; CHECK-NEXT: sel z0.d, p1, z0.d, z1.d
; CHECK-NEXT: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
@@ -534,10 +446,6 @@ define void @select_v32i64(ptr %a, ptr %b) vscale_range(16,0) #0 {
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1]
; CHECK-NEXT: cmpeq p1.d, p0/z, z0.d, z1.d
-; CHECK-NEXT: mov z2.d, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: ptrue p1.d
-; CHECK-NEXT: and z2.d, z2.d, #0x1
-; CHECK-NEXT: cmpne p1.d, p1/z, z2.d, #0
; CHECK-NEXT: sel z0.d, p1, z0.d, z1.d
; CHECK-NEXT: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
index ebd32c73ec65b..093e6cd9328c8 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
@@ -1198,15 +1198,11 @@ define void @masked_gather_passthru(ptr %a, ptr %b, ptr %c) vscale_range(16,0) #
; CHECK-NEXT: ptrue p0.s, vl32
; CHECK-NEXT: ptrue p2.d, vl32
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ld1w { z1.s }, p0/z, [x2]
; CHECK-NEXT: fcmeq p1.s, p0/z, z0.s, #0.0
; CHECK-NEXT: ld1d { z0.d }, p2/z, [x1]
; CHECK-NEXT: punpklo p2.h, p1.b
-; CHECK-NEXT: mov z1.s, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: ptrue p1.s
; CHECK-NEXT: ld1w { z0.d }, p2/z, [z0.d]
-; CHECK-NEXT: and z1.s, z1.s, #0x1
-; CHECK-NEXT: cmpne p1.s, p1/z, z1.s, #0
-; CHECK-NEXT: ld1w { z1.s }, p0/z, [x2]
; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
; CHECK-NEXT: sel z0.s, p1, z0.s, z1.s
; CHECK-NEXT: st1w { z0.s }, p0, [x0]
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll
index 8b845dff64ffe..ec0693a541e44 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll
@@ -199,13 +199,6 @@ define void @select_v16f16(ptr %a, ptr %b) {
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fcmeq p1.h, p0/z, z1.h, z0.h
; CHECK-NEXT: fcmeq p0.h, p0/z, z2.h, z3.h
-; CHECK-NEXT: mov z4.h, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: mov z5.h, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: ptrue p0.h
-; CHECK-NEXT: and z4.h, z4.h, #0x1
-; CHECK-NEXT: and z5.h, z5.h, #0x1
-; CHECK-NEXT: cmpne p1.h, p0/z, z4.h, #0
-; CHECK-NEXT: cmpne p0.h, p0/z, z5.h, #0
; CHECK-NEXT: mov z0.h, p1/m, z1.h
; CHECK-NEXT: sel z1.h, p0, z2.h, z3.h
; CHECK-NEXT: stp q0, q1, [x0]
@@ -441,13 +434,6 @@ define void @select_v8f32(ptr %a, ptr %b) {
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fcmeq p1.s, p0/z, z1.s, z0.s
; CHECK-NEXT: fcmeq p0.s, p0/z, z2.s, z3.s
-; CHECK-NEXT: mov z4.s, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: mov z5.s, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: and z4.s, z4.s, #0x1
-; CHECK-NEXT: and z5.s, z5.s, #0x1
-; CHECK-NEXT: cmpne p1.s, p0/z, z4.s, #0
-; CHECK-NEXT: cmpne p0.s, p0/z, z5.s, #0
; CHECK-NEXT: mov z0.s, p1/m, z1.s
; CHECK-NEXT: sel z1.s, p0, z2.s, z3.s
; CHECK-NEXT: stp q0, q1, [x0]
@@ -572,13 +558,6 @@ define void @select_v4f64(ptr %a, ptr %b) {
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fcmeq p1.d, p0/z, z1.d, z0.d
; CHECK-NEXT: fcmeq p0.d, p0/z, z2.d, z3.d
-; CHECK-NEXT: mov z4.d, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: mov z5.d, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: and z4.d, z4.d, #0x1
-; CHECK-NEXT: and z5.d, z5.d, #0x1
-; CHECK-NEXT: cmpne p1.d, p0/z, z4.d, #0
-; CHECK-NEXT: cmpne p0.d, p0/z, z5.d, #0
; CHECK-NEXT: mov z0.d, p1/m, z1.d
; CHECK-NEXT: sel z1.d, p0, z2.d, z3.d
; CHECK-NEXT: stp q0, q1, [x0]
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-vselect.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-vselect.ll
index 12b7886d76c70..39701131d7db6 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-vselect.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-vselect.ll
@@ -293,13 +293,6 @@ define void @select_v32i8(ptr %a, ptr %b) {
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: cmpeq p1.b, p0/z, z1.b, z0.b
; CHECK-NEXT: cmpeq p0.b, p0/z, z2.b, z3.b
-; CHECK-NEXT: mov z4.b, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: mov z5.b, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: ptrue p0.b
-; CHECK-NEXT: and z4.b, z4.b, #0x1
-; CHECK-NEXT: and z5.b, z5.b, #0x1
-; CHECK-NEXT: cmpne p1.b, p0/z, z4.b, #0
-; CHECK-NEXT: cmpne p0.b, p0/z, z5.b, #0
; CHECK-NEXT: mov z0.b, p1/m, z1.b
; CHECK-NEXT: sel z1.b, p0, z2.b, z3.b
; CHECK-NEXT: stp q0, q1, [x0]
@@ -704,13 +697,6 @@ define void @select_v16i16(ptr %a, ptr %b) {
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z0.h
; CHECK-NEXT: cmpeq p0.h, p0/z, z2.h, z3.h
-; CHECK-NEXT: mov z4.h, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: mov z5.h, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: ptrue p0.h
-; CHECK-NEXT: and z4.h, z4.h, #0x1
-; CHECK-NEXT: and z5.h, z5.h, #0x1
-; CHECK-NEXT: cmpne p1.h, p0/z, z4.h, #0
-; CHECK-NEXT: cmpne p0.h, p0/z, z5.h, #0
; CHECK-NEXT: mov z0.h, p1/m, z1.h
; CHECK-NEXT: sel z1.h, p0, z2.h, z3.h
; CHECK-NEXT: stp q0, q1, [x0]
@@ -925,13 +911,6 @@ define void @select_v8i32(ptr %a, ptr %b) {
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: cmpeq p1.s, p0/z, z1.s, z0.s
; CHECK-NEXT: cmpeq p0.s, p0/z, z2.s, z3.s
-; CHECK-NEXT: mov z4.s, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: mov z5.s, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: and z4.s, z4.s, #0x1
-; CHECK-NEXT: and z5.s, z5.s, #0x1
-; CHECK-NEXT: cmpne p1.s, p0/z, z4.s, #0
-; CHECK-NEXT: cmpne p0.s, p0/z, z5.s, #0
; CHECK-NEXT: mov z0.s, p1/m, z1.s
; CHECK-NEXT: sel z1.s, p0, z2.s, z3.s
; CHECK-NEXT: stp q0, q1, [x0]
@@ -1065,13 +1044,6 @@ define void @select_v4i64(ptr %a, ptr %b) {
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: cmpeq p1.d, p0/z, z1.d, z0.d
; CHECK-NEXT: cmpeq p0.d, p0/z, z2.d, z3.d
-; CHECK-NEXT: mov z4.d, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: mov z5.d, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: and z4.d, z4.d, #0x1
-; CHECK-NEXT: and z5.d, z5.d, #0x1
-; CHECK-NEXT: cmpne p1.d, p0/z, z4.d, #0
-; CHECK-NEXT: cmpne p0.d, p0/z, z5.d, #0
; CHECK-NEXT: mov z0.d, p1/m, z1.d
; CHECK-NEXT: sel z1.d, p0, z2.d, z3.d
; CHECK-NEXT: stp q0, q1, [x0]
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfw-web-simplification.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfw-web-simplification.ll
index 5aa3a246d7616..aba9056c78cda 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfw-web-simplification.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfw-web-simplification.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+f,+d -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=1 | FileCheck %s --check-prefixes=NO_FOLDING,NO_FOLDING1
-; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+f,+d -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=2 | FileCheck %s --check-prefixes=NO_FOLDING,NO_FOLDING2
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+f,+d -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=1 | FileCheck %s --check-prefixes=NO_FOLDING
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+f,+d -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=2 | FileCheck %s --check-prefixes=NO_FOLDING
; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+f,+d -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=3 | FileCheck %s --check-prefixes=FOLDING,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+f,+d -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=3 | FileCheck %s --check-prefixes=FOLDING,ZVFHMIN
; Check that the default value enables the web folding and
@@ -8,35 +8,20 @@
; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+f,+d -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=FOLDING
define void @vfwmul_v2f116_multiple_users(ptr %x, ptr %y, ptr %z, <2 x half> %a, <2 x half> %b, <2 x half> %b2) {
-; NO_FOLDING1-LABEL: vfwmul_v2f116_multiple_users:
-; NO_FOLDING1: # %bb.0:
-; NO_FOLDING1-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
-; NO_FOLDING1-NEXT: vfwcvt.f.f.v v11, v8
-; NO_FOLDING1-NEXT: vfwcvt.f.f.v v8, v9
-; NO_FOLDING1-NEXT: vfwcvt.f.f.v v9, v10
-; NO_FOLDING1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; NO_FOLDING1-NEXT: vfmul.vv v10, v11, v8
-; NO_FOLDING1-NEXT: vfadd.vv v11, v11, v9
-; NO_FOLDING1-NEXT: vfsub.vv v8, v8, v9
-; NO_FOLDING1-NEXT: vse32.v v10, (a0)
-; NO_FOLDING1-NEXT: vse32.v v11, (a1)
-; NO_FOLDING1-NEXT: vse32.v v8, (a2)
-; NO_FOLDING1-NEXT: ret
-;
-; NO_FOLDING2-LABEL: vfwmul_v2f116_multiple_users:
-; NO_FOLDING2: # %bb.0:
-; NO_FOLDING2-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
-; NO_FOLDING2-NEXT: vfwcvt.f.f.v v11, v8
-; NO_FOLDING2-NEXT: vfwcvt.f.f.v v8, v9
-; NO_FOLDING2-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; NO_FOLDING2-NEXT: vfmul.vv v9, v11, v8
-; NO_FOLDING2-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; NO_FOLDING2-NEXT: vfwadd.wv v11, v11, v10
-; NO_FOLDING2-NEXT: vfwsub.wv v8, v8, v10
-; NO_FOLDING2-NEXT: vse32.v v9, (a0)
-; NO_FOLDING2-NEXT: vse32.v v11, (a1)
-; NO_FOLDING2-NEXT: vse32.v v8, (a2)
-; NO_FOLDING2-NEXT: ret
+; NO_FOLDING-LABEL: vfwmul_v2f116_multiple_users:
+; NO_FOLDING: # %bb.0:
+; NO_FOLDING-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; NO_FOLDING-NEXT: vfwcvt.f.f.v v11, v8
+; NO_FOLDING-NEXT: vfwcvt.f.f.v v8, v9
+; NO_FOLDING-NEXT: vfwcvt.f.f.v v9, v10
+; NO_FOLDING-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; NO_FOLDING-NEXT: vfmul.vv v10, v11, v8
+; NO_FOLDING-NEXT: vfadd.vv v11, v11, v9
+; NO_FOLDING-NEXT: vfsub.vv v8, v8, v9
+; NO_FOLDING-NEXT: vse32.v v10, (a0)
+; NO_FOLDING-NEXT: vse32.v v11, (a1)
+; NO_FOLDING-NEXT: vse32.v v8, (a2)
+; NO_FOLDING-NEXT: ret
;
; ZVFH-LABEL: vfwmul_v2f116_multiple_users:
; ZVFH: # %bb.0:
@@ -76,35 +61,20 @@ define void @vfwmul_v2f116_multiple_users(ptr %x, ptr %y, ptr %z, <2 x half> %a,
}
define void @vfwmul_v2f32_multiple_users(ptr %x, ptr %y, ptr %z, <2 x float> %a, <2 x float> %b, <2 x float> %b2) {
-; NO_FOLDING1-LABEL: vfwmul_v2f32_multiple_users:
-; NO_FOLDING1: # %bb.0:
-; NO_FOLDING1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; NO_FOLDING1-NEXT: vfwcvt.f.f.v v11, v8
-; NO_FOLDING1-NEXT: vfwcvt.f.f.v v8, v9
-; NO_FOLDING1-NEXT: vfwcvt.f.f.v v9, v10
-; NO_FOLDING1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; NO_FOLDING1-NEXT: vfmul.vv v10, v11, v8
-; NO_FOLDING1-NEXT: vfadd.vv v11, v11, v9
-; NO_FOLDING1-NEXT: vfsub.vv v8, v8, v9
-; NO_FOLDING1-NEXT: vse64.v v10, (a0)
-; NO_FOLDING1-NEXT: vse64.v v11, (a1)
-; NO_FOLDING1-NEXT: vse64.v v8, (a2)
-; NO_FOLDING1-NEXT: ret
-;
-; NO_FOLDING2-LABEL: vfwmul_v2f32_multiple_users:
-; NO_FOLDING2: # %bb.0:
-; NO_FOLDING2-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; NO_FOLDING2-NEXT: vfwcvt.f.f.v v11, v8
-; NO_FOLDING2-NEXT: vfwcvt.f.f.v v8, v9
-; NO_FOLDING2-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; NO_FOLDING2-NEXT: vfmul.vv v9, v11, v8
-; NO_FOLDING2-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; NO_FOLDING2-NEXT: vfwadd.wv v11, v11, v10
-; NO_FOLDING2-NEXT: vfwsub.wv v8, v8, v10
-; NO_FOLDING2-NEXT: vse64.v v9, (a0)
-; NO_FOLDING2-NEXT: vse64.v v11, (a1)
-; NO_FOLDING2-NEXT: vse64.v v8, (a2)
-; NO_FOLDING2-NEXT: ret
+; NO_FOLDING-LABEL: vfwmul_v2f32_multiple_users:
+; NO_FOLDING: # %bb.0:
+; NO_FOLDING-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; NO_FOLDING-NEXT: vfwcvt.f.f.v v11, v8
+; NO_FOLDING-NEXT: vfwcvt.f.f.v v8, v9
+; NO_FOLDING-NEXT: vfwcvt.f.f.v v9, v10
+; NO_FOLDING-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; NO_FOLDING-NEXT: vfmul.vv v10, v11, v8
+; NO_FOLDING-NEXT: vfadd.vv v11, v11, v9
+; NO_FOLDING-NEXT: vfsub.vv v8, v8, v9
+; NO_FOLDING-NEXT: vse64.v v10, (a0)
+; NO_FOLDING-NEXT: vse64.v v11, (a1)
+; NO_FOLDING-NEXT: vse64.v v8, (a2)
+; NO_FOLDING-NEXT: ret
;
; FOLDING-LABEL: vfwmul_v2f32_multiple_users:
; FOLDING: # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vw-web-simplification.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vw-web-simplification.ll
index b093e9e35edad..227a428831b60 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vw-web-simplification.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vw-web-simplification.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=1 | FileCheck %s --check-prefixes=NO_FOLDING1
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=1 | FileCheck %s --check-prefixes=NO_FOLDING1
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=2 | FileCheck %s --check-prefixes=NO_FOLDING2
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=2 | FileCheck %s --check-prefixes=NO_FOLDING2
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=1 | FileCheck %s --check-prefixes=NO_FOLDING
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=1 | FileCheck %s --check-prefixes=NO_FOLDING
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=2 | FileCheck %s --check-prefixes=NO_FOLDING
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=2 | FileCheck %s --check-prefixes=NO_FOLDING
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=3 | FileCheck %s --check-prefixes=FOLDING
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=3 | FileCheck %s --check-prefixes=FOLDING
; Check that the default value enables the web folding and
@@ -16,38 +16,21 @@
; We need the web size to be at least 3 for the folding to happen, because
; %c has 3 uses.
define <2 x i16> @vwmul_v2i16_multiple_users(ptr %x, ptr %y, ptr %z) {
-; NO_FOLDING1-LABEL: vwmul_v2i16_multiple_users:
-; NO_FOLDING1: # %bb.0:
-; NO_FOLDING1-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
-; NO_FOLDING1-NEXT: vle8.v v8, (a0)
-; NO_FOLDING1-NEXT: vle8.v v9, (a1)
-; NO_FOLDING1-NEXT: vle8.v v10, (a2)
-; NO_FOLDING1-NEXT: vsext.vf2 v11, v8
-; NO_FOLDING1-NEXT: vsext.vf2 v8, v9
-; NO_FOLDING1-NEXT: vsext.vf2 v9, v10
-; NO_FOLDING1-NEXT: vmul.vv v8, v11, v8
-; NO_FOLDING1-NEXT: vadd.vv v10, v11, v9
-; NO_FOLDING1-NEXT: vsub.vv v9, v11, v9
-; NO_FOLDING1-NEXT: vor.vv v8, v8, v10
-; NO_FOLDING1-NEXT: vor.vv v8, v8, v9
-; NO_FOLDING1-NEXT: ret
-;
-; NO_FOLDING2-LABEL: vwmul_v2i16_multiple_users:
-; NO_FOLDING2: # %bb.0:
-; NO_FOLDING2-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
-; NO_FOLDING2-NEXT: vle8.v v8, (a0)
-; NO_FOLDING2-NEXT: vle8.v v9, (a1)
-; NO_FOLDING2-NEXT: vle8.v v10, (a2)
-; NO_FOLDING2-NEXT: vsext.vf2 v11, v8
-; NO_FOLDING2-NEXT: vsext.vf2 v8, v9
-; NO_FOLDING2-NEXT: vmul.vv v8, v11, v8
-; NO_FOLDING2-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
-; NO_FOLDING2-NEXT: vwadd.wv v9, v11, v10
-; NO_FOLDING2-NEXT: vwsub.wv v11, v11, v10
-; NO_FOLDING2-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; NO_FOLDING2-NEXT: vor.vv v8, v8, v9
-; NO_FOLDING2-NEXT: vor.vv v8, v8, v11
-; NO_FOLDING2-NEXT: ret
+; NO_FOLDING-LABEL: vwmul_v2i16_multiple_users:
+; NO_FOLDING: # %bb.0:
+; NO_FOLDING-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; NO_FOLDING-NEXT: vle8.v v8, (a0)
+; NO_FOLDING-NEXT: vle8.v v9, (a1)
+; NO_FOLDING-NEXT: vle8.v v10, (a2)
+; NO_FOLDING-NEXT: vsext.vf2 v11, v8
+; NO_FOLDING-NEXT: vsext.vf2 v8, v9
+; NO_FOLDING-NEXT: vsext.vf2 v9, v10
+; NO_FOLDING-NEXT: vmul.vv v8, v11, v8
+; NO_FOLDING-NEXT: vadd.vv v10, v11, v9
+; NO_FOLDING-NEXT: vsub.vv v9, v11, v9
+; NO_FOLDING-NEXT: vor.vv v8, v8, v10
+; NO_FOLDING-NEXT: vor.vv v8, v8, v9
+; NO_FOLDING-NEXT: ret
;
; FOLDING-LABEL: vwmul_v2i16_multiple_users:
; FOLDING: # %bb.0: