[llvm] [SLP]Represent externally used values as original scalars, if (PR #100904)
Alexey Bataev via llvm-commits
llvm-commits at lists.llvm.org
Sat Jul 27 15:27:22 PDT 2024
https://github.com/alexey-bataev created https://github.com/llvm/llvm-project/pull/100904
Represent externally used values as original scalars, if profitable.
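In short: instead of always emitting an extractelement for a vectorized
scalar that also has users outside of the tree, keep (clone) the original
scalar instruction whenever the cost model finds the scalar cheaper than
the extract. This generalizes the previous GEP-only handling: the
ExternalUsesAsGEPs set becomes ExternalUsesAsOriginalScalar, the check now
also applies to loads and to non-root tree entries, and the extract cost
(plus a possible sign/zero extend for min-bitwidth trees) is compared
against the cost of the scalar instruction before deciding.

A rough before/after sketch of the effect on the emitted IR (value names
are illustrative, modeled on the memory-runtime-checks.ll update below):

  ; before: the external user reads lane 1 of the vectorized fmul
  %vec = fmul <2 x float> %splat, <float 9.900000e+01, float 1.000000e+01>
  %ext = extractelement <2 x float> %vec, i32 1
  store float %ext, ptr %p, align 4

  ; after: the original scalar fmul is kept for the external user
  %vec = fmul <2 x float> %splat, <float 9.900000e+01, float 1.000000e+01>
  %scl = fmul float %t23, 1.000000e+01
  store float %scl, ptr %p, align 4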
From 9b5ed1d218a62378bc1ff3f766d5df20e34b6d75 Mon Sep 17 00:00:00 2001
From: Alexey Bataev <a.bataev at outlook.com>
Date: Sat, 27 Jul 2024 22:27:11 +0000
Subject: [PATCH] [𝘀𝗽𝗿] initial version
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Created using spr 1.3.5
---
.../Transforms/Vectorize/SLPVectorizer.cpp | 146 ++++---
.../AArch64/external-non-inst-use.ll | 5 +-
.../SLPVectorizer/AArch64/matmul.ll | 32 +-
.../AArch64/memory-runtime-checks.ll | 12 +-
.../AArch64/multiple_reduction.ll | 376 ++++++++++++------
.../AArch64/scalarization-overhead.ll | 25 +-
.../AArch64/shuffle-vectors-mask-size.ll | 4 +-
.../SLPVectorizer/AArch64/slp-fma-loss.ll | 110 ++---
.../vectorizable-selects-uniform-cmps.ll | 30 +-
.../SLPVectorizer/AMDGPU/horizontal-store.ll | 52 ++-
.../SLPVectorizer/RISCV/complex-loads.ll | 294 +++++++-------
.../RISCV/mixed-extracts-types.ll | 2 +-
.../strided-loads-with-external-indices.ll | 4 +-
13 files changed, 615 insertions(+), 477 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 3bdd8fdadc40c..99a6246e9c157 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -1233,7 +1233,7 @@ class BoUpSLP {
NonScheduledFirst.clear();
EntryToLastInstruction.clear();
ExternalUses.clear();
- ExternalUsesAsGEPs.clear();
+ ExternalUsesAsOriginalScalar.clear();
for (auto &Iter : BlocksSchedules) {
BlockScheduling *BS = Iter.second.get();
BS->clear();
@@ -3434,7 +3434,7 @@ class BoUpSLP {
/// A list of GEPs which can be replaced by scalar GEPs instead of
/// extractelement instructions.
- SmallPtrSet<Value *, 4> ExternalUsesAsGEPs;
+ SmallPtrSet<Value *, 4> ExternalUsesAsOriginalScalar;
/// Values used only by @llvm.assume calls.
SmallPtrSet<const Value *, 32> EphValues;
@@ -10486,6 +10486,7 @@ InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) {
SmallDenseSet<Value *, 4> UsedInserts;
DenseSet<std::pair<const TreeEntry *, Type *>> VectorCasts;
std::optional<DenseMap<Value *, unsigned>> ValueToExtUses;
+ DenseMap<const TreeEntry *, DenseSet<Value *>> ExtractsCount;
for (ExternalUser &EU : ExternalUses) {
// We only add extract cost once for the same scalar.
if (!isa_and_nonnull<InsertElementInst>(EU.User) &&
@@ -10594,52 +10595,72 @@ InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) {
}
}
}
- // Leave the GEPs as is, they are free in most cases and better to keep them
- // as GEPs.
+
TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
- if (auto *GEP = dyn_cast<GetElementPtrInst>(EU.Scalar)) {
+ // If we plan to rewrite the tree in a smaller type, we will need to sign
+ // extend the extracted value back to the original type. Here, we account
+ // for the extract and the added cost of the sign extend if needed.
+ InstructionCost ExtraCost = TTI::TCC_Free;
+ auto *VecTy = getWidenedType(EU.Scalar->getType(), BundleWidth);
+ const TreeEntry *Entry = getTreeEntry(EU.Scalar);
+ auto It = MinBWs.find(Entry);
+ if (It != MinBWs.end()) {
+ auto *MinTy = IntegerType::get(F->getContext(), It->second.first);
+ unsigned Extend =
+ It->second.second ? Instruction::SExt : Instruction::ZExt;
+ VecTy = getWidenedType(MinTy, BundleWidth);
+ ExtraCost = TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(),
+ VecTy, EU.Lane);
+ } else {
+ ExtraCost = TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy,
+ CostKind, EU.Lane);
+ }
+ // Leave the scalar instructions as is if they are cheaper than extracts.
+ if (Entry->Idx != 0 || Entry->getOpcode() == Instruction::GetElementPtr ||
+ Entry->getOpcode() == Instruction::Load) {
if (!ValueToExtUses) {
ValueToExtUses.emplace();
for_each(enumerate(ExternalUses), [&](const auto &P) {
ValueToExtUses->try_emplace(P.value().Scalar, P.index());
});
}
- // Can use original GEP, if no operands vectorized or they are marked as
- // externally used already.
- bool CanBeUsedAsGEP = all_of(GEP->operands(), [&](Value *V) {
+ // Can use the original instruction, if no operands are vectorized or they
+ // are already marked as externally used.
+ auto *Inst = cast<Instruction>(EU.Scalar);
+ bool CanBeUsedAsScalar = all_of(Inst->operands(), [&](Value *V) {
if (!getTreeEntry(V))
return true;
- auto It = ValueToExtUses->find(V);
- if (It != ValueToExtUses->end()) {
- // Replace all uses to avoid compiler crash.
- ExternalUses[It->second].User = nullptr;
- return true;
- }
- return false;
+ return ValueToExtUses->contains(V);
});
- if (CanBeUsedAsGEP) {
- ExtractCost += TTI->getInstructionCost(GEP, CostKind);
- ExternalUsesAsGEPs.insert(EU.Scalar);
- continue;
+ if (CanBeUsedAsScalar) {
+ InstructionCost ScalarCost = TTI->getInstructionCost(Inst, CostKind);
+ bool KeepScalar = true;
+ if (ScalarCost > ExtraCost) {
+ unsigned ScalarUsesCount = count_if(Entry->Scalars, [&](Value *V) {
+ return ValueToExtUses->contains(V);
+ });
+ auto It = ExtractsCount.find(Entry);
+ if (It != ExtractsCount.end())
+ ScalarUsesCount -= It->getSecond().size();
+ KeepScalar = ScalarUsesCount > 1 && isPowerOf2_32(ScalarUsesCount);
+ if (!KeepScalar)
+ ExtractsCount[Entry].insert(Inst);
+ }
+ if (KeepScalar) {
+ ExternalUsesAsOriginalScalar.insert(EU.Scalar);
+ for_each(Inst->operands(), [&](Value *V) {
+ auto It = ValueToExtUses->find(V);
+ if (It != ValueToExtUses->end()) {
+ // Replace all uses to avoid compiler crash.
+ ExternalUses[It->second].User = nullptr;
+ }
+ });
+ ExtraCost = ScalarCost;
+ }
}
}
- // If we plan to rewrite the tree in a smaller type, we will need to sign
- // extend the extracted value back to the original type. Here, we account
- // for the extract and the added cost of the sign extend if needed.
- auto *VecTy = getWidenedType(EU.Scalar->getType(), BundleWidth);
- auto It = MinBWs.find(getTreeEntry(EU.Scalar));
- if (It != MinBWs.end()) {
- auto *MinTy = IntegerType::get(F->getContext(), It->second.first);
- unsigned Extend =
- It->second.second ? Instruction::SExt : Instruction::ZExt;
- VecTy = getWidenedType(MinTy, BundleWidth);
- ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(),
- VecTy, EU.Lane);
- } else {
- ExtractCost += TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy,
- CostKind, EU.Lane);
- }
+ ExtractCost += ExtraCost;
}
// Add reduced value cost, if resized.
if (!VectorizedVals.empty()) {
@@ -13870,8 +13891,8 @@ Value *BoUpSLP::vectorizeTree(
if (Scalar->getType() != Vec->getType()) {
Value *Ex = nullptr;
Value *ExV = nullptr;
- auto *GEP = dyn_cast<GetElementPtrInst>(Scalar);
- bool ReplaceGEP = GEP && ExternalUsesAsGEPs.contains(GEP);
+ auto *Inst = dyn_cast<Instruction>(Scalar);
+ bool ReplaceInst = Inst && ExternalUsesAsOriginalScalar.contains(Inst);
auto It = ScalarToEEs.find(Scalar);
if (It != ScalarToEEs.end()) {
// No need to emit many extracts, just move the only one in the
@@ -13897,18 +13918,18 @@ Value *BoUpSLP::vectorizeTree(
if (const TreeEntry *ETE = getTreeEntry(V))
V = ETE->VectorizedValue;
Ex = Builder.CreateExtractElement(V, ES->getIndexOperand());
- } else if (ReplaceGEP) {
- // Leave the GEPs as is, they are free in most cases and better to
- // keep them as GEPs.
- auto *CloneGEP = GEP->clone();
- if (isa<Instruction>(Vec))
- CloneGEP->insertBefore(*Builder.GetInsertBlock(),
- Builder.GetInsertPoint());
+ } else if (ReplaceInst) {
+ // Leave the instruction as is, if it is cheaper than the extract and all
+ // operands are scalar.
+ auto *CloneInst = Inst->clone();
+ if (isa<Instruction>(Vec) && !isa<PHINode>(Inst))
+ CloneInst->insertBefore(*Builder.GetInsertBlock(),
+ Builder.GetInsertPoint());
else
- CloneGEP->insertBefore(GEP);
- if (GEP->hasName())
- CloneGEP->takeName(GEP);
- Ex = CloneGEP;
+ CloneInst->insertBefore(Inst);
+ if (Inst->hasName())
+ CloneInst->takeName(Inst);
+ Ex = CloneInst;
} else {
Ex = Builder.CreateExtractElement(Vec, Lane);
}
@@ -13920,12 +13941,11 @@ Value *BoUpSLP::vectorizeTree(
MinBWs.find(E)->second.second);
if (auto *I = dyn_cast<Instruction>(Ex))
ScalarToEEs[Scalar].try_emplace(
- Builder.GetInsertBlock(),
- std::make_pair(I, cast<Instruction>(ExV)));
+ I->getParent(), std::make_pair(I, cast<Instruction>(ExV)));
}
// The then branch of the previous if may produce constants, since 0
// operand might be a constant.
- if (auto *ExI = dyn_cast<Instruction>(Ex)) {
+ if (auto *ExI = dyn_cast<Instruction>(Ex); ExI && !isa<PHINode>(ExI)) {
GatherShuffleExtractSeq.insert(ExI);
CSEBlocks.insert(ExI->getParent());
}
@@ -13946,9 +13966,10 @@ Value *BoUpSLP::vectorizeTree(
continue;
assert((ExternallyUsedValues.count(Scalar) ||
Scalar->hasNUsesOrMore(UsesLimit) ||
+ ExternalUsesAsOriginalScalar.contains(Scalar) ||
any_of(Scalar->users(),
[&](llvm::User *U) {
- if (ExternalUsesAsGEPs.contains(U))
+ if (ExternalUsesAsOriginalScalar.contains(U))
return true;
TreeEntry *UseEntry = getTreeEntry(U);
return UseEntry &&
@@ -16645,7 +16666,9 @@ class HorizontalReduction {
/// Maps reduced value to the corresponding reduction operation.
DenseMap<Value *, SmallVector<Instruction *>> ReducedValsToOps;
// Use map vector to make stable output.
- MapVector<Instruction *, Value *> ExtraArgs;
+ using ExtraValueToDebugLocsMap =
+ MapVector<Value *, SmallVector<Instruction *, 2>>;
+ ExtraValueToDebugLocsMap ExtraArgs;
WeakTrackingVH ReductionRoot;
/// The type of reduction operation.
RecurKind RdxKind;
@@ -17062,12 +17085,13 @@ class HorizontalReduction {
CheckOperands(TreeN, Args, PossibleRedVals, PossibleReductionOps);
// If too many extra args - mark the instruction itself as a reduction
// value, not a reduction operation.
- if (Args.size() < 2) {
+ if (Args.size() <= 2) {
addReductionOps(TreeN);
// Add extra args.
if (!Args.empty()) {
- assert(Args.size() == 1 && "Expected only single argument.");
- ExtraArgs[TreeN] = Args.front();
+ assert(Args.size() <= 2 && "Expected at most 2 arguments.");
+ for (Value *V : Args)
+ ExtraArgs[V].emplace_back(TreeN);
}
// Add reduction values. The values are sorted for better vectorization
// results.
@@ -17167,15 +17191,13 @@ class HorizontalReduction {
// because of the vectorization.
DenseMap<Value *, WeakTrackingVH> TrackedVals(
ReducedVals.size() * ReducedVals.front().size() + ExtraArgs.size());
- BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues;
+ BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues(ExtraArgs);
SmallVector<std::pair<Value *, Value *>> ReplacedExternals;
- ExternallyUsedValues.reserve(ExtraArgs.size() + 1);
// The same extra argument may be used several times, so log each attempt
// to use it.
- for (const std::pair<Instruction *, Value *> &Pair : ExtraArgs) {
- assert(Pair.first && "DebugLoc must be set.");
- ExternallyUsedValues[Pair.second].push_back(Pair.first);
- TrackedVals.try_emplace(Pair.second, Pair.second);
+ for (const auto &Pair : ExtraArgs) {
+ assert(!Pair.second.empty() && "DebugLoc must be set.");
+ TrackedVals.try_emplace(Pair.first, Pair.first);
}
// The compare instruction of a min/max is the insertion point for new
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/external-non-inst-use.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/external-non-inst-use.ll
index d4e3fb3e24853..0d6eb7b5e08aa 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/external-non-inst-use.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/external-non-inst-use.ll
@@ -4,8 +4,9 @@
define i16 @foo(ptr %p1, ptr %p2) {
; CHECK-LABEL: @foo(
; CHECK-NEXT: entry:
-; CHECK-NEXT: store i32 0, ptr [[P1:%.*]], align 1
-; CHECK-NEXT: [[CONST_MAT:%.*]] = or i32 0, 0
+; CHECK-NEXT: [[CONST:%.*]] = bitcast i32 0 to i32
+; CHECK-NEXT: store i32 [[CONST]], ptr [[P1:%.*]], align 1
+; CHECK-NEXT: [[CONST_MAT:%.*]] = or i32 [[CONST]], 0
; CHECK-NEXT: store <2 x i32> zeroinitializer, ptr [[P2:%.*]], align 1
; CHECK-NEXT: ret i16 0
;
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/matmul.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/matmul.ll
index 10f07f158175d..983c1a25d69d7 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/matmul.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/matmul.ll
@@ -35,22 +35,26 @@ define void @wrap_mul4(ptr nocapture %Out, ptr nocapture readonly %A, ptr nocapt
; CHECK-NEXT: [[TMP12:%.*]] = load <2 x double>, ptr [[ARRAYIDX30_I]], align 8
; CHECK-NEXT: [[TMP13:%.*]] = fmul <2 x double> [[TMP7]], [[TMP12]]
; CHECK-NEXT: [[TMP14:%.*]] = fadd <2 x double> [[TMP11]], [[TMP13]]
-; CHECK-NEXT: store <2 x double> [[TMP9]], ptr [[OUT]], align 8
-; CHECK-NEXT: store <2 x double> [[TMP14]], ptr [[RES_I_SROA_5_0_OUT2_I_SROA_IDX4]], align 8
; CHECK-NEXT: [[RES_I_SROA_7_0_OUT2_I_SROA_IDX8:%.*]] = getelementptr inbounds double, ptr [[OUT]], i64 4
-; CHECK-NEXT: [[TMP15:%.*]] = insertelement <2 x double> poison, double [[TEMP10]], i32 0
-; CHECK-NEXT: [[TMP16:%.*]] = shufflevector <2 x double> [[TMP15]], <2 x double> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP17:%.*]] = fmul <2 x double> [[TMP1]], [[TMP16]]
-; CHECK-NEXT: [[TMP18:%.*]] = insertelement <2 x double> poison, double [[TEMP11]], i32 0
-; CHECK-NEXT: [[TMP19:%.*]] = shufflevector <2 x double> [[TMP18]], <2 x double> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP20:%.*]] = fmul <2 x double> [[TMP5]], [[TMP19]]
-; CHECK-NEXT: [[TMP21:%.*]] = fadd <2 x double> [[TMP17]], [[TMP20]]
-; CHECK-NEXT: store <2 x double> [[TMP21]], ptr [[RES_I_SROA_7_0_OUT2_I_SROA_IDX8]], align 8
+; CHECK-NEXT: [[TMP15:%.*]] = load <2 x double>, ptr [[B]], align 8
+; CHECK-NEXT: [[TMP16:%.*]] = insertelement <2 x double> poison, double [[TEMP10]], i32 0
+; CHECK-NEXT: [[TMP17:%.*]] = shufflevector <2 x double> [[TMP16]], <2 x double> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP18:%.*]] = fmul <2 x double> [[TMP15]], [[TMP17]]
+; CHECK-NEXT: [[TMP19:%.*]] = load <2 x double>, ptr [[ARRAYIDX7_I]], align 8
+; CHECK-NEXT: [[TMP20:%.*]] = insertelement <2 x double> poison, double [[TEMP11]], i32 0
+; CHECK-NEXT: [[TMP21:%.*]] = shufflevector <2 x double> [[TMP20]], <2 x double> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = fmul <2 x double> [[TMP19]], [[TMP21]]
+; CHECK-NEXT: [[TMP23:%.*]] = fadd <2 x double> [[TMP18]], [[TMP22]]
; CHECK-NEXT: [[RES_I_SROA_9_0_OUT2_I_SROA_IDX12:%.*]] = getelementptr inbounds double, ptr [[OUT]], i64 6
-; CHECK-NEXT: [[TMP22:%.*]] = fmul <2 x double> [[TMP10]], [[TMP16]]
-; CHECK-NEXT: [[TMP23:%.*]] = fmul <2 x double> [[TMP12]], [[TMP19]]
-; CHECK-NEXT: [[TMP24:%.*]] = fadd <2 x double> [[TMP22]], [[TMP23]]
-; CHECK-NEXT: store <2 x double> [[TMP24]], ptr [[RES_I_SROA_9_0_OUT2_I_SROA_IDX12]], align 8
+; CHECK-NEXT: [[TMP24:%.*]] = load <2 x double>, ptr [[ARRAYIDX25_I]], align 8
+; CHECK-NEXT: [[TMP25:%.*]] = fmul <2 x double> [[TMP24]], [[TMP17]]
+; CHECK-NEXT: [[TMP26:%.*]] = load <2 x double>, ptr [[ARRAYIDX30_I]], align 8
+; CHECK-NEXT: [[TMP27:%.*]] = fmul <2 x double> [[TMP26]], [[TMP21]]
+; CHECK-NEXT: [[TMP28:%.*]] = fadd <2 x double> [[TMP25]], [[TMP27]]
+; CHECK-NEXT: store <2 x double> [[TMP9]], ptr [[OUT]], align 8
+; CHECK-NEXT: store <2 x double> [[TMP14]], ptr [[RES_I_SROA_5_0_OUT2_I_SROA_IDX4]], align 8
+; CHECK-NEXT: store <2 x double> [[TMP23]], ptr [[RES_I_SROA_7_0_OUT2_I_SROA_IDX8]], align 8
+; CHECK-NEXT: store <2 x double> [[TMP28]], ptr [[RES_I_SROA_9_0_OUT2_I_SROA_IDX12]], align 8
; CHECK-NEXT: ret void
;
%temp = load double, ptr %A, align 8
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/memory-runtime-checks.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/memory-runtime-checks.ll
index 70cdd08548b2d..91bb8d5378b5f 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/memory-runtime-checks.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/memory-runtime-checks.ll
@@ -1239,17 +1239,17 @@ define void @crash_no_tracked_instructions(ptr %arg, ptr %arg.2, ptr %arg.3, i1
; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x float> poison, float [[T23]], i32 0
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <2 x float> [[TMP1]], <2 x float> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = fmul <2 x float> [[TMP2]], <float 9.900000e+01, float 1.000000e+01>
-; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x float> [[TMP3]], i32 1
-; CHECK-NEXT: store float [[TMP4]], ptr [[T25]], align 4
+; CHECK-NEXT: [[T26:%.*]] = fmul float [[T23]], 1.000000e+01
+; CHECK-NEXT: store float [[T26]], ptr [[T25]], align 4
; CHECK-NEXT: [[T27:%.*]] = load float, ptr [[ARG_2:%.*]], align 8
-; CHECK-NEXT: [[TMP5:%.*]] = fadd <2 x float> [[TMP3]], <float 2.000000e+01, float 2.000000e+01>
+; CHECK-NEXT: [[TMP4:%.*]] = fadd <2 x float> [[TMP3]], <float 2.000000e+01, float 2.000000e+01>
; CHECK-NEXT: br label [[BB30]]
; CHECK: bb30:
-; CHECK-NEXT: [[TMP6:%.*]] = phi <2 x float> [ [[TMP5]], [[BB22]] ], [ [[TMP0]], [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[TMP5:%.*]] = phi <2 x float> [ [[TMP4]], [[BB22]] ], [ [[TMP0]], [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[BB36:%.*]]
; CHECK: bb36:
-; CHECK-NEXT: [[TMP7:%.*]] = fmul <2 x float> [[TMP6]], <float 3.000000e+00, float 3.000000e+00>
-; CHECK-NEXT: store <2 x float> [[TMP7]], ptr [[ARG_3]], align 4
+; CHECK-NEXT: [[TMP6:%.*]] = fmul <2 x float> [[TMP5]], <float 3.000000e+00, float 3.000000e+00>
+; CHECK-NEXT: store <2 x float> [[TMP6]], ptr [[ARG_3]], align 4
; CHECK-NEXT: br label [[BB41:%.*]]
; CHECK: bb41:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/multiple_reduction.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/multiple_reduction.ll
index f85f658fed4d5..da3e1dc7c6778 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/multiple_reduction.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/multiple_reduction.ll
@@ -14,20 +14,76 @@ define i64 @straight(ptr nocapture noundef readonly %p, i32 noundef %st) {
; CHECK-LABEL: @straight(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[IDX_EXT:%.*]] = sext i32 [[ST:%.*]] to i64
-; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr [[P:%.*]], align 2
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i16, ptr [[P:%.*]], i64 1
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 2
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 3
+; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 4
+; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 5
+; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 6
+; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 7
+; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr [[P]], align 2
; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IDX_EXT]]
+; CHECK-NEXT: [[ARRAYIDX_1_1:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR]], i64 1
+; CHECK-NEXT: [[ARRAYIDX_2_1:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR]], i64 2
+; CHECK-NEXT: [[ARRAYIDX_3_1:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR]], i64 3
+; CHECK-NEXT: [[ARRAYIDX_4_1:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR]], i64 4
+; CHECK-NEXT: [[ARRAYIDX_5_1:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR]], i64 5
+; CHECK-NEXT: [[ARRAYIDX_6_1:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR]], i64 6
+; CHECK-NEXT: [[ARRAYIDX_7_1:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR]], i64 7
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr [[ADD_PTR]], align 2
; CHECK-NEXT: [[ADD_PTR_1:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR]], i64 [[IDX_EXT]]
+; CHECK-NEXT: [[ARRAYIDX_1_2:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_1]], i64 1
+; CHECK-NEXT: [[ARRAYIDX_2_2:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_1]], i64 2
+; CHECK-NEXT: [[ARRAYIDX_3_2:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_1]], i64 3
+; CHECK-NEXT: [[ARRAYIDX_4_2:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_1]], i64 4
+; CHECK-NEXT: [[ARRAYIDX_5_2:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_1]], i64 5
+; CHECK-NEXT: [[ARRAYIDX_6_2:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_1]], i64 6
+; CHECK-NEXT: [[ARRAYIDX_7_2:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_1]], i64 7
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr [[ADD_PTR_1]], align 2
; CHECK-NEXT: [[ADD_PTR_2:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_1]], i64 [[IDX_EXT]]
+; CHECK-NEXT: [[ARRAYIDX_1_3:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_2]], i64 1
+; CHECK-NEXT: [[ARRAYIDX_2_3:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_2]], i64 2
+; CHECK-NEXT: [[ARRAYIDX_3_3:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_2]], i64 3
+; CHECK-NEXT: [[ARRAYIDX_4_3:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_2]], i64 4
+; CHECK-NEXT: [[ARRAYIDX_5_3:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_2]], i64 5
+; CHECK-NEXT: [[ARRAYIDX_6_3:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_2]], i64 6
+; CHECK-NEXT: [[ARRAYIDX_7_3:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_2]], i64 7
; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr [[ADD_PTR_2]], align 2
; CHECK-NEXT: [[ADD_PTR_3:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_2]], i64 [[IDX_EXT]]
+; CHECK-NEXT: [[ARRAYIDX_1_4:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_3]], i64 1
+; CHECK-NEXT: [[ARRAYIDX_2_4:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_3]], i64 2
+; CHECK-NEXT: [[ARRAYIDX_3_4:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_3]], i64 3
+; CHECK-NEXT: [[ARRAYIDX_4_4:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_3]], i64 4
+; CHECK-NEXT: [[ARRAYIDX_5_4:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_3]], i64 5
+; CHECK-NEXT: [[ARRAYIDX_6_4:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_3]], i64 6
+; CHECK-NEXT: [[ARRAYIDX_7_4:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_3]], i64 7
; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr [[ADD_PTR_3]], align 2
; CHECK-NEXT: [[ADD_PTR_4:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_3]], i64 [[IDX_EXT]]
+; CHECK-NEXT: [[ARRAYIDX_1_5:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_4]], i64 1
+; CHECK-NEXT: [[ARRAYIDX_2_5:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_4]], i64 2
+; CHECK-NEXT: [[ARRAYIDX_3_5:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_4]], i64 3
+; CHECK-NEXT: [[ARRAYIDX_4_5:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_4]], i64 4
+; CHECK-NEXT: [[ARRAYIDX_5_5:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_4]], i64 5
+; CHECK-NEXT: [[ARRAYIDX_6_5:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_4]], i64 6
+; CHECK-NEXT: [[ARRAYIDX_7_5:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_4]], i64 7
; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr [[ADD_PTR_4]], align 2
; CHECK-NEXT: [[ADD_PTR_5:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_4]], i64 [[IDX_EXT]]
+; CHECK-NEXT: [[ARRAYIDX_1_6:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_5]], i64 1
+; CHECK-NEXT: [[ARRAYIDX_2_6:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_5]], i64 2
+; CHECK-NEXT: [[ARRAYIDX_3_6:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_5]], i64 3
+; CHECK-NEXT: [[ARRAYIDX_4_6:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_5]], i64 4
+; CHECK-NEXT: [[ARRAYIDX_5_6:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_5]], i64 5
+; CHECK-NEXT: [[ARRAYIDX_6_6:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_5]], i64 6
+; CHECK-NEXT: [[ARRAYIDX_7_6:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_5]], i64 7
; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr [[ADD_PTR_5]], align 2
; CHECK-NEXT: [[ADD_PTR_6:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_5]], i64 [[IDX_EXT]]
+; CHECK-NEXT: [[ARRAYIDX_1_7:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_6]], i64 1
+; CHECK-NEXT: [[ARRAYIDX_2_7:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_6]], i64 2
+; CHECK-NEXT: [[ARRAYIDX_3_7:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_6]], i64 3
+; CHECK-NEXT: [[ARRAYIDX_4_7:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_6]], i64 4
+; CHECK-NEXT: [[ARRAYIDX_5_7:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_6]], i64 5
+; CHECK-NEXT: [[ARRAYIDX_6_7:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_6]], i64 6
+; CHECK-NEXT: [[ARRAYIDX_7_7:%.*]] = getelementptr inbounds i16, ptr [[ADD_PTR_6]], i64 7
; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i16>, ptr [[ADD_PTR_6]], align 2
; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <8 x i16> [[TMP0]], <8 x i16> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
@@ -45,134 +101,198 @@ define i64 @straight(ptr nocapture noundef readonly %p, i32 noundef %st) {
; CHECK-NEXT: [[TMP21:%.*]] = shufflevector <8 x i16> [[TMP7]], <8 x i16> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT: [[TMP22:%.*]] = shufflevector <64 x i16> [[TMP20]], <64 x i16> [[TMP21]], <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71>
; CHECK-NEXT: [[TMP23:%.*]] = zext <64 x i16> [[TMP22]] to <64 x i32>
-; CHECK-NEXT: [[TMP24:%.*]] = extractelement <64 x i32> [[TMP23]], i32 0
-; CHECK-NEXT: [[TMP25:%.*]] = extractelement <64 x i32> [[TMP23]], i32 1
-; CHECK-NEXT: [[ADD_1:%.*]] = add nuw nsw i32 [[TMP24]], [[TMP25]]
+; CHECK-NEXT: [[TMP24:%.*]] = load i16, ptr [[P]], align 2
+; CHECK-NEXT: [[CONV:%.*]] = zext i16 [[TMP24]] to i32
+; CHECK-NEXT: [[TMP25:%.*]] = load i16, ptr [[ARRAYIDX_1]], align 2
+; CHECK-NEXT: [[CONV_1:%.*]] = zext i16 [[TMP25]] to i32
+; CHECK-NEXT: [[ADD_1:%.*]] = add nuw nsw i32 [[CONV]], [[CONV_1]]
; CHECK-NEXT: [[TMP26:%.*]] = mul nuw nsw <64 x i32> [[TMP23]], [[TMP23]]
-; CHECK-NEXT: [[TMP27:%.*]] = extractelement <64 x i32> [[TMP23]], i32 2
-; CHECK-NEXT: [[ADD_2:%.*]] = add nuw nsw i32 [[ADD_1]], [[TMP27]]
-; CHECK-NEXT: [[TMP28:%.*]] = extractelement <64 x i32> [[TMP23]], i32 3
-; CHECK-NEXT: [[ADD_3:%.*]] = add nuw nsw i32 [[ADD_2]], [[TMP28]]
-; CHECK-NEXT: [[TMP29:%.*]] = extractelement <64 x i32> [[TMP23]], i32 4
-; CHECK-NEXT: [[ADD_4:%.*]] = add nuw nsw i32 [[ADD_3]], [[TMP29]]
-; CHECK-NEXT: [[TMP30:%.*]] = extractelement <64 x i32> [[TMP23]], i32 5
-; CHECK-NEXT: [[ADD_5:%.*]] = add nuw nsw i32 [[ADD_4]], [[TMP30]]
-; CHECK-NEXT: [[TMP31:%.*]] = extractelement <64 x i32> [[TMP23]], i32 6
-; CHECK-NEXT: [[ADD_6:%.*]] = add nuw nsw i32 [[ADD_5]], [[TMP31]]
-; CHECK-NEXT: [[TMP32:%.*]] = extractelement <64 x i32> [[TMP23]], i32 7
-; CHECK-NEXT: [[ADD_7:%.*]] = add nuw nsw i32 [[ADD_6]], [[TMP32]]
-; CHECK-NEXT: [[TMP33:%.*]] = extractelement <64 x i32> [[TMP23]], i32 8
-; CHECK-NEXT: [[ADD_141:%.*]] = add nuw nsw i32 [[ADD_7]], [[TMP33]]
-; CHECK-NEXT: [[TMP34:%.*]] = extractelement <64 x i32> [[TMP23]], i32 9
-; CHECK-NEXT: [[ADD_1_1:%.*]] = add nuw nsw i32 [[ADD_141]], [[TMP34]]
-; CHECK-NEXT: [[TMP35:%.*]] = extractelement <64 x i32> [[TMP23]], i32 10
-; CHECK-NEXT: [[ADD_2_1:%.*]] = add nuw nsw i32 [[ADD_1_1]], [[TMP35]]
-; CHECK-NEXT: [[TMP36:%.*]] = extractelement <64 x i32> [[TMP23]], i32 11
-; CHECK-NEXT: [[ADD_3_1:%.*]] = add nuw nsw i32 [[ADD_2_1]], [[TMP36]]
-; CHECK-NEXT: [[TMP37:%.*]] = extractelement <64 x i32> [[TMP23]], i32 12
-; CHECK-NEXT: [[ADD_4_1:%.*]] = add nuw nsw i32 [[ADD_3_1]], [[TMP37]]
-; CHECK-NEXT: [[TMP38:%.*]] = extractelement <64 x i32> [[TMP23]], i32 13
-; CHECK-NEXT: [[ADD_5_1:%.*]] = add nuw nsw i32 [[ADD_4_1]], [[TMP38]]
-; CHECK-NEXT: [[TMP39:%.*]] = extractelement <64 x i32> [[TMP23]], i32 14
-; CHECK-NEXT: [[ADD_6_1:%.*]] = add nuw nsw i32 [[ADD_5_1]], [[TMP39]]
-; CHECK-NEXT: [[TMP40:%.*]] = extractelement <64 x i32> [[TMP23]], i32 15
-; CHECK-NEXT: [[ADD_7_1:%.*]] = add nuw nsw i32 [[ADD_6_1]], [[TMP40]]
-; CHECK-NEXT: [[TMP41:%.*]] = extractelement <64 x i32> [[TMP23]], i32 16
-; CHECK-NEXT: [[ADD_245:%.*]] = add nuw nsw i32 [[ADD_7_1]], [[TMP41]]
-; CHECK-NEXT: [[TMP42:%.*]] = extractelement <64 x i32> [[TMP23]], i32 17
-; CHECK-NEXT: [[ADD_1_2:%.*]] = add nuw nsw i32 [[ADD_245]], [[TMP42]]
-; CHECK-NEXT: [[TMP43:%.*]] = extractelement <64 x i32> [[TMP23]], i32 18
-; CHECK-NEXT: [[ADD_2_2:%.*]] = add nuw nsw i32 [[ADD_1_2]], [[TMP43]]
-; CHECK-NEXT: [[TMP44:%.*]] = extractelement <64 x i32> [[TMP23]], i32 19
-; CHECK-NEXT: [[ADD_3_2:%.*]] = add nuw nsw i32 [[ADD_2_2]], [[TMP44]]
-; CHECK-NEXT: [[TMP45:%.*]] = extractelement <64 x i32> [[TMP23]], i32 20
-; CHECK-NEXT: [[ADD_4_2:%.*]] = add nuw nsw i32 [[ADD_3_2]], [[TMP45]]
-; CHECK-NEXT: [[TMP46:%.*]] = extractelement <64 x i32> [[TMP23]], i32 21
-; CHECK-NEXT: [[ADD_5_2:%.*]] = add nuw nsw i32 [[ADD_4_2]], [[TMP46]]
-; CHECK-NEXT: [[TMP47:%.*]] = extractelement <64 x i32> [[TMP23]], i32 22
-; CHECK-NEXT: [[ADD_6_2:%.*]] = add nuw nsw i32 [[ADD_5_2]], [[TMP47]]
-; CHECK-NEXT: [[TMP48:%.*]] = extractelement <64 x i32> [[TMP23]], i32 23
-; CHECK-NEXT: [[ADD_7_2:%.*]] = add nuw nsw i32 [[ADD_6_2]], [[TMP48]]
-; CHECK-NEXT: [[TMP49:%.*]] = extractelement <64 x i32> [[TMP23]], i32 24
-; CHECK-NEXT: [[ADD_349:%.*]] = add nuw nsw i32 [[ADD_7_2]], [[TMP49]]
-; CHECK-NEXT: [[TMP50:%.*]] = extractelement <64 x i32> [[TMP23]], i32 25
-; CHECK-NEXT: [[ADD_1_3:%.*]] = add nuw nsw i32 [[ADD_349]], [[TMP50]]
-; CHECK-NEXT: [[TMP51:%.*]] = extractelement <64 x i32> [[TMP23]], i32 26
-; CHECK-NEXT: [[ADD_2_3:%.*]] = add nuw nsw i32 [[ADD_1_3]], [[TMP51]]
-; CHECK-NEXT: [[TMP52:%.*]] = extractelement <64 x i32> [[TMP23]], i32 27
-; CHECK-NEXT: [[ADD_3_3:%.*]] = add nuw nsw i32 [[ADD_2_3]], [[TMP52]]
-; CHECK-NEXT: [[TMP53:%.*]] = extractelement <64 x i32> [[TMP23]], i32 28
-; CHECK-NEXT: [[ADD_4_3:%.*]] = add nuw nsw i32 [[ADD_3_3]], [[TMP53]]
-; CHECK-NEXT: [[TMP54:%.*]] = extractelement <64 x i32> [[TMP23]], i32 29
-; CHECK-NEXT: [[ADD_5_3:%.*]] = add nuw nsw i32 [[ADD_4_3]], [[TMP54]]
-; CHECK-NEXT: [[TMP55:%.*]] = extractelement <64 x i32> [[TMP23]], i32 30
-; CHECK-NEXT: [[ADD_6_3:%.*]] = add nuw nsw i32 [[ADD_5_3]], [[TMP55]]
-; CHECK-NEXT: [[TMP56:%.*]] = extractelement <64 x i32> [[TMP23]], i32 31
-; CHECK-NEXT: [[ADD_7_3:%.*]] = add nuw nsw i32 [[ADD_6_3]], [[TMP56]]
-; CHECK-NEXT: [[TMP57:%.*]] = extractelement <64 x i32> [[TMP23]], i32 32
-; CHECK-NEXT: [[ADD_453:%.*]] = add nuw nsw i32 [[ADD_7_3]], [[TMP57]]
-; CHECK-NEXT: [[TMP58:%.*]] = extractelement <64 x i32> [[TMP23]], i32 33
-; CHECK-NEXT: [[ADD_1_4:%.*]] = add nuw nsw i32 [[ADD_453]], [[TMP58]]
-; CHECK-NEXT: [[TMP59:%.*]] = extractelement <64 x i32> [[TMP23]], i32 34
-; CHECK-NEXT: [[ADD_2_4:%.*]] = add nuw nsw i32 [[ADD_1_4]], [[TMP59]]
-; CHECK-NEXT: [[TMP60:%.*]] = extractelement <64 x i32> [[TMP23]], i32 35
-; CHECK-NEXT: [[ADD_3_4:%.*]] = add nuw nsw i32 [[ADD_2_4]], [[TMP60]]
-; CHECK-NEXT: [[TMP61:%.*]] = extractelement <64 x i32> [[TMP23]], i32 36
-; CHECK-NEXT: [[ADD_4_4:%.*]] = add nuw nsw i32 [[ADD_3_4]], [[TMP61]]
-; CHECK-NEXT: [[TMP62:%.*]] = extractelement <64 x i32> [[TMP23]], i32 37
-; CHECK-NEXT: [[ADD_5_4:%.*]] = add nuw nsw i32 [[ADD_4_4]], [[TMP62]]
-; CHECK-NEXT: [[TMP63:%.*]] = extractelement <64 x i32> [[TMP23]], i32 38
-; CHECK-NEXT: [[ADD_6_4:%.*]] = add nuw nsw i32 [[ADD_5_4]], [[TMP63]]
-; CHECK-NEXT: [[TMP64:%.*]] = extractelement <64 x i32> [[TMP23]], i32 39
-; CHECK-NEXT: [[ADD_7_4:%.*]] = add nuw nsw i32 [[ADD_6_4]], [[TMP64]]
-; CHECK-NEXT: [[TMP65:%.*]] = extractelement <64 x i32> [[TMP23]], i32 40
-; CHECK-NEXT: [[ADD_557:%.*]] = add nuw nsw i32 [[ADD_7_4]], [[TMP65]]
-; CHECK-NEXT: [[TMP66:%.*]] = extractelement <64 x i32> [[TMP23]], i32 41
-; CHECK-NEXT: [[ADD_1_5:%.*]] = add nuw nsw i32 [[ADD_557]], [[TMP66]]
-; CHECK-NEXT: [[TMP67:%.*]] = extractelement <64 x i32> [[TMP23]], i32 42
-; CHECK-NEXT: [[ADD_2_5:%.*]] = add nuw nsw i32 [[ADD_1_5]], [[TMP67]]
-; CHECK-NEXT: [[TMP68:%.*]] = extractelement <64 x i32> [[TMP23]], i32 43
-; CHECK-NEXT: [[ADD_3_5:%.*]] = add nuw nsw i32 [[ADD_2_5]], [[TMP68]]
-; CHECK-NEXT: [[TMP69:%.*]] = extractelement <64 x i32> [[TMP23]], i32 44
-; CHECK-NEXT: [[ADD_4_5:%.*]] = add nuw nsw i32 [[ADD_3_5]], [[TMP69]]
-; CHECK-NEXT: [[TMP70:%.*]] = extractelement <64 x i32> [[TMP23]], i32 45
-; CHECK-NEXT: [[ADD_5_5:%.*]] = add nuw nsw i32 [[ADD_4_5]], [[TMP70]]
-; CHECK-NEXT: [[TMP71:%.*]] = extractelement <64 x i32> [[TMP23]], i32 46
-; CHECK-NEXT: [[ADD_6_5:%.*]] = add nuw nsw i32 [[ADD_5_5]], [[TMP71]]
-; CHECK-NEXT: [[TMP72:%.*]] = extractelement <64 x i32> [[TMP23]], i32 47
-; CHECK-NEXT: [[ADD_7_5:%.*]] = add nuw nsw i32 [[ADD_6_5]], [[TMP72]]
-; CHECK-NEXT: [[TMP73:%.*]] = extractelement <64 x i32> [[TMP23]], i32 48
-; CHECK-NEXT: [[ADD_661:%.*]] = add nuw nsw i32 [[ADD_7_5]], [[TMP73]]
-; CHECK-NEXT: [[TMP74:%.*]] = extractelement <64 x i32> [[TMP23]], i32 49
-; CHECK-NEXT: [[ADD_1_6:%.*]] = add nuw nsw i32 [[ADD_661]], [[TMP74]]
-; CHECK-NEXT: [[TMP75:%.*]] = extractelement <64 x i32> [[TMP23]], i32 50
-; CHECK-NEXT: [[ADD_2_6:%.*]] = add nuw nsw i32 [[ADD_1_6]], [[TMP75]]
-; CHECK-NEXT: [[TMP76:%.*]] = extractelement <64 x i32> [[TMP23]], i32 51
-; CHECK-NEXT: [[ADD_3_6:%.*]] = add nuw nsw i32 [[ADD_2_6]], [[TMP76]]
-; CHECK-NEXT: [[TMP77:%.*]] = extractelement <64 x i32> [[TMP23]], i32 52
-; CHECK-NEXT: [[ADD_4_6:%.*]] = add nuw nsw i32 [[ADD_3_6]], [[TMP77]]
-; CHECK-NEXT: [[TMP78:%.*]] = extractelement <64 x i32> [[TMP23]], i32 53
-; CHECK-NEXT: [[ADD_5_6:%.*]] = add nuw nsw i32 [[ADD_4_6]], [[TMP78]]
-; CHECK-NEXT: [[TMP79:%.*]] = extractelement <64 x i32> [[TMP23]], i32 54
-; CHECK-NEXT: [[ADD_6_6:%.*]] = add nuw nsw i32 [[ADD_5_6]], [[TMP79]]
-; CHECK-NEXT: [[TMP80:%.*]] = extractelement <64 x i32> [[TMP23]], i32 55
-; CHECK-NEXT: [[ADD_7_6:%.*]] = add nuw nsw i32 [[ADD_6_6]], [[TMP80]]
-; CHECK-NEXT: [[TMP81:%.*]] = extractelement <64 x i32> [[TMP23]], i32 56
-; CHECK-NEXT: [[ADD_765:%.*]] = add nuw nsw i32 [[ADD_7_6]], [[TMP81]]
-; CHECK-NEXT: [[TMP82:%.*]] = extractelement <64 x i32> [[TMP23]], i32 57
-; CHECK-NEXT: [[ADD_1_7:%.*]] = add nuw nsw i32 [[ADD_765]], [[TMP82]]
-; CHECK-NEXT: [[TMP83:%.*]] = extractelement <64 x i32> [[TMP23]], i32 58
-; CHECK-NEXT: [[ADD_2_7:%.*]] = add nuw nsw i32 [[ADD_1_7]], [[TMP83]]
-; CHECK-NEXT: [[TMP84:%.*]] = extractelement <64 x i32> [[TMP23]], i32 59
-; CHECK-NEXT: [[ADD_3_7:%.*]] = add nuw nsw i32 [[ADD_2_7]], [[TMP84]]
-; CHECK-NEXT: [[TMP85:%.*]] = extractelement <64 x i32> [[TMP23]], i32 60
-; CHECK-NEXT: [[ADD_4_7:%.*]] = add nuw nsw i32 [[ADD_3_7]], [[TMP85]]
-; CHECK-NEXT: [[TMP86:%.*]] = extractelement <64 x i32> [[TMP23]], i32 61
-; CHECK-NEXT: [[ADD_5_7:%.*]] = add nuw nsw i32 [[ADD_4_7]], [[TMP86]]
-; CHECK-NEXT: [[TMP87:%.*]] = extractelement <64 x i32> [[TMP23]], i32 62
-; CHECK-NEXT: [[ADD_6_7:%.*]] = add nuw nsw i32 [[ADD_5_7]], [[TMP87]]
-; CHECK-NEXT: [[TMP88:%.*]] = extractelement <64 x i32> [[TMP23]], i32 63
-; CHECK-NEXT: [[ADD_7_7:%.*]] = add nuw nsw i32 [[ADD_6_7]], [[TMP88]]
+; CHECK-NEXT: [[TMP27:%.*]] = load i16, ptr [[ARRAYIDX_2]], align 2
+; CHECK-NEXT: [[CONV_2:%.*]] = zext i16 [[TMP27]] to i32
+; CHECK-NEXT: [[ADD_2:%.*]] = add nuw nsw i32 [[ADD_1]], [[CONV_2]]
+; CHECK-NEXT: [[TMP28:%.*]] = load i16, ptr [[ARRAYIDX_3]], align 2
+; CHECK-NEXT: [[CONV_3:%.*]] = zext i16 [[TMP28]] to i32
+; CHECK-NEXT: [[ADD_3:%.*]] = add nuw nsw i32 [[ADD_2]], [[CONV_3]]
+; CHECK-NEXT: [[TMP29:%.*]] = load i16, ptr [[ARRAYIDX_4]], align 2
+; CHECK-NEXT: [[CONV_4:%.*]] = zext i16 [[TMP29]] to i32
+; CHECK-NEXT: [[ADD_4:%.*]] = add nuw nsw i32 [[ADD_3]], [[CONV_4]]
+; CHECK-NEXT: [[TMP30:%.*]] = load i16, ptr [[ARRAYIDX_5]], align 2
+; CHECK-NEXT: [[CONV_5:%.*]] = zext i16 [[TMP30]] to i32
+; CHECK-NEXT: [[ADD_5:%.*]] = add nuw nsw i32 [[ADD_4]], [[CONV_5]]
+; CHECK-NEXT: [[TMP31:%.*]] = load i16, ptr [[ARRAYIDX_6]], align 2
+; CHECK-NEXT: [[CONV_6:%.*]] = zext i16 [[TMP31]] to i32
+; CHECK-NEXT: [[ADD_6:%.*]] = add nuw nsw i32 [[ADD_5]], [[CONV_6]]
+; CHECK-NEXT: [[TMP32:%.*]] = load i16, ptr [[ARRAYIDX_7]], align 2
+; CHECK-NEXT: [[CONV_7:%.*]] = zext i16 [[TMP32]] to i32
+; CHECK-NEXT: [[ADD_7:%.*]] = add nuw nsw i32 [[ADD_6]], [[CONV_7]]
+; CHECK-NEXT: [[TMP33:%.*]] = load i16, ptr [[ADD_PTR]], align 2
+; CHECK-NEXT: [[CONV_140:%.*]] = zext i16 [[TMP33]] to i32
+; CHECK-NEXT: [[ADD_141:%.*]] = add nuw nsw i32 [[ADD_7]], [[CONV_140]]
+; CHECK-NEXT: [[TMP34:%.*]] = load i16, ptr [[ARRAYIDX_1_1]], align 2
+; CHECK-NEXT: [[CONV_1_1:%.*]] = zext i16 [[TMP34]] to i32
+; CHECK-NEXT: [[ADD_1_1:%.*]] = add nuw nsw i32 [[ADD_141]], [[CONV_1_1]]
+; CHECK-NEXT: [[TMP35:%.*]] = load i16, ptr [[ARRAYIDX_2_1]], align 2
+; CHECK-NEXT: [[CONV_2_1:%.*]] = zext i16 [[TMP35]] to i32
+; CHECK-NEXT: [[ADD_2_1:%.*]] = add nuw nsw i32 [[ADD_1_1]], [[CONV_2_1]]
+; CHECK-NEXT: [[TMP36:%.*]] = load i16, ptr [[ARRAYIDX_3_1]], align 2
+; CHECK-NEXT: [[CONV_3_1:%.*]] = zext i16 [[TMP36]] to i32
+; CHECK-NEXT: [[ADD_3_1:%.*]] = add nuw nsw i32 [[ADD_2_1]], [[CONV_3_1]]
+; CHECK-NEXT: [[TMP37:%.*]] = load i16, ptr [[ARRAYIDX_4_1]], align 2
+; CHECK-NEXT: [[CONV_4_1:%.*]] = zext i16 [[TMP37]] to i32
+; CHECK-NEXT: [[ADD_4_1:%.*]] = add nuw nsw i32 [[ADD_3_1]], [[CONV_4_1]]
+; CHECK-NEXT: [[TMP38:%.*]] = load i16, ptr [[ARRAYIDX_5_1]], align 2
+; CHECK-NEXT: [[CONV_5_1:%.*]] = zext i16 [[TMP38]] to i32
+; CHECK-NEXT: [[ADD_5_1:%.*]] = add nuw nsw i32 [[ADD_4_1]], [[CONV_5_1]]
+; CHECK-NEXT: [[TMP39:%.*]] = load i16, ptr [[ARRAYIDX_6_1]], align 2
+; CHECK-NEXT: [[CONV_6_1:%.*]] = zext i16 [[TMP39]] to i32
+; CHECK-NEXT: [[ADD_6_1:%.*]] = add nuw nsw i32 [[ADD_5_1]], [[CONV_6_1]]
+; CHECK-NEXT: [[TMP40:%.*]] = load i16, ptr [[ARRAYIDX_7_1]], align 2
+; CHECK-NEXT: [[CONV_7_1:%.*]] = zext i16 [[TMP40]] to i32
+; CHECK-NEXT: [[ADD_7_1:%.*]] = add nuw nsw i32 [[ADD_6_1]], [[CONV_7_1]]
+; CHECK-NEXT: [[TMP41:%.*]] = load i16, ptr [[ADD_PTR_1]], align 2
+; CHECK-NEXT: [[CONV_244:%.*]] = zext i16 [[TMP41]] to i32
+; CHECK-NEXT: [[ADD_245:%.*]] = add nuw nsw i32 [[ADD_7_1]], [[CONV_244]]
+; CHECK-NEXT: [[TMP42:%.*]] = load i16, ptr [[ARRAYIDX_1_2]], align 2
+; CHECK-NEXT: [[CONV_1_2:%.*]] = zext i16 [[TMP42]] to i32
+; CHECK-NEXT: [[ADD_1_2:%.*]] = add nuw nsw i32 [[ADD_245]], [[CONV_1_2]]
+; CHECK-NEXT: [[TMP43:%.*]] = load i16, ptr [[ARRAYIDX_2_2]], align 2
+; CHECK-NEXT: [[CONV_2_2:%.*]] = zext i16 [[TMP43]] to i32
+; CHECK-NEXT: [[ADD_2_2:%.*]] = add nuw nsw i32 [[ADD_1_2]], [[CONV_2_2]]
+; CHECK-NEXT: [[TMP44:%.*]] = load i16, ptr [[ARRAYIDX_3_2]], align 2
+; CHECK-NEXT: [[CONV_3_2:%.*]] = zext i16 [[TMP44]] to i32
+; CHECK-NEXT: [[ADD_3_2:%.*]] = add nuw nsw i32 [[ADD_2_2]], [[CONV_3_2]]
+; CHECK-NEXT: [[TMP45:%.*]] = load i16, ptr [[ARRAYIDX_4_2]], align 2
+; CHECK-NEXT: [[CONV_4_2:%.*]] = zext i16 [[TMP45]] to i32
+; CHECK-NEXT: [[ADD_4_2:%.*]] = add nuw nsw i32 [[ADD_3_2]], [[CONV_4_2]]
+; CHECK-NEXT: [[TMP46:%.*]] = load i16, ptr [[ARRAYIDX_5_2]], align 2
+; CHECK-NEXT: [[CONV_5_2:%.*]] = zext i16 [[TMP46]] to i32
+; CHECK-NEXT: [[ADD_5_2:%.*]] = add nuw nsw i32 [[ADD_4_2]], [[CONV_5_2]]
+; CHECK-NEXT: [[TMP47:%.*]] = load i16, ptr [[ARRAYIDX_6_2]], align 2
+; CHECK-NEXT: [[CONV_6_2:%.*]] = zext i16 [[TMP47]] to i32
+; CHECK-NEXT: [[ADD_6_2:%.*]] = add nuw nsw i32 [[ADD_5_2]], [[CONV_6_2]]
+; CHECK-NEXT: [[TMP48:%.*]] = load i16, ptr [[ARRAYIDX_7_2]], align 2
+; CHECK-NEXT: [[CONV_7_2:%.*]] = zext i16 [[TMP48]] to i32
+; CHECK-NEXT: [[ADD_7_2:%.*]] = add nuw nsw i32 [[ADD_6_2]], [[CONV_7_2]]
+; CHECK-NEXT: [[TMP49:%.*]] = load i16, ptr [[ADD_PTR_2]], align 2
+; CHECK-NEXT: [[CONV_348:%.*]] = zext i16 [[TMP49]] to i32
+; CHECK-NEXT: [[ADD_349:%.*]] = add nuw nsw i32 [[ADD_7_2]], [[CONV_348]]
+; CHECK-NEXT: [[TMP50:%.*]] = load i16, ptr [[ARRAYIDX_1_3]], align 2
+; CHECK-NEXT: [[CONV_1_3:%.*]] = zext i16 [[TMP50]] to i32
+; CHECK-NEXT: [[ADD_1_3:%.*]] = add nuw nsw i32 [[ADD_349]], [[CONV_1_3]]
+; CHECK-NEXT: [[TMP51:%.*]] = load i16, ptr [[ARRAYIDX_2_3]], align 2
+; CHECK-NEXT: [[CONV_2_3:%.*]] = zext i16 [[TMP51]] to i32
+; CHECK-NEXT: [[ADD_2_3:%.*]] = add nuw nsw i32 [[ADD_1_3]], [[CONV_2_3]]
+; CHECK-NEXT: [[TMP52:%.*]] = load i16, ptr [[ARRAYIDX_3_3]], align 2
+; CHECK-NEXT: [[CONV_3_3:%.*]] = zext i16 [[TMP52]] to i32
+; CHECK-NEXT: [[ADD_3_3:%.*]] = add nuw nsw i32 [[ADD_2_3]], [[CONV_3_3]]
+; CHECK-NEXT: [[TMP53:%.*]] = load i16, ptr [[ARRAYIDX_4_3]], align 2
+; CHECK-NEXT: [[CONV_4_3:%.*]] = zext i16 [[TMP53]] to i32
+; CHECK-NEXT: [[ADD_4_3:%.*]] = add nuw nsw i32 [[ADD_3_3]], [[CONV_4_3]]
+; CHECK-NEXT: [[TMP54:%.*]] = load i16, ptr [[ARRAYIDX_5_3]], align 2
+; CHECK-NEXT: [[CONV_5_3:%.*]] = zext i16 [[TMP54]] to i32
+; CHECK-NEXT: [[ADD_5_3:%.*]] = add nuw nsw i32 [[ADD_4_3]], [[CONV_5_3]]
+; CHECK-NEXT: [[TMP55:%.*]] = load i16, ptr [[ARRAYIDX_6_3]], align 2
+; CHECK-NEXT: [[CONV_6_3:%.*]] = zext i16 [[TMP55]] to i32
+; CHECK-NEXT: [[ADD_6_3:%.*]] = add nuw nsw i32 [[ADD_5_3]], [[CONV_6_3]]
+; CHECK-NEXT: [[TMP56:%.*]] = load i16, ptr [[ARRAYIDX_7_3]], align 2
+; CHECK-NEXT: [[CONV_7_3:%.*]] = zext i16 [[TMP56]] to i32
+; CHECK-NEXT: [[ADD_7_3:%.*]] = add nuw nsw i32 [[ADD_6_3]], [[CONV_7_3]]
+; CHECK-NEXT: [[TMP57:%.*]] = load i16, ptr [[ADD_PTR_3]], align 2
+; CHECK-NEXT: [[CONV_452:%.*]] = zext i16 [[TMP57]] to i32
+; CHECK-NEXT: [[ADD_453:%.*]] = add nuw nsw i32 [[ADD_7_3]], [[CONV_452]]
+; CHECK-NEXT: [[TMP58:%.*]] = load i16, ptr [[ARRAYIDX_1_4]], align 2
+; CHECK-NEXT: [[CONV_1_4:%.*]] = zext i16 [[TMP58]] to i32
+; CHECK-NEXT: [[ADD_1_4:%.*]] = add nuw nsw i32 [[ADD_453]], [[CONV_1_4]]
+; CHECK-NEXT: [[TMP59:%.*]] = load i16, ptr [[ARRAYIDX_2_4]], align 2
+; CHECK-NEXT: [[CONV_2_4:%.*]] = zext i16 [[TMP59]] to i32
+; CHECK-NEXT: [[ADD_2_4:%.*]] = add nuw nsw i32 [[ADD_1_4]], [[CONV_2_4]]
+; CHECK-NEXT: [[TMP60:%.*]] = load i16, ptr [[ARRAYIDX_3_4]], align 2
+; CHECK-NEXT: [[CONV_3_4:%.*]] = zext i16 [[TMP60]] to i32
+; CHECK-NEXT: [[ADD_3_4:%.*]] = add nuw nsw i32 [[ADD_2_4]], [[CONV_3_4]]
+; CHECK-NEXT: [[TMP61:%.*]] = load i16, ptr [[ARRAYIDX_4_4]], align 2
+; CHECK-NEXT: [[CONV_4_4:%.*]] = zext i16 [[TMP61]] to i32
+; CHECK-NEXT: [[ADD_4_4:%.*]] = add nuw nsw i32 [[ADD_3_4]], [[CONV_4_4]]
+; CHECK-NEXT: [[TMP62:%.*]] = load i16, ptr [[ARRAYIDX_5_4]], align 2
+; CHECK-NEXT: [[CONV_5_4:%.*]] = zext i16 [[TMP62]] to i32
+; CHECK-NEXT: [[ADD_5_4:%.*]] = add nuw nsw i32 [[ADD_4_4]], [[CONV_5_4]]
+; CHECK-NEXT: [[TMP63:%.*]] = load i16, ptr [[ARRAYIDX_6_4]], align 2
+; CHECK-NEXT: [[CONV_6_4:%.*]] = zext i16 [[TMP63]] to i32
+; CHECK-NEXT: [[ADD_6_4:%.*]] = add nuw nsw i32 [[ADD_5_4]], [[CONV_6_4]]
+; CHECK-NEXT: [[TMP64:%.*]] = load i16, ptr [[ARRAYIDX_7_4]], align 2
+; CHECK-NEXT: [[CONV_7_4:%.*]] = zext i16 [[TMP64]] to i32
+; CHECK-NEXT: [[ADD_7_4:%.*]] = add nuw nsw i32 [[ADD_6_4]], [[CONV_7_4]]
+; CHECK-NEXT: [[TMP65:%.*]] = load i16, ptr [[ADD_PTR_4]], align 2
+; CHECK-NEXT: [[CONV_556:%.*]] = zext i16 [[TMP65]] to i32
+; CHECK-NEXT: [[ADD_557:%.*]] = add nuw nsw i32 [[ADD_7_4]], [[CONV_556]]
+; CHECK-NEXT: [[TMP66:%.*]] = load i16, ptr [[ARRAYIDX_1_5]], align 2
+; CHECK-NEXT: [[CONV_1_5:%.*]] = zext i16 [[TMP66]] to i32
+; CHECK-NEXT: [[ADD_1_5:%.*]] = add nuw nsw i32 [[ADD_557]], [[CONV_1_5]]
+; CHECK-NEXT: [[TMP67:%.*]] = load i16, ptr [[ARRAYIDX_2_5]], align 2
+; CHECK-NEXT: [[CONV_2_5:%.*]] = zext i16 [[TMP67]] to i32
+; CHECK-NEXT: [[ADD_2_5:%.*]] = add nuw nsw i32 [[ADD_1_5]], [[CONV_2_5]]
+; CHECK-NEXT: [[TMP68:%.*]] = load i16, ptr [[ARRAYIDX_3_5]], align 2
+; CHECK-NEXT: [[CONV_3_5:%.*]] = zext i16 [[TMP68]] to i32
+; CHECK-NEXT: [[ADD_3_5:%.*]] = add nuw nsw i32 [[ADD_2_5]], [[CONV_3_5]]
+; CHECK-NEXT: [[TMP69:%.*]] = load i16, ptr [[ARRAYIDX_4_5]], align 2
+; CHECK-NEXT: [[CONV_4_5:%.*]] = zext i16 [[TMP69]] to i32
+; CHECK-NEXT: [[ADD_4_5:%.*]] = add nuw nsw i32 [[ADD_3_5]], [[CONV_4_5]]
+; CHECK-NEXT: [[TMP70:%.*]] = load i16, ptr [[ARRAYIDX_5_5]], align 2
+; CHECK-NEXT: [[CONV_5_5:%.*]] = zext i16 [[TMP70]] to i32
+; CHECK-NEXT: [[ADD_5_5:%.*]] = add nuw nsw i32 [[ADD_4_5]], [[CONV_5_5]]
+; CHECK-NEXT: [[TMP71:%.*]] = load i16, ptr [[ARRAYIDX_6_5]], align 2
+; CHECK-NEXT: [[CONV_6_5:%.*]] = zext i16 [[TMP71]] to i32
+; CHECK-NEXT: [[ADD_6_5:%.*]] = add nuw nsw i32 [[ADD_5_5]], [[CONV_6_5]]
+; CHECK-NEXT: [[TMP72:%.*]] = load i16, ptr [[ARRAYIDX_7_5]], align 2
+; CHECK-NEXT: [[CONV_7_5:%.*]] = zext i16 [[TMP72]] to i32
+; CHECK-NEXT: [[ADD_7_5:%.*]] = add nuw nsw i32 [[ADD_6_5]], [[CONV_7_5]]
+; CHECK-NEXT: [[TMP73:%.*]] = load i16, ptr [[ADD_PTR_5]], align 2
+; CHECK-NEXT: [[CONV_660:%.*]] = zext i16 [[TMP73]] to i32
+; CHECK-NEXT: [[ADD_661:%.*]] = add nuw nsw i32 [[ADD_7_5]], [[CONV_660]]
+; CHECK-NEXT: [[TMP74:%.*]] = load i16, ptr [[ARRAYIDX_1_6]], align 2
+; CHECK-NEXT: [[CONV_1_6:%.*]] = zext i16 [[TMP74]] to i32
+; CHECK-NEXT: [[ADD_1_6:%.*]] = add nuw nsw i32 [[ADD_661]], [[CONV_1_6]]
+; CHECK-NEXT: [[TMP75:%.*]] = load i16, ptr [[ARRAYIDX_2_6]], align 2
+; CHECK-NEXT: [[CONV_2_6:%.*]] = zext i16 [[TMP75]] to i32
+; CHECK-NEXT: [[ADD_2_6:%.*]] = add nuw nsw i32 [[ADD_1_6]], [[CONV_2_6]]
+; CHECK-NEXT: [[TMP76:%.*]] = load i16, ptr [[ARRAYIDX_3_6]], align 2
+; CHECK-NEXT: [[CONV_3_6:%.*]] = zext i16 [[TMP76]] to i32
+; CHECK-NEXT: [[ADD_3_6:%.*]] = add nuw nsw i32 [[ADD_2_6]], [[CONV_3_6]]
+; CHECK-NEXT: [[TMP77:%.*]] = load i16, ptr [[ARRAYIDX_4_6]], align 2
+; CHECK-NEXT: [[CONV_4_6:%.*]] = zext i16 [[TMP77]] to i32
+; CHECK-NEXT: [[ADD_4_6:%.*]] = add nuw nsw i32 [[ADD_3_6]], [[CONV_4_6]]
+; CHECK-NEXT: [[TMP78:%.*]] = load i16, ptr [[ARRAYIDX_5_6]], align 2
+; CHECK-NEXT: [[CONV_5_6:%.*]] = zext i16 [[TMP78]] to i32
+; CHECK-NEXT: [[ADD_5_6:%.*]] = add nuw nsw i32 [[ADD_4_6]], [[CONV_5_6]]
+; CHECK-NEXT: [[TMP79:%.*]] = load i16, ptr [[ARRAYIDX_6_6]], align 2
+; CHECK-NEXT: [[CONV_6_6:%.*]] = zext i16 [[TMP79]] to i32
+; CHECK-NEXT: [[ADD_6_6:%.*]] = add nuw nsw i32 [[ADD_5_6]], [[CONV_6_6]]
+; CHECK-NEXT: [[TMP80:%.*]] = load i16, ptr [[ARRAYIDX_7_6]], align 2
+; CHECK-NEXT: [[CONV_7_6:%.*]] = zext i16 [[TMP80]] to i32
+; CHECK-NEXT: [[ADD_7_6:%.*]] = add nuw nsw i32 [[ADD_6_6]], [[CONV_7_6]]
+; CHECK-NEXT: [[TMP81:%.*]] = load i16, ptr [[ADD_PTR_6]], align 2
+; CHECK-NEXT: [[CONV_764:%.*]] = zext i16 [[TMP81]] to i32
+; CHECK-NEXT: [[ADD_765:%.*]] = add nuw nsw i32 [[ADD_7_6]], [[CONV_764]]
+; CHECK-NEXT: [[TMP82:%.*]] = load i16, ptr [[ARRAYIDX_1_7]], align 2
+; CHECK-NEXT: [[CONV_1_7:%.*]] = zext i16 [[TMP82]] to i32
+; CHECK-NEXT: [[ADD_1_7:%.*]] = add nuw nsw i32 [[ADD_765]], [[CONV_1_7]]
+; CHECK-NEXT: [[TMP83:%.*]] = load i16, ptr [[ARRAYIDX_2_7]], align 2
+; CHECK-NEXT: [[CONV_2_7:%.*]] = zext i16 [[TMP83]] to i32
+; CHECK-NEXT: [[ADD_2_7:%.*]] = add nuw nsw i32 [[ADD_1_7]], [[CONV_2_7]]
+; CHECK-NEXT: [[TMP84:%.*]] = load i16, ptr [[ARRAYIDX_3_7]], align 2
+; CHECK-NEXT: [[CONV_3_7:%.*]] = zext i16 [[TMP84]] to i32
+; CHECK-NEXT: [[ADD_3_7:%.*]] = add nuw nsw i32 [[ADD_2_7]], [[CONV_3_7]]
+; CHECK-NEXT: [[TMP85:%.*]] = load i16, ptr [[ARRAYIDX_4_7]], align 2
+; CHECK-NEXT: [[CONV_4_7:%.*]] = zext i16 [[TMP85]] to i32
+; CHECK-NEXT: [[ADD_4_7:%.*]] = add nuw nsw i32 [[ADD_3_7]], [[CONV_4_7]]
+; CHECK-NEXT: [[TMP86:%.*]] = load i16, ptr [[ARRAYIDX_5_7]], align 2
+; CHECK-NEXT: [[CONV_5_7:%.*]] = zext i16 [[TMP86]] to i32
+; CHECK-NEXT: [[ADD_5_7:%.*]] = add nuw nsw i32 [[ADD_4_7]], [[CONV_5_7]]
+; CHECK-NEXT: [[TMP87:%.*]] = load i16, ptr [[ARRAYIDX_6_7]], align 2
+; CHECK-NEXT: [[CONV_6_7:%.*]] = zext i16 [[TMP87]] to i32
+; CHECK-NEXT: [[ADD_6_7:%.*]] = add nuw nsw i32 [[ADD_5_7]], [[CONV_6_7]]
+; CHECK-NEXT: [[TMP88:%.*]] = load i16, ptr [[ARRAYIDX_7_7]], align 2
+; CHECK-NEXT: [[CONV_7_7:%.*]] = zext i16 [[TMP88]] to i32
+; CHECK-NEXT: [[ADD_7_7:%.*]] = add nuw nsw i32 [[ADD_6_7]], [[CONV_7_7]]
; CHECK-NEXT: [[TMP89:%.*]] = call i32 @llvm.vector.reduce.add.v64i32(<64 x i32> [[TMP26]])
; CHECK-NEXT: [[CONV15:%.*]] = zext i32 [[ADD_7_7]] to i64
; CHECK-NEXT: [[CONV16:%.*]] = zext i32 [[TMP89]] to i64
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/scalarization-overhead.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/scalarization-overhead.ll
index c6209fd71063a..b4dec0fac92c7 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/scalarization-overhead.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/scalarization-overhead.ll
@@ -17,25 +17,24 @@ define fastcc i64 @zot(float %arg, float %arg1, float %arg2, float %arg3, float
; CHECK-NEXT: br i1 [[ARG6:%.*]], label [[BB18:%.*]], label [[BB57:%.*]]
; CHECK: bb18:
; CHECK-NEXT: [[TMP7:%.*]] = phi <4 x float> [ [[TMP6]], [[BB:%.*]] ]
-; CHECK-NEXT: [[TMP8:%.*]] = extractelement <4 x float> [[TMP6]], i32 2
-; CHECK-NEXT: [[VAL23:%.*]] = fmul fast float [[TMP8]], 2.000000e+00
-; CHECK-NEXT: [[TMP9:%.*]] = extractelement <4 x float> [[TMP6]], i32 3
-; CHECK-NEXT: [[VAL24:%.*]] = fmul fast float [[TMP9]], 3.000000e+00
+; CHECK-NEXT: [[VAL16:%.*]] = fadd fast float [[ARG3]], 1.000000e+00
+; CHECK-NEXT: [[VAL23:%.*]] = fmul fast float [[VAL16]], 2.000000e+00
+; CHECK-NEXT: [[VAL24:%.*]] = fmul fast float [[VAL16]], 3.000000e+00
; CHECK-NEXT: br i1 [[ARG7:%.*]], label [[BB25:%.*]], label [[BB57]]
; CHECK: bb25:
-; CHECK-NEXT: [[TMP10:%.*]] = phi <4 x float> [ [[TMP7]], [[BB18]] ]
-; CHECK-NEXT: [[TMP11:%.*]] = extractelement <4 x float> [[TMP3]], i32 1
+; CHECK-NEXT: [[TMP8:%.*]] = phi <4 x float> [ [[TMP7]], [[BB18]] ]
+; CHECK-NEXT: [[VAL9:%.*]] = fmul fast float 0.000000e+00, [[ARG]]
; CHECK-NEXT: br label [[BB30:%.*]]
; CHECK: bb30:
; CHECK-NEXT: [[VAL31:%.*]] = phi float [ [[VAL55:%.*]], [[BB30]] ], [ 0.000000e+00, [[BB25]] ]
-; CHECK-NEXT: [[VAL32:%.*]] = phi float [ [[TMP11]], [[BB30]] ], [ 0.000000e+00, [[BB25]] ]
-; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i8>, ptr [[ARG5:%.*]], align 1
-; CHECK-NEXT: [[TMP13:%.*]] = uitofp <4 x i8> [[TMP12]] to <4 x float>
-; CHECK-NEXT: [[TMP14:%.*]] = fsub fast <4 x float> [[TMP13]], [[TMP3]]
-; CHECK-NEXT: [[TMP15:%.*]] = fmul fast <4 x float> [[TMP14]], [[TMP10]]
-; CHECK-NEXT: [[TMP16:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP15]])
+; CHECK-NEXT: [[VAL32:%.*]] = phi float [ [[VAL9]], [[BB30]] ], [ 0.000000e+00, [[BB25]] ]
+; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i8>, ptr [[ARG5:%.*]], align 1
+; CHECK-NEXT: [[TMP10:%.*]] = uitofp <4 x i8> [[TMP9]] to <4 x float>
+; CHECK-NEXT: [[TMP11:%.*]] = fsub fast <4 x float> [[TMP10]], [[TMP3]]
+; CHECK-NEXT: [[TMP12:%.*]] = fmul fast <4 x float> [[TMP11]], [[TMP8]]
+; CHECK-NEXT: [[TMP13:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP12]])
; CHECK-NEXT: [[VAL55]] = tail call fast float @llvm.minnum.f32(float [[VAL31]], float [[ARG1:%.*]])
-; CHECK-NEXT: [[VAL56:%.*]] = tail call fast float @llvm.maxnum.f32(float [[ARG2:%.*]], float [[TMP16]])
+; CHECK-NEXT: [[VAL56:%.*]] = tail call fast float @llvm.maxnum.f32(float [[ARG2:%.*]], float [[TMP13]])
; CHECK-NEXT: call void @ham(float [[VAL55]], float [[VAL56]])
; CHECK-NEXT: br i1 [[ARG8:%.*]], label [[BB30]], label [[BB57]]
; CHECK: bb57:
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/shuffle-vectors-mask-size.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/shuffle-vectors-mask-size.ll
index 0783a28f56d85..e39cd8aaa111b 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/shuffle-vectors-mask-size.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/shuffle-vectors-mask-size.ll
@@ -7,9 +7,11 @@ define void @p(double %0) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x double> <double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double poison>, double [[TMP0]], i32 3
; CHECK-NEXT: [[TMP2:%.*]] = fmul <4 x double> [[TMP1]], zeroinitializer
+; CHECK-NEXT: [[MUL16_150_1_I:%.*]] = fmul double 0.000000e+00, 0.000000e+00
; CHECK-NEXT: [[TMP3:%.*]] = fadd <4 x double> zeroinitializer, [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = fadd <4 x double> [[TMP3]], zeroinitializer
-; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x double> [[TMP2]], <4 x double> [[TMP3]], <2 x i32> <i32 1, i32 7>
+; CHECK-NEXT: [[TMP14:%.*]] = shufflevector <4 x double> [[TMP3]], <4 x double> poison, <2 x i32> <i32 poison, i32 3>
+; CHECK-NEXT: [[TMP5:%.*]] = insertelement <2 x double> [[TMP14]], double [[MUL16_150_1_I]], i32 0
; CHECK-NEXT: [[TMP6:%.*]] = fadd <2 x double> zeroinitializer, [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = fmul <2 x double> [[TMP6]], zeroinitializer
; CHECK-NEXT: [[TMP8:%.*]] = fmul <4 x double> [[TMP4]], zeroinitializer
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/slp-fma-loss.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/slp-fma-loss.ll
index 0b26c53ca4503..b1aa537079f20 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/slp-fma-loss.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/slp-fma-loss.ll
@@ -7,19 +7,21 @@ define void @slp_not_profitable_with_fast_fmf(ptr %A, ptr %B) {
; CHECK-LABEL: @slp_not_profitable_with_fast_fmf(
; CHECK-NEXT: [[GEP_B_1:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 1
; CHECK-NEXT: [[A_0:%.*]] = load float, ptr [[A:%.*]], align 4
-; CHECK-NEXT: [[B_1:%.*]] = load float, ptr [[GEP_B_1]], align 4
-; CHECK-NEXT: [[MUL_0:%.*]] = fmul fast float [[B_1]], [[A_0]]
; CHECK-NEXT: [[B_0:%.*]] = load float, ptr [[B]], align 4
; CHECK-NEXT: [[GEP_B_2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x float>, ptr [[GEP_B_1]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x float> poison, float [[B_0]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x float> [[TMP2]], <2 x float> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = fmul fast <2 x float> [[TMP3]], [[TMP1]]
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <2 x i32> <i32 1, i32 0>
+; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x float> poison, float [[A_0]], i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <2 x float> [[TMP6]], <2 x float> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP8:%.*]] = fmul fast <2 x float> [[TMP1]], [[TMP7]]
+; CHECK-NEXT: [[TMP9:%.*]] = fsub fast <2 x float> [[TMP8]], [[TMP5]]
+; CHECK-NEXT: [[TMP10:%.*]] = fadd fast <2 x float> [[TMP8]], [[TMP5]]
+; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <2 x float> [[TMP9]], <2 x float> [[TMP10]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: store <2 x float> [[TMP11]], ptr [[A]], align 4
; CHECK-NEXT: [[B_2:%.*]] = load float, ptr [[GEP_B_2]], align 4
-; CHECK-NEXT: [[MUL_1:%.*]] = fmul fast float [[B_2]], [[B_0]]
-; CHECK-NEXT: [[SUB:%.*]] = fsub fast float [[MUL_0]], [[MUL_1]]
-; CHECK-NEXT: [[MUL_2:%.*]] = fmul fast float [[B_0]], [[B_1]]
-; CHECK-NEXT: [[MUL_3:%.*]] = fmul fast float [[B_2]], [[A_0]]
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[MUL_3]], [[MUL_2]]
-; CHECK-NEXT: store float [[SUB]], ptr [[A]], align 4
-; CHECK-NEXT: [[GEP_A_1:%.*]] = getelementptr inbounds float, ptr [[A]], i64 1
-; CHECK-NEXT: store float [[ADD]], ptr [[GEP_A_1]], align 4
; CHECK-NEXT: store float [[B_2]], ptr [[B]], align 4
; CHECK-NEXT: ret void
;
@@ -46,19 +48,21 @@ define void @slp_not_profitable_with_reassoc_fmf(ptr %A, ptr %B) {
; CHECK-LABEL: @slp_not_profitable_with_reassoc_fmf(
; CHECK-NEXT: [[GEP_B_1:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 1
; CHECK-NEXT: [[A_0:%.*]] = load float, ptr [[A:%.*]], align 4
-; CHECK-NEXT: [[B_1:%.*]] = load float, ptr [[GEP_B_1]], align 4
-; CHECK-NEXT: [[MUL_0:%.*]] = fmul reassoc float [[B_1]], [[A_0]]
; CHECK-NEXT: [[B_0:%.*]] = load float, ptr [[B]], align 4
; CHECK-NEXT: [[GEP_B_2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x float>, ptr [[GEP_B_1]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x float> poison, float [[B_0]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x float> [[TMP2]], <2 x float> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = fmul <2 x float> [[TMP3]], [[TMP1]]
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <2 x i32> <i32 1, i32 0>
+; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x float> poison, float [[A_0]], i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <2 x float> [[TMP6]], <2 x float> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP8:%.*]] = fmul reassoc <2 x float> [[TMP1]], [[TMP7]]
+; CHECK-NEXT: [[TMP9:%.*]] = fsub reassoc <2 x float> [[TMP8]], [[TMP5]]
+; CHECK-NEXT: [[TMP10:%.*]] = fadd reassoc <2 x float> [[TMP8]], [[TMP5]]
+; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <2 x float> [[TMP9]], <2 x float> [[TMP10]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: store <2 x float> [[TMP11]], ptr [[A]], align 4
; CHECK-NEXT: [[B_2:%.*]] = load float, ptr [[GEP_B_2]], align 4
-; CHECK-NEXT: [[MUL_1:%.*]] = fmul float [[B_2]], [[B_0]]
-; CHECK-NEXT: [[SUB:%.*]] = fsub reassoc float [[MUL_0]], [[MUL_1]]
-; CHECK-NEXT: [[MUL_2:%.*]] = fmul float [[B_0]], [[B_1]]
-; CHECK-NEXT: [[MUL_3:%.*]] = fmul reassoc float [[B_2]], [[A_0]]
-; CHECK-NEXT: [[ADD:%.*]] = fadd reassoc float [[MUL_3]], [[MUL_2]]
-; CHECK-NEXT: store float [[SUB]], ptr [[A]], align 4
-; CHECK-NEXT: [[GEP_A_1:%.*]] = getelementptr inbounds float, ptr [[A]], i64 1
-; CHECK-NEXT: store float [[ADD]], ptr [[GEP_A_1]], align 4
; CHECK-NEXT: store float [[B_2]], ptr [[B]], align 4
; CHECK-NEXT: ret void
;
@@ -86,19 +90,21 @@ define void @slp_profitable_missing_fmf_on_fadd_fsub(ptr %A, ptr %B) {
; CHECK-LABEL: @slp_profitable_missing_fmf_on_fadd_fsub(
; CHECK-NEXT: [[GEP_B_1:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 1
; CHECK-NEXT: [[A_0:%.*]] = load float, ptr [[A:%.*]], align 4
-; CHECK-NEXT: [[B_1:%.*]] = load float, ptr [[GEP_B_1]], align 4
-; CHECK-NEXT: [[MUL_0:%.*]] = fmul fast float [[B_1]], [[A_0]]
; CHECK-NEXT: [[B_0:%.*]] = load float, ptr [[B]], align 4
; CHECK-NEXT: [[GEP_B_2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x float>, ptr [[GEP_B_1]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x float> poison, float [[B_0]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x float> [[TMP2]], <2 x float> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = fmul fast <2 x float> [[TMP3]], [[TMP1]]
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <2 x i32> <i32 1, i32 0>
+; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x float> poison, float [[A_0]], i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <2 x float> [[TMP6]], <2 x float> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP8:%.*]] = fmul fast <2 x float> [[TMP1]], [[TMP7]]
+; CHECK-NEXT: [[TMP9:%.*]] = fsub <2 x float> [[TMP8]], [[TMP5]]
+; CHECK-NEXT: [[TMP10:%.*]] = fadd <2 x float> [[TMP8]], [[TMP5]]
+; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <2 x float> [[TMP9]], <2 x float> [[TMP10]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: store <2 x float> [[TMP11]], ptr [[A]], align 4
; CHECK-NEXT: [[B_2:%.*]] = load float, ptr [[GEP_B_2]], align 4
-; CHECK-NEXT: [[MUL_1:%.*]] = fmul fast float [[B_2]], [[B_0]]
-; CHECK-NEXT: [[SUB:%.*]] = fsub float [[MUL_0]], [[MUL_1]]
-; CHECK-NEXT: [[MUL_2:%.*]] = fmul fast float [[B_0]], [[B_1]]
-; CHECK-NEXT: [[MUL_3:%.*]] = fmul fast float [[B_2]], [[A_0]]
-; CHECK-NEXT: [[ADD:%.*]] = fadd float [[MUL_3]], [[MUL_2]]
-; CHECK-NEXT: store float [[SUB]], ptr [[A]], align 4
-; CHECK-NEXT: [[GEP_A_1:%.*]] = getelementptr inbounds float, ptr [[A]], i64 1
-; CHECK-NEXT: store float [[ADD]], ptr [[GEP_A_1]], align 4
; CHECK-NEXT: store float [[B_2]], ptr [[B]], align 4
; CHECK-NEXT: ret void
;
@@ -126,19 +132,21 @@ define void @slp_profitable_missing_fmf_on_fmul_fadd_fsub(ptr %A, ptr %B) {
; CHECK-LABEL: @slp_profitable_missing_fmf_on_fmul_fadd_fsub(
; CHECK-NEXT: [[GEP_B_1:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 1
; CHECK-NEXT: [[A_0:%.*]] = load float, ptr [[A:%.*]], align 4
-; CHECK-NEXT: [[B_1:%.*]] = load float, ptr [[GEP_B_1]], align 4
-; CHECK-NEXT: [[MUL_0:%.*]] = fmul float [[B_1]], [[A_0]]
; CHECK-NEXT: [[B_0:%.*]] = load float, ptr [[B]], align 4
; CHECK-NEXT: [[GEP_B_2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x float>, ptr [[GEP_B_1]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x float> poison, float [[B_0]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x float> [[TMP2]], <2 x float> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = fmul <2 x float> [[TMP3]], [[TMP1]]
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <2 x i32> <i32 1, i32 0>
+; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x float> poison, float [[A_0]], i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <2 x float> [[TMP6]], <2 x float> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP8:%.*]] = fmul <2 x float> [[TMP1]], [[TMP7]]
+; CHECK-NEXT: [[TMP9:%.*]] = fsub <2 x float> [[TMP8]], [[TMP5]]
+; CHECK-NEXT: [[TMP10:%.*]] = fadd <2 x float> [[TMP8]], [[TMP5]]
+; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <2 x float> [[TMP9]], <2 x float> [[TMP10]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: store <2 x float> [[TMP11]], ptr [[A]], align 4
; CHECK-NEXT: [[B_2:%.*]] = load float, ptr [[GEP_B_2]], align 4
-; CHECK-NEXT: [[MUL_1:%.*]] = fmul float [[B_2]], [[B_0]]
-; CHECK-NEXT: [[SUB:%.*]] = fsub float [[MUL_0]], [[MUL_1]]
-; CHECK-NEXT: [[MUL_2:%.*]] = fmul float [[B_0]], [[B_1]]
-; CHECK-NEXT: [[MUL_3:%.*]] = fmul float [[B_2]], [[A_0]]
-; CHECK-NEXT: [[ADD:%.*]] = fadd float [[MUL_3]], [[MUL_2]]
-; CHECK-NEXT: store float [[SUB]], ptr [[A]], align 4
-; CHECK-NEXT: [[GEP_A_1:%.*]] = getelementptr inbounds float, ptr [[A]], i64 1
-; CHECK-NEXT: store float [[ADD]], ptr [[GEP_A_1]], align 4
; CHECK-NEXT: store float [[B_2]], ptr [[B]], align 4
; CHECK-NEXT: ret void
;
@@ -166,19 +174,21 @@ define void @slp_profitable_missing_fmf_nnans_only(ptr %A, ptr %B) {
; CHECK-LABEL: @slp_profitable_missing_fmf_nnans_only(
; CHECK-NEXT: [[GEP_B_1:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 1
; CHECK-NEXT: [[A_0:%.*]] = load float, ptr [[A:%.*]], align 4
-; CHECK-NEXT: [[B_1:%.*]] = load float, ptr [[GEP_B_1]], align 4
-; CHECK-NEXT: [[MUL_0:%.*]] = fmul nnan float [[B_1]], [[A_0]]
; CHECK-NEXT: [[B_0:%.*]] = load float, ptr [[B]], align 4
; CHECK-NEXT: [[GEP_B_2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x float>, ptr [[GEP_B_1]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x float> poison, float [[B_0]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x float> [[TMP2]], <2 x float> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = fmul nnan <2 x float> [[TMP3]], [[TMP1]]
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <2 x i32> <i32 1, i32 0>
+; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x float> poison, float [[A_0]], i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <2 x float> [[TMP6]], <2 x float> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP8:%.*]] = fmul nnan <2 x float> [[TMP1]], [[TMP7]]
+; CHECK-NEXT: [[TMP9:%.*]] = fsub nnan <2 x float> [[TMP8]], [[TMP5]]
+; CHECK-NEXT: [[TMP10:%.*]] = fadd nnan <2 x float> [[TMP8]], [[TMP5]]
+; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <2 x float> [[TMP9]], <2 x float> [[TMP10]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: store <2 x float> [[TMP11]], ptr [[A]], align 4
; CHECK-NEXT: [[B_2:%.*]] = load float, ptr [[GEP_B_2]], align 4
-; CHECK-NEXT: [[MUL_1:%.*]] = fmul nnan float [[B_2]], [[B_0]]
-; CHECK-NEXT: [[SUB:%.*]] = fsub nnan float [[MUL_0]], [[MUL_1]]
-; CHECK-NEXT: [[MUL_2:%.*]] = fmul nnan float [[B_0]], [[B_1]]
-; CHECK-NEXT: [[MUL_3:%.*]] = fmul nnan float [[B_2]], [[A_0]]
-; CHECK-NEXT: [[ADD:%.*]] = fadd nnan float [[MUL_3]], [[MUL_2]]
-; CHECK-NEXT: store float [[SUB]], ptr [[A]], align 4
-; CHECK-NEXT: [[GEP_A_1:%.*]] = getelementptr inbounds float, ptr [[A]], i64 1
-; CHECK-NEXT: store float [[ADD]], ptr [[GEP_A_1]], align 4
; CHECK-NEXT: store float [[B_2]], ptr [[B]], align 4
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/vectorizable-selects-uniform-cmps.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/vectorizable-selects-uniform-cmps.ll
index b59659ca75eb2..9de84673f25fe 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/vectorizable-selects-uniform-cmps.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/vectorizable-selects-uniform-cmps.ll
@@ -245,8 +245,8 @@ define void @select_uniform_ugt_16xi8(ptr %ptr, i8 %x) {
; CHECK-NEXT: [[GEP_8:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i8 8
; CHECK-NEXT: [[L_8:%.*]] = load i8, ptr [[GEP_8]], align 1
; CHECK-NEXT: [[CMP_8:%.*]] = icmp ugt i8 [[L_8]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = extractelement <8 x i8> [[TMP0]], i32 0
-; CHECK-NEXT: [[S_8:%.*]] = select i1 [[CMP_8]], i8 [[TMP1]], i8 [[X:%.*]]
+; CHECK-NEXT: [[L_0:%.*]] = load i8, ptr [[PTR]], align 1
+; CHECK-NEXT: [[S_8:%.*]] = select i1 [[CMP_8]], i8 [[L_0]], i8 [[X:%.*]]
; CHECK-NEXT: [[GEP_9:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i8 9
; CHECK-NEXT: [[L_9:%.*]] = load i8, ptr [[GEP_9]], align 1
; CHECK-NEXT: [[GEP_10:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i8 10
@@ -254,19 +254,19 @@ define void @select_uniform_ugt_16xi8(ptr %ptr, i8 %x) {
; CHECK-NEXT: [[GEP_11:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i8 11
; CHECK-NEXT: [[L_11:%.*]] = load i8, ptr [[GEP_11]], align 1
; CHECK-NEXT: [[GEP_12:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i8 12
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i8>, ptr [[GEP_12]], align 1
-; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i8> [[TMP0]], <8 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
-; CHECK-NEXT: [[TMP4:%.*]] = insertelement <16 x i8> [[TMP3]], i8 [[L_9]], i32 9
-; CHECK-NEXT: [[TMP5:%.*]] = insertelement <16 x i8> [[TMP4]], i8 [[L_10]], i32 10
-; CHECK-NEXT: [[TMP6:%.*]] = insertelement <16 x i8> [[TMP5]], i8 [[L_11]], i32 11
-; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <4 x i8> [[TMP2]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
-; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <16 x i8> [[TMP6]], <16 x i8> [[TMP7]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
-; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <16 x i8> [[TMP8]], <16 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[TMP10:%.*]] = icmp ugt <16 x i8> [[TMP9]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
-; CHECK-NEXT: [[TMP11:%.*]] = insertelement <16 x i8> poison, i8 [[X]], i32 0
-; CHECK-NEXT: [[TMP12:%.*]] = shufflevector <16 x i8> [[TMP11]], <16 x i8> poison, <16 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP13:%.*]] = select <16 x i1> [[TMP10]], <16 x i8> [[TMP9]], <16 x i8> [[TMP12]]
-; CHECK-NEXT: store <16 x i8> [[TMP13]], ptr [[PTR]], align 2
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr [[GEP_12]], align 1
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i8> [[TMP0]], <8 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP3:%.*]] = insertelement <16 x i8> [[TMP2]], i8 [[L_9]], i32 9
+; CHECK-NEXT: [[TMP4:%.*]] = insertelement <16 x i8> [[TMP3]], i8 [[L_10]], i32 10
+; CHECK-NEXT: [[TMP5:%.*]] = insertelement <16 x i8> [[TMP4]], i8 [[L_11]], i32 11
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <16 x i8> [[TMP5]], <16 x i8> [[TMP6]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <16 x i8> [[TMP7]], <16 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: [[TMP9:%.*]] = icmp ugt <16 x i8> [[TMP8]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+; CHECK-NEXT: [[TMP10:%.*]] = insertelement <16 x i8> poison, i8 [[X]], i32 0
+; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <16 x i8> [[TMP10]], <16 x i8> poison, <16 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP12:%.*]] = select <16 x i1> [[TMP9]], <16 x i8> [[TMP8]], <16 x i8> [[TMP11]]
+; CHECK-NEXT: store <16 x i8> [[TMP12]], ptr [[PTR]], align 2
; CHECK-NEXT: ret void
;
entry:
diff --git a/llvm/test/Transforms/SLPVectorizer/AMDGPU/horizontal-store.ll b/llvm/test/Transforms/SLPVectorizer/AMDGPU/horizontal-store.ll
index 72fb03e60a297..e17172de36b5e 100644
--- a/llvm/test/Transforms/SLPVectorizer/AMDGPU/horizontal-store.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AMDGPU/horizontal-store.ll
@@ -15,15 +15,14 @@
; Tests whether the min/max reduction pattern is vectorized if SLP starts at the store.
define i32 @smaxv6() {
; GFX9-LABEL: @smaxv6(
-; GFX9-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr @arr, align 16
-; GFX9-NEXT: [[TMP2:%.*]] = extractelement <2 x i32> [[TMP1]], i32 0
-; GFX9-NEXT: [[TMP3:%.*]] = extractelement <2 x i32> [[TMP1]], i32 1
-; GFX9-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[TMP2]], [[TMP3]]
-; GFX9-NEXT: [[SELECT1:%.*]] = select i1 [[CMP1]], i32 [[TMP2]], i32 [[TMP3]]
-; GFX9-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 2), align 8
-; GFX9-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[TMP4]])
-; GFX9-NEXT: [[OP_RDX:%.*]] = icmp sgt i32 [[TMP5]], [[SELECT1]]
-; GFX9-NEXT: [[OP_RDX1:%.*]] = select i1 [[OP_RDX]], i32 [[TMP5]], i32 [[SELECT1]]
+; GFX9-NEXT: [[LOAD1:%.*]] = load i32, ptr @arr, align 16
+; GFX9-NEXT: [[LOAD2:%.*]] = load i32, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 1), align 4
+; GFX9-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[LOAD1]], [[LOAD2]]
+; GFX9-NEXT: [[SELECT1:%.*]] = select i1 [[CMP1]], i32 [[LOAD1]], i32 [[LOAD2]]
+; GFX9-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr inbounds ([32 x i32], ptr @arr, i64 0, i64 2), align 8
+; GFX9-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[TMP1]])
+; GFX9-NEXT: [[OP_RDX:%.*]] = icmp sgt i32 [[TMP2]], [[SELECT1]]
+; GFX9-NEXT: [[OP_RDX1:%.*]] = select i1 [[OP_RDX]], i32 [[TMP2]], i32 [[SELECT1]]
; GFX9-NEXT: [[STORE_SELECT:%.*]] = select i1 [[CMP1]], i32 3, i32 4
; GFX9-NEXT: store i32 [[STORE_SELECT]], ptr @var, align 8
; GFX9-NEXT: ret i32 [[OP_RDX1]]
@@ -56,15 +55,14 @@ define i32 @smaxv6() {
define i64 @sminv6() {
; GFX9-LABEL: @sminv6(
-; GFX9-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @arr64, align 16
-; GFX9-NEXT: [[TMP2:%.*]] = extractelement <2 x i64> [[TMP1]], i32 0
-; GFX9-NEXT: [[TMP3:%.*]] = extractelement <2 x i64> [[TMP1]], i32 1
-; GFX9-NEXT: [[CMP1:%.*]] = icmp slt i64 [[TMP2]], [[TMP3]]
-; GFX9-NEXT: [[SELECT1:%.*]] = select i1 [[CMP1]], i64 [[TMP2]], i64 [[TMP3]]
-; GFX9-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr inbounds ([32 x i64], ptr @arr64, i64 0, i64 2), align 16
-; GFX9-NEXT: [[TMP5:%.*]] = call i64 @llvm.vector.reduce.smin.v4i64(<4 x i64> [[TMP4]])
-; GFX9-NEXT: [[OP_RDX:%.*]] = icmp slt i64 [[TMP5]], [[SELECT1]]
-; GFX9-NEXT: [[OP_RDX1:%.*]] = select i1 [[OP_RDX]], i64 [[TMP5]], i64 [[SELECT1]]
+; GFX9-NEXT: [[LOAD1:%.*]] = load i64, ptr @arr64, align 16
+; GFX9-NEXT: [[LOAD2:%.*]] = load i64, ptr getelementptr inbounds ([32 x i64], ptr @arr64, i64 0, i64 1), align 8
+; GFX9-NEXT: [[CMP1:%.*]] = icmp slt i64 [[LOAD1]], [[LOAD2]]
+; GFX9-NEXT: [[SELECT1:%.*]] = select i1 [[CMP1]], i64 [[LOAD1]], i64 [[LOAD2]]
+; GFX9-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr getelementptr inbounds ([32 x i64], ptr @arr64, i64 0, i64 2), align 16
+; GFX9-NEXT: [[TMP2:%.*]] = call i64 @llvm.vector.reduce.smin.v4i64(<4 x i64> [[TMP1]])
+; GFX9-NEXT: [[OP_RDX:%.*]] = icmp slt i64 [[TMP2]], [[SELECT1]]
+; GFX9-NEXT: [[OP_RDX1:%.*]] = select i1 [[OP_RDX]], i64 [[TMP2]], i64 [[SELECT1]]
; GFX9-NEXT: [[STORE_SELECT:%.*]] = select i1 [[CMP1]], i64 3, i64 4
; GFX9-NEXT: store i64 [[STORE_SELECT]], ptr @var64, align 8
; GFX9-NEXT: ret i64 [[OP_RDX1]]
@@ -99,11 +97,10 @@ define i64 @sminv6() {
; with fastmath on the select.
define float @fmaxv6() {
; GFX9-LABEL: @fmaxv6(
-; GFX9-NEXT: [[TMP1:%.*]] = load <2 x float>, ptr @farr, align 16
-; GFX9-NEXT: [[TMP2:%.*]] = extractelement <2 x float> [[TMP1]], i32 0
-; GFX9-NEXT: [[TMP3:%.*]] = extractelement <2 x float> [[TMP1]], i32 1
-; GFX9-NEXT: [[CMP1:%.*]] = fcmp fast ogt float [[TMP2]], [[TMP3]]
-; GFX9-NEXT: [[SELECT1:%.*]] = select i1 [[CMP1]], float [[TMP2]], float [[TMP3]]
+; GFX9-NEXT: [[LOAD1:%.*]] = load float, ptr @farr, align 16
+; GFX9-NEXT: [[LOAD2:%.*]] = load float, ptr getelementptr inbounds ([32 x float], ptr @farr, i64 0, i64 1), align 4
+; GFX9-NEXT: [[CMP1:%.*]] = fcmp fast ogt float [[LOAD1]], [[LOAD2]]
+; GFX9-NEXT: [[SELECT1:%.*]] = select i1 [[CMP1]], float [[LOAD1]], float [[LOAD2]]
; GFX9-NEXT: [[LOAD3:%.*]] = load float, ptr getelementptr inbounds ([32 x float], ptr @farr, i64 0, i64 2), align 8
; GFX9-NEXT: [[CMP2:%.*]] = fcmp fast ogt float [[SELECT1]], [[LOAD3]]
; GFX9-NEXT: [[SELECT2:%.*]] = select i1 [[CMP2]], float [[SELECT1]], float [[LOAD3]]
@@ -150,11 +147,10 @@ define float @fmaxv6() {
; with fastmath on the select.
define double @dminv6() {
; GFX9-LABEL: @dminv6(
-; GFX9-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @darr, align 16
-; GFX9-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[TMP1]], i32 0
-; GFX9-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[TMP1]], i32 1
-; GFX9-NEXT: [[CMP1:%.*]] = fcmp fast olt double [[TMP2]], [[TMP3]]
-; GFX9-NEXT: [[SELECT1:%.*]] = select i1 [[CMP1]], double [[TMP2]], double [[TMP3]]
+; GFX9-NEXT: [[LOAD1:%.*]] = load double, ptr @darr, align 16
+; GFX9-NEXT: [[LOAD2:%.*]] = load double, ptr getelementptr inbounds ([32 x double], ptr @darr, i64 0, i64 1), align 4
+; GFX9-NEXT: [[CMP1:%.*]] = fcmp fast olt double [[LOAD1]], [[LOAD2]]
+; GFX9-NEXT: [[SELECT1:%.*]] = select i1 [[CMP1]], double [[LOAD1]], double [[LOAD2]]
; GFX9-NEXT: [[LOAD3:%.*]] = load double, ptr getelementptr inbounds ([32 x double], ptr @darr, i64 0, i64 2), align 8
; GFX9-NEXT: [[CMP2:%.*]] = fcmp fast olt double [[SELECT1]], [[LOAD3]]
; GFX9-NEXT: [[SELECT2:%.*]] = select i1 [[CMP2]], double [[SELECT1]], double [[LOAD3]]
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/complex-loads.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/complex-loads.ll
index aa9a070a79450..c0868c3674858 100644
--- a/llvm/test/Transforms/SLPVectorizer/RISCV/complex-loads.ll
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/complex-loads.ll
@@ -5,22 +5,12 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt
; CHECK-LABEL: define i32 @test(
; CHECK-SAME: ptr [[PIX1:%.*]], ptr [[PIX2:%.*]], i64 [[IDX_EXT:%.*]], i64 [[IDX_EXT63:%.*]], ptr [[ADD_PTR:%.*]], ptr [[ADD_PTR64:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[PIX1]], align 1
-; CHECK-NEXT: [[CONV1:%.*]] = zext i8 [[TMP0]] to i32
-; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x ptr> poison, ptr [[PIX1]], i32 0
-; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <2 x ptr> [[TMP1]], <2 x ptr> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, <2 x ptr> [[TMP2]], <2 x i64> <i64 4, i64 6>
-; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x ptr> poison, ptr [[PIX2]], i32 0
-; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x ptr> [[TMP4]], <2 x ptr> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, <2 x ptr> [[TMP5]], <2 x i64> <i64 4, i64 6>
-; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr i8, ptr [[PIX1]], i64 1
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, <2 x ptr> [[TMP5]], <2 x i64> <i64 1, i64 3>
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, <2 x ptr> [[TMP2]], <2 x i64> <i64 5, i64 7>
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, <2 x ptr> [[TMP5]], <2 x i64> <i64 5, i64 7>
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[PIX1]], i64 4
+; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr i8, ptr [[PIX2]], i64 4
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[PIX1]], i64 2
; CHECK-NEXT: [[ARRAYIDX22:%.*]] = getelementptr i8, ptr [[PIX2]], i64 2
-; CHECK-NEXT: [[ARRAYIDX32:%.*]] = getelementptr i8, ptr [[PIX1]], i64 3
-; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr [[ARRAYIDX32]], align 1
-; CHECK-NEXT: [[CONV33:%.*]] = zext i8 [[TMP10]] to i32
+; CHECK-NEXT: [[ARRAYIDX25:%.*]] = getelementptr i8, ptr [[PIX1]], i64 6
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[PIX2]], i64 6
; CHECK-NEXT: [[ADD_PTR3:%.*]] = getelementptr i8, ptr [[PIX1]], i64 [[IDX_EXT]]
; CHECK-NEXT: [[ADD_PTR644:%.*]] = getelementptr i8, ptr [[PIX2]], i64 [[IDX_EXT63]]
; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr [[ADD_PTR3]], align 1
@@ -72,7 +62,8 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt
; CHECK-NEXT: [[SUB47_2:%.*]] = extractelement <2 x i32> [[TMP38]], i32 1
; CHECK-NEXT: [[ADD46_2:%.*]] = add i32 [[SUB47_2]], [[SUB45_2]]
; CHECK-NEXT: [[SUB59_2:%.*]] = sub i32 [[SUB45_2]], [[SUB47_2]]
-; CHECK-NEXT: [[ADD48_2:%.*]] = add i32 [[ADD46_2]], [[ADD44_3]]
+; CHECK-NEXT: [[ADD48_4:%.*]] = add i32 [[ADD46_2]], [[ADD44_3]]
+; CHECK-NEXT: [[SUB59_3:%.*]] = sub i32 [[SUB51_2]], [[SUB59_2]]
; CHECK-NEXT: [[TMP43:%.*]] = load i8, ptr null, align 1
; CHECK-NEXT: [[ARRAYIDX20_3:%.*]] = getelementptr i8, ptr null, i64 2
; CHECK-NEXT: [[ARRAYIDX22_3:%.*]] = getelementptr i8, ptr null, i64 2
@@ -103,14 +94,25 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt
; CHECK-NEXT: [[TMP68:%.*]] = zext <2 x i8> [[TMP67]] to <2 x i32>
; CHECK-NEXT: [[TMP69:%.*]] = sub <2 x i32> [[TMP66]], [[TMP68]]
; CHECK-NEXT: [[TMP70:%.*]] = shl <2 x i32> [[TMP69]], <i32 16, i32 16>
-; CHECK-NEXT: [[TMP71:%.*]] = add <2 x i32> [[TMP70]], [[TMP63]]
-; CHECK-NEXT: [[TMP72:%.*]] = add <2 x i32> [[TMP71]], [[TMP58]]
-; CHECK-NEXT: [[TMP190:%.*]] = sub <2 x i32> [[TMP58]], [[TMP71]]
-; CHECK-NEXT: [[TMP74:%.*]] = extractelement <2 x i32> [[TMP72]], i32 0
-; CHECK-NEXT: [[TMP75:%.*]] = extractelement <2 x i32> [[TMP72]], i32 1
-; CHECK-NEXT: [[ADD48_3:%.*]] = add i32 [[TMP74]], [[TMP75]]
-; CHECK-NEXT: [[ADD94:%.*]] = add i32 [[ADD48_3]], [[ADD48_2]]
+; CHECK-NEXT: [[TMP73:%.*]] = add <2 x i32> [[TMP70]], [[TMP63]]
+; CHECK-NEXT: [[TMP74:%.*]] = add <2 x i32> [[TMP73]], [[TMP58]]
+; CHECK-NEXT: [[TMP75:%.*]] = sub <2 x i32> [[TMP58]], [[TMP73]]
+; CHECK-NEXT: [[TMP78:%.*]] = extractelement <2 x i32> [[TMP74]], i32 0
+; CHECK-NEXT: [[TMP82:%.*]] = extractelement <2 x i32> [[TMP74]], i32 1
+; CHECK-NEXT: [[ADD48_5:%.*]] = add i32 [[TMP78]], [[TMP82]]
+; CHECK-NEXT: [[TMP95:%.*]] = shufflevector <2 x i32> [[TMP74]], <2 x i32> poison, <2 x i32> <i32 1, i32 poison>
+; CHECK-NEXT: [[TMP97:%.*]] = insertelement <2 x i32> [[TMP95]], i32 [[ADD44_3]], i32 1
+; CHECK-NEXT: [[TMP98:%.*]] = insertelement <2 x i32> [[TMP74]], i32 [[ADD46_2]], i32 1
+; CHECK-NEXT: [[TMP103:%.*]] = sub <2 x i32> [[TMP97]], [[TMP98]]
+; CHECK-NEXT: [[TMP105:%.*]] = insertelement <2 x i32> [[TMP75]], i32 [[SUB59_2]], i32 1
+; CHECK-NEXT: [[TMP128:%.*]] = shufflevector <2 x i32> [[TMP75]], <2 x i32> poison, <2 x i32> <i32 1, i32 poison>
+; CHECK-NEXT: [[TMP71:%.*]] = insertelement <2 x i32> [[TMP128]], i32 [[SUB51_2]], i32 1
+; CHECK-NEXT: [[TMP72:%.*]] = add <2 x i32> [[TMP105]], [[TMP71]]
+; CHECK-NEXT: [[ADD48_3:%.*]] = extractelement <2 x i32> [[TMP75]], i32 0
+; CHECK-NEXT: [[ADD48_2:%.*]] = extractelement <2 x i32> [[TMP75]], i32 1
; CHECK-NEXT: [[SUB102:%.*]] = sub i32 [[ADD48_2]], [[ADD48_3]]
+; CHECK-NEXT: [[ADD94:%.*]] = add i32 [[ADD48_5]], [[ADD48_4]]
+; CHECK-NEXT: [[SUB103:%.*]] = sub i32 [[ADD48_4]], [[ADD48_5]]
; CHECK-NEXT: [[TMP79:%.*]] = extractelement <2 x i32> [[TMP47]], i32 1
; CHECK-NEXT: [[SHR_I49_2:%.*]] = lshr i32 [[TMP79]], 15
; CHECK-NEXT: [[AND_I50_2:%.*]] = and i32 [[SHR_I49_2]], 65537
@@ -118,54 +120,74 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt
; CHECK-NEXT: [[SHR_I49_3:%.*]] = lshr i32 [[ADD46_2]], 15
; CHECK-NEXT: [[AND_I50_3:%.*]] = and i32 [[SHR_I49_3]], 65537
; CHECK-NEXT: [[MUL_I51_3:%.*]] = mul i32 [[AND_I50_3]], 65535
+; CHECK-NEXT: [[TMP76:%.*]] = extractelement <2 x i32> [[TMP72]], i32 0
+; CHECK-NEXT: [[TMP77:%.*]] = extractelement <2 x i32> [[TMP72]], i32 1
+; CHECK-NEXT: [[ADD78_2:%.*]] = add i32 [[TMP76]], [[TMP77]]
+; CHECK-NEXT: [[SUB102_1:%.*]] = sub i32 [[TMP77]], [[TMP76]]
; CHECK-NEXT: [[TMP107:%.*]] = extractelement <2 x i32> [[TMP16]], i32 0
; CHECK-NEXT: [[SHR_I49_5:%.*]] = lshr i32 [[TMP107]], 15
; CHECK-NEXT: [[AND_I50_5:%.*]] = and i32 [[SHR_I49_5]], 65537
; CHECK-NEXT: [[MUL_I51_5:%.*]] = mul i32 [[AND_I50_5]], 65535
+; CHECK-NEXT: [[TMP141:%.*]] = extractelement <2 x i32> [[TMP103]], i32 0
+; CHECK-NEXT: [[TMP80:%.*]] = extractelement <2 x i32> [[TMP103]], i32 1
+; CHECK-NEXT: [[ADD94_4:%.*]] = add i32 [[TMP141]], [[TMP80]]
+; CHECK-NEXT: [[TMP215:%.*]] = sub i32 [[TMP80]], [[TMP141]]
; CHECK-NEXT: [[SHR_I49_4:%.*]] = lshr i32 [[CONV_1]], 15
; CHECK-NEXT: [[AND_I50_4:%.*]] = and i32 [[SHR_I49_4]], 65537
; CHECK-NEXT: [[MUL_I51_4:%.*]] = mul i32 [[AND_I50_4]], 65535
+; CHECK-NEXT: [[TMP81:%.*]] = load <2 x i8>, ptr [[TMP1]], align 1
+; CHECK-NEXT: [[TMP102:%.*]] = zext <2 x i8> [[TMP81]] to <2 x i32>
+; CHECK-NEXT: [[TMP143:%.*]] = load <2 x i8>, ptr [[ARRAYIDX22]], align 1
+; CHECK-NEXT: [[TMP84:%.*]] = zext <2 x i8> [[TMP143]] to <2 x i32>
+; CHECK-NEXT: [[TMP145:%.*]] = load <2 x i8>, ptr [[ARRAYIDX25]], align 1
+; CHECK-NEXT: [[TMP86:%.*]] = zext <2 x i8> [[TMP145]] to <2 x i32>
+; CHECK-NEXT: [[TMP150:%.*]] = load <2 x i8>, ptr [[TMP4]], align 1
+; CHECK-NEXT: [[TMP154:%.*]] = zext <2 x i8> [[TMP150]] to <2 x i32>
+; CHECK-NEXT: [[TMP89:%.*]] = sub <2 x i32> [[TMP86]], [[TMP154]]
+; CHECK-NEXT: [[TMP90:%.*]] = shl <2 x i32> [[TMP89]], <i32 16, i32 16>
+; CHECK-NEXT: [[TMP91:%.*]] = sub <2 x i32> [[TMP102]], [[TMP84]]
+; CHECK-NEXT: [[TMP92:%.*]] = add <2 x i32> [[TMP90]], [[TMP91]]
+; CHECK-NEXT: [[TMP156:%.*]] = extractelement <2 x i32> [[TMP92]], i32 0
+; CHECK-NEXT: [[TMP173:%.*]] = extractelement <2 x i32> [[TMP92]], i32 1
+; CHECK-NEXT: [[CONV1:%.*]] = add i32 [[TMP173]], [[TMP156]]
+; CHECK-NEXT: [[TMP108:%.*]] = sub i32 [[TMP156]], [[TMP173]]
; CHECK-NEXT: [[SHR_I49_6:%.*]] = lshr i32 [[CONV1]], 15
; CHECK-NEXT: [[AND_I50_6:%.*]] = and i32 [[SHR_I49_6]], 65537
; CHECK-NEXT: [[MUL_I51_6:%.*]] = mul i32 [[AND_I50_6]], 65535
-; CHECK-NEXT: [[TMP78:%.*]] = load <2 x i8>, ptr [[ARRAYIDX8]], align 1
-; CHECK-NEXT: [[TMP102:%.*]] = zext <2 x i8> [[TMP78]] to <2 x i32>
-; CHECK-NEXT: [[TMP80:%.*]] = insertelement <2 x ptr> [[TMP5]], ptr [[ARRAYIDX22]], i32 1
-; CHECK-NEXT: [[TMP81:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP80]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP82:%.*]] = zext <2 x i8> [[TMP81]] to <2 x i32>
-; CHECK-NEXT: [[TMP83:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP3]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP84:%.*]] = zext <2 x i8> [[TMP83]] to <2 x i32>
-; CHECK-NEXT: [[TMP85:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP6]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP86:%.*]] = zext <2 x i8> [[TMP85]] to <2 x i32>
-; CHECK-NEXT: [[TMP87:%.*]] = sub <2 x i32> [[TMP84]], [[TMP86]]
-; CHECK-NEXT: [[TMP88:%.*]] = shl <2 x i32> [[TMP87]], <i32 16, i32 16>
-; CHECK-NEXT: [[TMP89:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP7]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP90:%.*]] = zext <2 x i8> [[TMP89]] to <2 x i32>
-; CHECK-NEXT: [[TMP91:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP8]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP92:%.*]] = zext <2 x i8> [[TMP91]] to <2 x i32>
-; CHECK-NEXT: [[TMP93:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP9]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP94:%.*]] = zext <2 x i8> [[TMP93]] to <2 x i32>
-; CHECK-NEXT: [[TMP95:%.*]] = sub <2 x i32> [[TMP92]], [[TMP94]]
-; CHECK-NEXT: [[TMP96:%.*]] = shl <2 x i32> [[TMP95]], <i32 16, i32 16>
-; CHECK-NEXT: [[TMP97:%.*]] = insertelement <2 x i32> [[TMP102]], i32 [[CONV33]], i32 1
-; CHECK-NEXT: [[TMP98:%.*]] = sub <2 x i32> [[TMP97]], [[TMP90]]
-; CHECK-NEXT: [[TMP104:%.*]] = add <2 x i32> [[TMP96]], [[TMP98]]
-; CHECK-NEXT: [[TMP100:%.*]] = insertelement <2 x i32> [[TMP102]], i32 [[CONV1]], i32 0
-; CHECK-NEXT: [[TMP101:%.*]] = sub <2 x i32> [[TMP100]], [[TMP82]]
-; CHECK-NEXT: [[TMP200:%.*]] = add <2 x i32> [[TMP88]], [[TMP101]]
-; CHECK-NEXT: [[TMP128:%.*]] = shufflevector <2 x i32> [[TMP104]], <2 x i32> [[TMP200]], <2 x i32> <i32 0, i32 2>
-; CHECK-NEXT: [[TMP106:%.*]] = add <2 x i32> [[TMP104]], [[TMP200]]
-; CHECK-NEXT: [[TMP105:%.*]] = sub <2 x i32> [[TMP200]], [[TMP104]]
-; CHECK-NEXT: [[TMP238:%.*]] = extractelement <2 x i32> [[TMP106]], i32 0
-; CHECK-NEXT: [[TMP108:%.*]] = extractelement <2 x i32> [[TMP106]], i32 1
-; CHECK-NEXT: [[ADD48:%.*]] = add i32 [[TMP108]], [[TMP238]]
-; CHECK-NEXT: [[TMP142:%.*]] = extractelement <2 x i32> [[TMP105]], i32 1
; CHECK-NEXT: [[SHR_I59_1:%.*]] = lshr i32 [[TMP108]], 15
; CHECK-NEXT: [[AND_I60_1:%.*]] = and i32 [[SHR_I59_1]], 65537
; CHECK-NEXT: [[MUL_I61_1:%.*]] = mul i32 [[AND_I60_1]], 65535
+; CHECK-NEXT: [[TMP3:%.*]] = insertelement <2 x i32> poison, i32 [[SUB59_3]], i32 0
+; CHECK-NEXT: [[TMP83:%.*]] = shufflevector <2 x i32> [[TMP3]], <2 x i32> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x i32> poison, i32 [[SUB102]], i32 0
+; CHECK-NEXT: [[TMP85:%.*]] = shufflevector <2 x i32> [[TMP6]], <2 x i32> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP99:%.*]] = add <2 x i32> [[TMP83]], [[TMP85]]
+; CHECK-NEXT: [[TMP100:%.*]] = sub <2 x i32> [[TMP83]], [[TMP85]]
+; CHECK-NEXT: [[TMP101:%.*]] = shufflevector <2 x i32> [[TMP99]], <2 x i32> [[TMP100]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: [[TMP87:%.*]] = load <2 x i8>, ptr [[PIX1]], align 1
+; CHECK-NEXT: [[TMP88:%.*]] = zext <2 x i8> [[TMP87]] to <2 x i32>
+; CHECK-NEXT: [[TMP93:%.*]] = load <2 x i8>, ptr [[PIX2]], align 1
+; CHECK-NEXT: [[TMP94:%.*]] = zext <2 x i8> [[TMP93]] to <2 x i32>
+; CHECK-NEXT: [[TMP96:%.*]] = load <2 x i8>, ptr [[TMP0]], align 1
+; CHECK-NEXT: [[TMP104:%.*]] = zext <2 x i8> [[TMP96]] to <2 x i32>
+; CHECK-NEXT: [[TMP174:%.*]] = load <2 x i8>, ptr [[ARRAYIDX5]], align 1
+; CHECK-NEXT: [[TMP175:%.*]] = zext <2 x i8> [[TMP174]] to <2 x i32>
+; CHECK-NEXT: [[TMP106:%.*]] = sub <2 x i32> [[TMP104]], [[TMP175]]
+; CHECK-NEXT: [[TMP238:%.*]] = shl <2 x i32> [[TMP106]], <i32 16, i32 16>
+; CHECK-NEXT: [[TMP176:%.*]] = sub <2 x i32> [[TMP88]], [[TMP94]]
+; CHECK-NEXT: [[TMP179:%.*]] = add <2 x i32> [[TMP238]], [[TMP176]]
+; CHECK-NEXT: [[TMP180:%.*]] = extractelement <2 x i32> [[TMP179]], i32 0
+; CHECK-NEXT: [[TMP181:%.*]] = extractelement <2 x i32> [[TMP179]], i32 1
+; CHECK-NEXT: [[TMP142:%.*]] = add i32 [[TMP181]], [[TMP180]]
+; CHECK-NEXT: [[SUB45:%.*]] = sub i32 [[TMP180]], [[TMP181]]
; CHECK-NEXT: [[SHR_I59_4:%.*]] = lshr i32 [[TMP142]], 15
; CHECK-NEXT: [[AND_I60_4:%.*]] = and i32 [[SHR_I59_4]], 65537
; CHECK-NEXT: [[MUL_I61_4:%.*]] = mul i32 [[AND_I60_4]], 65535
+; CHECK-NEXT: [[TMP182:%.*]] = lshr <2 x i32> [[TMP88]], <i32 15, i32 15>
+; CHECK-NEXT: [[TMP183:%.*]] = and <2 x i32> [[TMP182]], <i32 65537, i32 65537>
+; CHECK-NEXT: [[TMP184:%.*]] = mul <2 x i32> [[TMP183]], <i32 65535, i32 65535>
+; CHECK-NEXT: [[ADD48:%.*]] = add i32 [[CONV1]], [[TMP142]]
+; CHECK-NEXT: [[ADD94_5:%.*]] = sub i32 [[SUB45]], [[TMP108]]
; CHECK-NEXT: [[TMP109:%.*]] = load <2 x i8>, ptr [[ARRAYIDX8_1]], align 1
; CHECK-NEXT: [[TMP110:%.*]] = zext <2 x i8> [[TMP109]] to <2 x i32>
; CHECK-NEXT: [[TMP111:%.*]] = insertelement <2 x i8> poison, i8 [[TMP12]], i32 0
@@ -198,56 +220,55 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt
; CHECK-NEXT: [[TMP138:%.*]] = sub <2 x i32> [[TMP137]], [[TMP144]]
; CHECK-NEXT: [[TMP139:%.*]] = add <2 x i32> [[TMP136]], [[TMP138]]
; CHECK-NEXT: [[TMP140:%.*]] = insertelement <2 x i32> [[TMP110]], i32 [[CONV_1]], i32 0
-; CHECK-NEXT: [[TMP141:%.*]] = sub <2 x i32> [[TMP140]], [[TMP113]]
-; CHECK-NEXT: [[TMP155:%.*]] = add <2 x i32> [[TMP125]], [[TMP141]]
-; CHECK-NEXT: [[TMP143:%.*]] = add <2 x i32> [[TMP139]], [[TMP155]]
-; CHECK-NEXT: [[TMP189:%.*]] = sub <2 x i32> [[TMP155]], [[TMP139]]
-; CHECK-NEXT: [[TMP145:%.*]] = extractelement <2 x i32> [[TMP143]], i32 0
-; CHECK-NEXT: [[TMP146:%.*]] = extractelement <2 x i32> [[TMP143]], i32 1
-; CHECK-NEXT: [[ADD48_1:%.*]] = add i32 [[TMP146]], [[TMP145]]
+; CHECK-NEXT: [[TMP151:%.*]] = sub <2 x i32> [[TMP140]], [[TMP113]]
+; CHECK-NEXT: [[TMP152:%.*]] = add <2 x i32> [[TMP125]], [[TMP151]]
+; CHECK-NEXT: [[TMP153:%.*]] = add <2 x i32> [[TMP139]], [[TMP152]]
+; CHECK-NEXT: [[TMP189:%.*]] = sub <2 x i32> [[TMP152]], [[TMP139]]
+; CHECK-NEXT: [[TMP155:%.*]] = extractelement <2 x i32> [[TMP153]], i32 0
+; CHECK-NEXT: [[TMP146:%.*]] = extractelement <2 x i32> [[TMP153]], i32 1
+; CHECK-NEXT: [[ADD48_1:%.*]] = add i32 [[TMP146]], [[TMP155]]
+; CHECK-NEXT: [[TMP157:%.*]] = insertelement <2 x i32> [[TMP153]], i32 [[TMP142]], i32 1
+; CHECK-NEXT: [[TMP158:%.*]] = shufflevector <2 x i32> [[TMP153]], <2 x i32> poison, <2 x i32> <i32 1, i32 poison>
+; CHECK-NEXT: [[TMP159:%.*]] = insertelement <2 x i32> [[TMP158]], i32 [[CONV1]], i32 1
+; CHECK-NEXT: [[TMP160:%.*]] = sub <2 x i32> [[TMP157]], [[TMP159]]
+; CHECK-NEXT: [[TMP161:%.*]] = shufflevector <2 x i32> [[TMP189]], <2 x i32> poison, <2 x i32> <i32 1, i32 poison>
+; CHECK-NEXT: [[TMP162:%.*]] = insertelement <2 x i32> [[TMP161]], i32 [[TMP108]], i32 1
+; CHECK-NEXT: [[TMP163:%.*]] = insertelement <2 x i32> [[TMP189]], i32 [[SUB45]], i32 1
+; CHECK-NEXT: [[TMP185:%.*]] = add <2 x i32> [[TMP162]], [[TMP163]]
+; CHECK-NEXT: [[TMP186:%.*]] = extractelement <2 x i32> [[TMP189]], i32 0
+; CHECK-NEXT: [[TMP191:%.*]] = extractelement <2 x i32> [[TMP189]], i32 1
+; CHECK-NEXT: [[SUB59_1:%.*]] = sub i32 [[TMP186]], [[TMP191]]
; CHECK-NEXT: [[SHR_I54:%.*]] = lshr i32 [[TMP146]], 15
; CHECK-NEXT: [[AND_I55:%.*]] = and i32 [[SHR_I54]], 65537
; CHECK-NEXT: [[MUL_I56:%.*]] = mul i32 [[AND_I55]], 65535
; CHECK-NEXT: [[TMP147:%.*]] = lshr <2 x i32> [[TMP110]], <i32 15, i32 15>
; CHECK-NEXT: [[TMP148:%.*]] = and <2 x i32> [[TMP147]], <i32 65537, i32 65537>
; CHECK-NEXT: [[TMP149:%.*]] = mul <2 x i32> [[TMP148]], <i32 65535, i32 65535>
+; CHECK-NEXT: [[TMP170:%.*]] = insertelement <2 x i32> poison, i32 [[SUB59_1]], i32 0
+; CHECK-NEXT: [[TMP192:%.*]] = shufflevector <2 x i32> [[TMP170]], <2 x i32> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[ADD78:%.*]] = add i32 [[ADD48_1]], [[ADD48]]
; CHECK-NEXT: [[SUB86:%.*]] = sub i32 [[ADD48]], [[ADD48_1]]
; CHECK-NEXT: [[ADD103:%.*]] = add i32 [[ADD94]], [[ADD78]]
; CHECK-NEXT: [[SUB104:%.*]] = sub i32 [[ADD78]], [[ADD94]]
-; CHECK-NEXT: [[ADD105:%.*]] = add i32 [[SUB102]], [[SUB86]]
-; CHECK-NEXT: [[SUB106:%.*]] = sub i32 [[SUB86]], [[SUB102]]
+; CHECK-NEXT: [[ADD105:%.*]] = add i32 [[SUB103]], [[SUB86]]
+; CHECK-NEXT: [[SUB106:%.*]] = sub i32 [[SUB86]], [[SUB103]]
; CHECK-NEXT: [[ADD_I:%.*]] = add i32 [[MUL_I51_2]], [[ADD103]]
; CHECK-NEXT: [[XOR_I:%.*]] = xor i32 [[ADD_I]], [[TMP79]]
; CHECK-NEXT: [[ADD_I52:%.*]] = add i32 [[MUL_I51_3]], [[ADD105]]
; CHECK-NEXT: [[XOR_I53:%.*]] = xor i32 [[ADD_I52]], [[ADD46_2]]
; CHECK-NEXT: [[ADD_I57:%.*]] = add i32 [[MUL_I56]], [[SUB104]]
; CHECK-NEXT: [[XOR_I58:%.*]] = xor i32 [[ADD_I57]], [[TMP146]]
-; CHECK-NEXT: [[ADD_I62:%.*]] = add i32 [[MUL_I61_1]], [[SUB106]]
-; CHECK-NEXT: [[XOR_I63:%.*]] = xor i32 [[ADD_I62]], [[TMP108]]
+; CHECK-NEXT: [[ADD_I62:%.*]] = add i32 [[MUL_I51_6]], [[SUB106]]
+; CHECK-NEXT: [[XOR_I63:%.*]] = xor i32 [[ADD_I62]], [[CONV1]]
; CHECK-NEXT: [[ADD110:%.*]] = add i32 [[XOR_I53]], [[XOR_I]]
; CHECK-NEXT: [[ADD112:%.*]] = add i32 [[ADD110]], [[XOR_I58]]
; CHECK-NEXT: [[ADD113:%.*]] = add i32 [[ADD112]], [[XOR_I63]]
-; CHECK-NEXT: [[TMP150:%.*]] = shufflevector <2 x i32> [[TMP105]], <2 x i32> poison, <2 x i32> <i32 1, i32 0>
-; CHECK-NEXT: [[TMP151:%.*]] = insertelement <2 x i32> [[TMP150]], i32 [[SUB59_2]], i32 1
-; CHECK-NEXT: [[TMP152:%.*]] = insertelement <2 x i32> [[TMP105]], i32 [[SUB51_2]], i32 1
-; CHECK-NEXT: [[TMP153:%.*]] = add <2 x i32> [[TMP151]], [[TMP152]]
-; CHECK-NEXT: [[TMP154:%.*]] = shufflevector <2 x i32> [[TMP189]], <2 x i32> [[TMP190]], <2 x i32> <i32 1, i32 2>
-; CHECK-NEXT: [[TMP184:%.*]] = shufflevector <2 x i32> [[TMP189]], <2 x i32> [[TMP190]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: [[TMP156:%.*]] = add <2 x i32> [[TMP154]], [[TMP184]]
-; CHECK-NEXT: [[TMP157:%.*]] = extractelement <2 x i32> [[TMP153]], i32 1
-; CHECK-NEXT: [[TMP158:%.*]] = extractelement <2 x i32> [[TMP156]], i32 1
-; CHECK-NEXT: [[TMP159:%.*]] = shufflevector <2 x i32> [[TMP156]], <2 x i32> [[TMP153]], <2 x i32> <i32 1, i32 3>
-; CHECK-NEXT: [[ADD78_2:%.*]] = add i32 [[TMP158]], [[TMP157]]
-; CHECK-NEXT: [[TMP160:%.*]] = extractelement <2 x i32> [[TMP153]], i32 0
-; CHECK-NEXT: [[TMP161:%.*]] = extractelement <2 x i32> [[TMP156]], i32 0
-; CHECK-NEXT: [[TMP162:%.*]] = shufflevector <2 x i32> [[TMP156]], <2 x i32> [[TMP153]], <2 x i32> <i32 0, i32 2>
-; CHECK-NEXT: [[ADD94_1:%.*]] = add i32 [[TMP161]], [[TMP160]]
-; CHECK-NEXT: [[TMP163:%.*]] = sub <2 x i32> [[TMP153]], [[TMP156]]
-; CHECK-NEXT: [[TMP164:%.*]] = extractelement <2 x i32> [[TMP163]], i32 0
-; CHECK-NEXT: [[TMP165:%.*]] = extractelement <2 x i32> [[TMP163]], i32 1
-; CHECK-NEXT: [[ADD105_1:%.*]] = add i32 [[TMP165]], [[TMP164]]
+; CHECK-NEXT: [[TMP165:%.*]] = extractelement <2 x i32> [[TMP185]], i32 0
+; CHECK-NEXT: [[TMP164:%.*]] = extractelement <2 x i32> [[TMP185]], i32 1
+; CHECK-NEXT: [[ADD94_1:%.*]] = add i32 [[TMP165]], [[TMP164]]
; CHECK-NEXT: [[SUB106_1:%.*]] = sub i32 [[TMP164]], [[TMP165]]
+; CHECK-NEXT: [[ADD105_1:%.*]] = add i32 [[SUB102_1]], [[SUB106_1]]
+; CHECK-NEXT: [[SUB106_3:%.*]] = sub i32 [[SUB106_1]], [[SUB102_1]]
; CHECK-NEXT: [[ADD_I52_1:%.*]] = add i32 [[MUL_I51_5]], [[ADD105_1]]
; CHECK-NEXT: [[XOR_I53_1:%.*]] = xor i32 [[ADD_I52_1]], [[TMP107]]
; CHECK-NEXT: [[TMP166:%.*]] = shufflevector <2 x i32> [[TMP16]], <2 x i32> [[TMP189]], <2 x i32> <i32 1, i32 3>
@@ -263,30 +284,18 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt
; CHECK-NEXT: [[TMP283:%.*]] = shufflevector <2 x i32> [[TMP282]], <2 x i32> [[TMP211]], <2 x i32> <i32 0, i32 3>
; CHECK-NEXT: [[TMP177:%.*]] = add <2 x i32> [[TMP169]], [[TMP283]]
; CHECK-NEXT: [[TMP178:%.*]] = xor <2 x i32> [[TMP177]], [[TMP166]]
-; CHECK-NEXT: [[ADD_I62_1:%.*]] = add i32 [[MUL_I61_4]], [[SUB106_1]]
-; CHECK-NEXT: [[XOR_I63_1:%.*]] = xor i32 [[ADD_I62_1]], [[TMP142]]
+; CHECK-NEXT: [[ADD_I62_1:%.*]] = add i32 [[MUL_I61_1]], [[SUB106_3]]
+; CHECK-NEXT: [[XOR_I63_1:%.*]] = xor i32 [[ADD_I62_1]], [[TMP108]]
; CHECK-NEXT: [[ADD108_1:%.*]] = add i32 [[XOR_I53_1]], [[ADD113]]
-; CHECK-NEXT: [[TMP179:%.*]] = extractelement <2 x i32> [[TMP178]], i32 0
-; CHECK-NEXT: [[ADD110_1:%.*]] = add i32 [[ADD108_1]], [[TMP179]]
-; CHECK-NEXT: [[TMP180:%.*]] = extractelement <2 x i32> [[TMP178]], i32 1
-; CHECK-NEXT: [[ADD112_1:%.*]] = add i32 [[ADD110_1]], [[TMP180]]
+; CHECK-NEXT: [[TMP187:%.*]] = extractelement <2 x i32> [[TMP178]], i32 0
+; CHECK-NEXT: [[ADD110_1:%.*]] = add i32 [[ADD108_1]], [[TMP187]]
+; CHECK-NEXT: [[TMP188:%.*]] = extractelement <2 x i32> [[TMP178]], i32 1
+; CHECK-NEXT: [[ADD112_1:%.*]] = add i32 [[ADD110_1]], [[TMP188]]
; CHECK-NEXT: [[ADD113_1:%.*]] = add i32 [[ADD112_1]], [[XOR_I63_1]]
-; CHECK-NEXT: [[TMP181:%.*]] = shufflevector <2 x i32> [[TMP106]], <2 x i32> poison, <2 x i32> <i32 poison, i32 0>
-; CHECK-NEXT: [[TMP182:%.*]] = insertelement <2 x i32> [[TMP181]], i32 [[ADD44_3]], i32 0
-; CHECK-NEXT: [[TMP183:%.*]] = insertelement <2 x i32> [[TMP106]], i32 [[ADD46_2]], i32 0
-; CHECK-NEXT: [[TMP195:%.*]] = sub <2 x i32> [[TMP182]], [[TMP183]]
-; CHECK-NEXT: [[TMP185:%.*]] = shufflevector <2 x i32> [[TMP72]], <2 x i32> [[TMP143]], <2 x i32> <i32 1, i32 2>
-; CHECK-NEXT: [[TMP186:%.*]] = shufflevector <2 x i32> [[TMP72]], <2 x i32> [[TMP143]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: [[TMP187:%.*]] = sub <2 x i32> [[TMP185]], [[TMP186]]
-; CHECK-NEXT: [[TMP188:%.*]] = extractelement <2 x i32> [[TMP195]], i32 0
-; CHECK-NEXT: [[TMP196:%.*]] = extractelement <2 x i32> [[TMP187]], i32 0
-; CHECK-NEXT: [[TMP199:%.*]] = shufflevector <2 x i32> [[TMP187]], <2 x i32> [[TMP195]], <2 x i32> <i32 0, i32 2>
-; CHECK-NEXT: [[ADD94_4:%.*]] = add i32 [[TMP196]], [[TMP188]]
-; CHECK-NEXT: [[TMP191:%.*]] = extractelement <2 x i32> [[TMP195]], i32 1
-; CHECK-NEXT: [[TMP192:%.*]] = extractelement <2 x i32> [[TMP187]], i32 1
-; CHECK-NEXT: [[TMP193:%.*]] = shufflevector <2 x i32> [[TMP187]], <2 x i32> [[TMP195]], <2 x i32> <i32 1, i32 3>
-; CHECK-NEXT: [[ADD94_2:%.*]] = add i32 [[TMP192]], [[TMP191]]
-; CHECK-NEXT: [[TMP194:%.*]] = sub <2 x i32> [[TMP195]], [[TMP187]]
+; CHECK-NEXT: [[TMP193:%.*]] = extractelement <2 x i32> [[TMP160]], i32 0
+; CHECK-NEXT: [[TMP190:%.*]] = extractelement <2 x i32> [[TMP160]], i32 1
+; CHECK-NEXT: [[ADD94_2:%.*]] = add i32 [[TMP193]], [[TMP190]]
+; CHECK-NEXT: [[TMP203:%.*]] = sub i32 [[TMP190]], [[TMP193]]
; CHECK-NEXT: [[TMP244:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_2]], i32 0
; CHECK-NEXT: [[TMP245:%.*]] = shufflevector <2 x i32> [[TMP244]], <2 x i32> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP197:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_4]], i32 0
@@ -294,69 +303,44 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt
; CHECK-NEXT: [[TMP216:%.*]] = add <2 x i32> [[TMP245]], [[TMP198]]
; CHECK-NEXT: [[TMP210:%.*]] = sub <2 x i32> [[TMP245]], [[TMP198]]
; CHECK-NEXT: [[TMP221:%.*]] = shufflevector <2 x i32> [[TMP216]], <2 x i32> [[TMP210]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: [[TMP215:%.*]] = extractelement <2 x i32> [[TMP194]], i32 0
-; CHECK-NEXT: [[TMP203:%.*]] = extractelement <2 x i32> [[TMP194]], i32 1
; CHECK-NEXT: [[ADD105_2:%.*]] = add i32 [[TMP215]], [[TMP203]]
; CHECK-NEXT: [[SUB106_2:%.*]] = sub i32 [[TMP203]], [[TMP215]]
; CHECK-NEXT: [[ADD_I52_2:%.*]] = add i32 [[MUL_I51_4]], [[ADD105_2]]
; CHECK-NEXT: [[XOR_I53_2:%.*]] = xor i32 [[ADD_I52_2]], [[CONV_1]]
; CHECK-NEXT: [[TMP266:%.*]] = add <2 x i32> [[TMP149]], [[TMP221]]
; CHECK-NEXT: [[TMP267:%.*]] = xor <2 x i32> [[TMP266]], [[TMP110]]
-; CHECK-NEXT: [[SHR_I59_2:%.*]] = lshr i32 [[TMP238]], 15
-; CHECK-NEXT: [[AND_I60_2:%.*]] = and i32 [[SHR_I59_2]], 65537
-; CHECK-NEXT: [[MUL_I61_2:%.*]] = mul i32 [[AND_I60_2]], 65535
-; CHECK-NEXT: [[ADD_I62_2:%.*]] = add i32 [[MUL_I61_2]], [[SUB106_2]]
-; CHECK-NEXT: [[XOR_I63_2:%.*]] = xor i32 [[ADD_I62_2]], [[TMP238]]
+; CHECK-NEXT: [[ADD_I62_2:%.*]] = add i32 [[MUL_I61_4]], [[SUB106_2]]
+; CHECK-NEXT: [[XOR_I63_2:%.*]] = xor i32 [[ADD_I62_2]], [[TMP142]]
; CHECK-NEXT: [[ADD108_2:%.*]] = add i32 [[XOR_I53_2]], [[ADD113_1]]
-; CHECK-NEXT: [[TMP206:%.*]] = extractelement <2 x i32> [[TMP267]], i32 0
-; CHECK-NEXT: [[ADD110_2:%.*]] = add i32 [[ADD108_2]], [[TMP206]]
-; CHECK-NEXT: [[TMP207:%.*]] = extractelement <2 x i32> [[TMP267]], i32 1
-; CHECK-NEXT: [[ADD112_2:%.*]] = add i32 [[ADD110_2]], [[TMP207]]
+; CHECK-NEXT: [[TMP200:%.*]] = extractelement <2 x i32> [[TMP267]], i32 0
+; CHECK-NEXT: [[ADD110_2:%.*]] = add i32 [[ADD108_2]], [[TMP200]]
+; CHECK-NEXT: [[TMP201:%.*]] = extractelement <2 x i32> [[TMP267]], i32 1
+; CHECK-NEXT: [[ADD112_2:%.*]] = add i32 [[ADD110_2]], [[TMP201]]
; CHECK-NEXT: [[ADD113_2:%.*]] = add i32 [[ADD112_2]], [[XOR_I63_2]]
-; CHECK-NEXT: [[TMP222:%.*]] = insertelement <2 x i32> [[TMP150]], i32 [[SUB51_2]], i32 0
-; CHECK-NEXT: [[TMP225:%.*]] = insertelement <2 x i32> [[TMP105]], i32 [[SUB59_2]], i32 0
-; CHECK-NEXT: [[TMP226:%.*]] = sub <2 x i32> [[TMP222]], [[TMP225]]
-; CHECK-NEXT: [[TMP227:%.*]] = shufflevector <2 x i32> [[TMP190]], <2 x i32> [[TMP189]], <2 x i32> <i32 1, i32 2>
-; CHECK-NEXT: [[TMP212:%.*]] = shufflevector <2 x i32> [[TMP190]], <2 x i32> [[TMP189]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: [[TMP213:%.*]] = sub <2 x i32> [[TMP227]], [[TMP212]]
-; CHECK-NEXT: [[TMP214:%.*]] = extractelement <2 x i32> [[TMP226]], i32 0
-; CHECK-NEXT: [[TMP237:%.*]] = extractelement <2 x i32> [[TMP213]], i32 0
-; CHECK-NEXT: [[TMP239:%.*]] = shufflevector <2 x i32> [[TMP213]], <2 x i32> [[TMP226]], <2 x i32> <i32 0, i32 2>
-; CHECK-NEXT: [[ADD94_5:%.*]] = add i32 [[TMP237]], [[TMP214]]
-; CHECK-NEXT: [[TMP217:%.*]] = extractelement <2 x i32> [[TMP226]], i32 1
-; CHECK-NEXT: [[TMP218:%.*]] = extractelement <2 x i32> [[TMP213]], i32 1
-; CHECK-NEXT: [[TMP219:%.*]] = shufflevector <2 x i32> [[TMP213]], <2 x i32> [[TMP226]], <2 x i32> <i32 1, i32 3>
-; CHECK-NEXT: [[ADD94_3:%.*]] = add i32 [[TMP218]], [[TMP217]]
-; CHECK-NEXT: [[TMP240:%.*]] = sub <2 x i32> [[TMP226]], [[TMP213]]
-; CHECK-NEXT: [[TMP223:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_3]], i32 0
-; CHECK-NEXT: [[TMP224:%.*]] = shufflevector <2 x i32> [[TMP223]], <2 x i32> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP241:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_5]], i32 0
; CHECK-NEXT: [[TMP242:%.*]] = shufflevector <2 x i32> [[TMP241]], <2 x i32> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP261:%.*]] = add <2 x i32> [[TMP224]], [[TMP242]]
-; CHECK-NEXT: [[TMP262:%.*]] = sub <2 x i32> [[TMP224]], [[TMP242]]
+; CHECK-NEXT: [[TMP261:%.*]] = add <2 x i32> [[TMP242]], [[TMP192]]
+; CHECK-NEXT: [[TMP262:%.*]] = sub <2 x i32> [[TMP242]], [[TMP192]]
; CHECK-NEXT: [[TMP220:%.*]] = shufflevector <2 x i32> [[TMP261]], <2 x i32> [[TMP262]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: [[TMP228:%.*]] = extractelement <2 x i32> [[TMP240]], i32 0
-; CHECK-NEXT: [[TMP229:%.*]] = extractelement <2 x i32> [[TMP240]], i32 1
-; CHECK-NEXT: [[ADD105_3:%.*]] = add i32 [[TMP228]], [[TMP229]]
-; CHECK-NEXT: [[SUB106_3:%.*]] = sub i32 [[TMP229]], [[TMP228]]
-; CHECK-NEXT: [[ADD_I52_3:%.*]] = add i32 [[MUL_I51_6]], [[ADD105_3]]
-; CHECK-NEXT: [[XOR_I53_3:%.*]] = xor i32 [[ADD_I52_3]], [[CONV1]]
+; CHECK-NEXT: [[TMP207:%.*]] = shufflevector <2 x i32> [[TMP262]], <2 x i32> [[TMP261]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: [[TMP213:%.*]] = shufflevector <2 x i32> [[TMP100]], <2 x i32> [[TMP99]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: [[TMP214:%.*]] = add <2 x i32> [[TMP213]], [[TMP207]]
+; CHECK-NEXT: [[TMP217:%.*]] = sub <2 x i32> [[TMP220]], [[TMP101]]
+; CHECK-NEXT: [[TMP222:%.*]] = add <2 x i32> [[TMP184]], [[TMP214]]
+; CHECK-NEXT: [[TMP212:%.*]] = xor <2 x i32> [[TMP222]], [[TMP88]]
; CHECK-NEXT: [[TMP230:%.*]] = lshr <2 x i32> [[TMP102]], <i32 15, i32 15>
; CHECK-NEXT: [[TMP231:%.*]] = and <2 x i32> [[TMP230]], <i32 65537, i32 65537>
; CHECK-NEXT: [[TMP232:%.*]] = mul <2 x i32> [[TMP231]], <i32 65535, i32 65535>
-; CHECK-NEXT: [[TMP233:%.*]] = add <2 x i32> [[TMP232]], [[TMP220]]
+; CHECK-NEXT: [[TMP233:%.*]] = add <2 x i32> [[TMP232]], [[TMP217]]
; CHECK-NEXT: [[TMP234:%.*]] = xor <2 x i32> [[TMP233]], [[TMP102]]
-; CHECK-NEXT: [[SHR_I59_3:%.*]] = lshr i32 [[CONV33]], 15
-; CHECK-NEXT: [[AND_I60_3:%.*]] = and i32 [[SHR_I59_3]], 65537
-; CHECK-NEXT: [[MUL_I61_3:%.*]] = mul i32 [[AND_I60_3]], 65535
-; CHECK-NEXT: [[ADD_I62_3:%.*]] = add i32 [[MUL_I61_3]], [[SUB106_3]]
-; CHECK-NEXT: [[XOR_I63_3:%.*]] = xor i32 [[ADD_I62_3]], [[CONV33]]
-; CHECK-NEXT: [[ADD108_3:%.*]] = add i32 [[XOR_I53_3]], [[ADD113_2]]
-; CHECK-NEXT: [[TMP235:%.*]] = extractelement <2 x i32> [[TMP234]], i32 0
-; CHECK-NEXT: [[ADD110_3:%.*]] = add i32 [[ADD108_3]], [[TMP235]]
-; CHECK-NEXT: [[TMP236:%.*]] = extractelement <2 x i32> [[TMP234]], i32 1
-; CHECK-NEXT: [[ADD112_3:%.*]] = add i32 [[ADD110_3]], [[TMP236]]
-; CHECK-NEXT: [[ADD113_3:%.*]] = add i32 [[ADD112_3]], [[XOR_I63_3]]
+; CHECK-NEXT: [[TMP218:%.*]] = extractelement <2 x i32> [[TMP212]], i32 0
+; CHECK-NEXT: [[ADD108_3:%.*]] = add i32 [[TMP218]], [[ADD113_2]]
+; CHECK-NEXT: [[TMP219:%.*]] = extractelement <2 x i32> [[TMP212]], i32 1
+; CHECK-NEXT: [[ADD110_3:%.*]] = add i32 [[ADD108_3]], [[TMP219]]
+; CHECK-NEXT: [[TMP223:%.*]] = extractelement <2 x i32> [[TMP234]], i32 0
+; CHECK-NEXT: [[ADD112_3:%.*]] = add i32 [[ADD110_3]], [[TMP223]]
+; CHECK-NEXT: [[TMP224:%.*]] = extractelement <2 x i32> [[TMP234]], i32 1
+; CHECK-NEXT: [[ADD113_3:%.*]] = add i32 [[ADD112_3]], [[TMP224]]
; CHECK-NEXT: ret i32 [[ADD113_3]]
;
entry:
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/mixed-extracts-types.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/mixed-extracts-types.ll
index 0d5c644b9cc0f..e46b19d610776 100644
--- a/llvm/test/Transforms/SLPVectorizer/RISCV/mixed-extracts-types.ll
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/mixed-extracts-types.ll
@@ -12,7 +12,7 @@ define i32 @test() {
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne <2 x i8> [[TMP0]], zeroinitializer
; CHECK-NEXT: [[TMP2:%.*]] = zext <2 x i1> [[TMP1]] to <2 x i16>
; CHECK-NEXT: store <2 x i16> [[TMP2]], ptr getelementptr ([0 x i16], ptr null, i64 0, i64 -14), align 2
-; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x i8> [[TMP0]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr ([9 x i8], ptr null, i64 -2, i64 5), align 1
; CHECK-NEXT: [[CONV5_1:%.*]] = sext i8 [[TMP3]] to i32
; CHECK-NEXT: store i32 [[CONV5_1]], ptr getelementptr ([0 x i32], ptr null, i64 0, i64 -13), align 4
; CHECK-NEXT: ret i32 0
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/strided-loads-with-external-indices.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/strided-loads-with-external-indices.ll
index c72d6cc75d827..93f5b5e46d2c3 100644
--- a/llvm/test/Transforms/SLPVectorizer/RISCV/strided-loads-with-external-indices.ll
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/strided-loads-with-external-indices.ll
@@ -7,6 +7,7 @@ define void @test() {
; CHECK-LABEL: define void @test
; CHECK-SAME: () #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SUB4_I_I65_US:%.*]] = or i64 0, 1
; CHECK-NEXT: br label [[BODY:%.*]]
; CHECK: body:
; CHECK-NEXT: [[ADD_I_I62_US:%.*]] = shl i64 0, 0
@@ -17,8 +18,7 @@ define void @test() {
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i32> [[TMP3]], i32 0
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i32> [[TMP3]], i32 1
; CHECK-NEXT: [[CMP_I_I_I_I67_US:%.*]] = icmp slt i32 [[TMP4]], [[TMP5]]
-; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x i64> [[TMP1]], i32 1
-; CHECK-NEXT: [[SPEC_SELECT_I_I68_US:%.*]] = select i1 false, i64 [[TMP6]], i64 0
+; CHECK-NEXT: [[SPEC_SELECT_I_I68_US:%.*]] = select i1 false, i64 [[SUB4_I_I65_US]], i64 0
; CHECK-NEXT: br label [[BODY]]
;
entry: