[llvm] [LV] Convert gather loads with invariant stride into strided loads (PR #147297)
Mel Chen via llvm-commits
llvm-commits at lists.llvm.org
Mon Feb 9 20:32:29 PST 2026
https://github.com/Mel-Chen updated https://github.com/llvm/llvm-project/pull/147297
From 1f5ae3eb5bba16c195784d4965677ad37a27e8b4 Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Mon, 30 Jun 2025 19:01:57 -0700
Subject: [PATCH 01/30] New VPWidenStridedLoadRecipe
---
.../Transforms/Vectorize/LoopVectorize.cpp | 4 +-
llvm/lib/Transforms/Vectorize/VPlan.h | 51 +++++++++++++++-
.../Transforms/Vectorize/VPlanAnalysis.cpp | 6 +-
.../lib/Transforms/Vectorize/VPlanRecipes.cpp | 59 +++++++++++++++++--
.../Transforms/Vectorize/VPlanTransforms.cpp | 10 ++--
.../Transforms/Vectorize/VPlanVerifier.cpp | 3 +-
6 files changed, 120 insertions(+), 13 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index b5978c670dd94..ad658018c5880 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -4053,7 +4053,8 @@ void LoopVectorizationPlanner::emitInvalidCostRemarks(
.Case([](const VPHeaderPHIRecipe *R) { return Instruction::PHI; })
.Case(
[](const VPWidenStoreRecipe *R) { return Instruction::Store; })
- .Case([](const VPWidenLoadRecipe *R) { return Instruction::Load; })
+ .Case<VPWidenLoadRecipe, VPWidenStridedLoadRecipe>(
+ [](const auto *R) { return Instruction::Load; })
.Case<VPWidenCallRecipe, VPWidenIntrinsicRecipe>(
[](const auto *R) { return Instruction::Call; })
.Case<VPInstruction, VPWidenRecipe, VPReplicateRecipe,
@@ -4154,6 +4155,7 @@ static bool willGenerateVectors(VPlan &Plan, ElementCount VF,
case VPRecipeBase::VPReductionPHISC:
case VPRecipeBase::VPInterleaveEVLSC:
case VPRecipeBase::VPInterleaveSC:
+ case VPRecipeBase::VPWidenStridedLoadSC:
case VPRecipeBase::VPWidenLoadEVLSC:
case VPRecipeBase::VPWidenLoadSC:
case VPRecipeBase::VPWidenStoreEVLSC:
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index eac5b58841e80..181baea45d287 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -422,6 +422,7 @@ class LLVM_ABI_FOR_TEST VPRecipeBase
VPWidenCastSC,
VPWidenGEPSC,
VPWidenIntrinsicSC,
+ VPWidenStridedLoadSC,
VPWidenLoadEVLSC,
VPWidenLoadSC,
VPWidenStoreEVLSC,
@@ -628,6 +629,7 @@ class VPSingleDefRecipe : public VPRecipeBase, public VPRecipeValue {
case VPRecipeBase::VPInterleaveEVLSC:
case VPRecipeBase::VPInterleaveSC:
case VPRecipeBase::VPIRInstructionSC:
+ case VPRecipeBase::VPWidenStridedLoadSC:
case VPRecipeBase::VPWidenLoadEVLSC:
case VPRecipeBase::VPWidenLoadSC:
case VPRecipeBase::VPWidenStoreEVLSC:
@@ -3471,7 +3473,8 @@ class LLVM_ABI_FOR_TEST VPWidenMemoryRecipe : public VPRecipeBase,
return R->getVPRecipeID() == VPRecipeBase::VPWidenLoadSC ||
R->getVPRecipeID() == VPRecipeBase::VPWidenStoreSC ||
R->getVPRecipeID() == VPRecipeBase::VPWidenLoadEVLSC ||
- R->getVPRecipeID() == VPRecipeBase::VPWidenStoreEVLSC;
+ R->getVPRecipeID() == VPRecipeBase::VPWidenStoreEVLSC ||
+ R->getVPRecipeID() == VPRecipeBase::VPWidenStridedLoadSC;
}
static inline bool classof(const VPUser *U) {
@@ -3598,6 +3601,52 @@ struct VPWidenLoadEVLRecipe final : public VPWidenMemoryRecipe,
#endif
};
+/// A recipe for strided load operations, using the base address, stride, and an
+/// optional mask. This recipe will generate a vp.strided.load intrinsic call
+/// to represent memory accesses with a fixed stride.
+struct VPWidenStridedLoadRecipe final : public VPWidenMemoryRecipe,
+ public VPRecipeValue {
+ VPWidenStridedLoadRecipe(LoadInst &Load, VPValue *Addr, VPValue *Stride,
+ VPValue *VF, VPValue *Mask,
+ const VPIRMetadata &Metadata, DebugLoc DL)
+ : VPWidenMemoryRecipe(
+ VPRecipeBase::VPWidenStridedLoadSC, Load, {Addr, Stride, VF},
+ /*Consecutive=*/false, /*Reverse=*/false, Metadata, DL),
+ VPRecipeValue(this, &Load) {
+ setMask(Mask);
+ }
+
+ VPWidenStridedLoadRecipe *clone() override {
+ return new VPWidenStridedLoadRecipe(cast<LoadInst>(Ingredient), getAddr(),
+ getStride(), getVF(), getMask(), *this,
+ getDebugLoc());
+ }
+
+ VP_CLASSOF_IMPL(VPRecipeBase::VPWidenStridedLoadSC);
+
+ /// Return the stride operand.
+ VPValue *getStride() const { return getOperand(1); }
+
+ /// Return the VF operand.
+ VPValue *getVF() const { return getOperand(2); }
+
+ /// Generate a strided load.
+ void execute(VPTransformState &State) override;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ /// Print the recipe.
+ void printRecipe(raw_ostream &O, const Twine &Indent,
+ VPSlotTracker &SlotTracker) const override;
+#endif
+
+ /// Returns true if the recipe only uses the first lane of operand \p Op.
+ bool usesFirstLaneOnly(const VPValue *Op) const override {
+ assert(is_contained(operands(), Op) &&
+ "Op must be an operand of the recipe");
+ return Op == getAddr() || Op == getStride() || Op == getVF();
+ }
+};
+
/// A recipe for widening store operations, using the stored value, the address
/// to store to and an optional mask.
struct LLVM_ABI_FOR_TEST VPWidenStoreRecipe final : public VPWidenMemoryRecipe {
diff --git a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp
index 4f97f8000c187..9b62532cecda7 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp
@@ -193,8 +193,10 @@ Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPWidenCallRecipe *R) {
}
Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPWidenMemoryRecipe *R) {
- assert((isa<VPWidenLoadRecipe, VPWidenLoadEVLRecipe>(R)) &&
- "Store recipes should not define any values");
+ assert(
+ (isa<VPWidenLoadRecipe, VPWidenLoadEVLRecipe, VPWidenStridedLoadRecipe>(
+ R)) &&
+ "Store recipes should not define any values");
return cast<LoadInst>(&R->getIngredient())->getType();
}
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index 88cd5129b51ab..3d2f4380e5e05 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -90,6 +90,7 @@ bool VPRecipeBase::mayWriteToMemory() const {
case VPWidenCastSC:
case VPWidenGEPSC:
case VPWidenIntOrFpInductionSC:
+ case VPWidenStridedLoadSC:
case VPWidenLoadEVLSC:
case VPWidenLoadSC:
case VPWidenPHISC:
@@ -113,6 +114,7 @@ bool VPRecipeBase::mayReadFromMemory() const {
return cast<VPExpressionRecipe>(this)->mayReadOrWriteMemory();
case VPInstructionSC:
return cast<VPInstruction>(this)->opcodeMayReadOrWriteFromMemory();
+ case VPWidenStridedLoadSC:
case VPWidenLoadEVLSC:
case VPWidenLoadSC:
return true;
@@ -204,6 +206,7 @@ bool VPRecipeBase::mayHaveSideEffects() const {
case VPInterleaveEVLSC:
case VPInterleaveSC:
return mayWriteToMemory();
+ case VPWidenStridedLoadSC:
case VPWidenLoadEVLSC:
case VPWidenLoadSC:
case VPWidenStoreEVLSC:
@@ -3712,9 +3715,11 @@ InstructionCost VPWidenMemoryRecipe::computeCost(ElementCount VF,
Type *Ty = toVectorTy(getLoadStoreType(&Ingredient), VF);
unsigned AS = cast<PointerType>(Ctx.Types.inferScalarType(getAddr()))
->getAddressSpace();
- unsigned Opcode = isa<VPWidenLoadRecipe, VPWidenLoadEVLRecipe>(this)
- ? Instruction::Load
- : Instruction::Store;
+ unsigned Opcode =
+ isa<VPWidenLoadRecipe, VPWidenLoadEVLRecipe, VPWidenStridedLoadRecipe>(
+ this)
+ ? Instruction::Load
+ : Instruction::Store;
if (!Consecutive) {
// TODO: Using the original IR may not be accurate.
@@ -3724,8 +3729,13 @@ InstructionCost VPWidenMemoryRecipe::computeCost(ElementCount VF,
"Inconsecutive memory access should not have the order.");
const Value *Ptr = getLoadStorePointerOperand(&Ingredient);
- Type *PtrTy = Ptr->getType();
+ if (isa<VPWidenStridedLoadRecipe>(this))
+ return Ctx.TTI.getMemIntrinsicInstrCost(
+ MemIntrinsicCostAttributes(Intrinsic::experimental_vp_strided_load,
+ Ty, Ptr, IsMasked, Alignment, &Ingredient),
+ Ctx.CostKind);
+ Type *PtrTy = Ptr->getType();
// If the address value is uniform across all lanes, then the address can be
// calculated with scalar type and broadcast.
if (!vputils::isSingleScalar(getAddr()))
@@ -3872,6 +3882,47 @@ void VPWidenLoadEVLRecipe::printRecipe(raw_ostream &O, const Twine &Indent,
}
#endif
+void VPWidenStridedLoadRecipe::execute(VPTransformState &State) {
+ Type *ScalarDataTy = getLoadStoreType(&Ingredient);
+ auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
+ const Align Alignment = getLoadStoreAlignment(&Ingredient);
+
+ auto &Builder = State.Builder;
+ Value *Addr = State.get(getAddr(), /*IsScalar*/ true);
+ Value *StrideInBytes = State.get(getStride(), /*IsScalar*/ true);
+ Value *Mask = nullptr;
+ if (VPValue *VPMask = getMask())
+ Mask = State.get(VPMask);
+ else
+ Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
+ Value *RunTimeVF = Builder.CreateZExtOrTrunc(State.get(getVF(), VPLane(0)),
+ Builder.getInt32Ty());
+
+ auto *PtrTy = Addr->getType();
+ auto *StrideTy = StrideInBytes->getType();
+ CallInst *NewLI = Builder.CreateIntrinsic(
+ Intrinsic::experimental_vp_strided_load, {DataTy, PtrTy, StrideTy},
+ {Addr, StrideInBytes, Mask, RunTimeVF}, nullptr, "wide.strided.load");
+ NewLI->addParamAttr(
+ 0, Attribute::getWithAlignment(NewLI->getContext(), Alignment));
+ applyMetadata(*NewLI);
+ State.set(this, NewLI);
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+void VPWidenStridedLoadRecipe::printRecipe(raw_ostream &O, const Twine &Indent,
+ VPSlotTracker &SlotTracker) const {
+ O << Indent << "WIDEN ";
+ printAsOperand(O, SlotTracker);
+ O << " = load ";
+ getAddr()->printAsOperand(O, SlotTracker);
+ O << ", stride = ";
+ getStride()->printAsOperand(O, SlotTracker);
+ O << ", runtimeVF = ";
+ getVF()->printAsOperand(O, SlotTracker);
+}
+#endif
+
void VPWidenStoreRecipe::execute(VPTransformState &State) {
VPValue *StoredVPValue = getStoredValue();
bool CreateScatter = !isConsecutive();
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 0d951a59cc8ba..a793cb95d7e36 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -3082,10 +3082,12 @@ static void fixupVFUsersForEVL(VPlan &Plan, VPValue &EVL) {
VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion();
VPBasicBlock *Header = LoopRegion->getEntryBasicBlock();
- assert(all_of(Plan.getVF().users(),
- IsaPred<VPVectorEndPointerRecipe, VPScalarIVStepsRecipe,
- VPWidenIntOrFpInductionRecipe>) &&
- "User of VF that we can't transform to EVL.");
+ assert(
+ all_of(
+ Plan.getVF().users(),
+ IsaPred<VPVectorEndPointerRecipe, VPScalarIVStepsRecipe,
+ VPWidenIntOrFpInductionRecipe, VPWidenStridedLoadRecipe>) &&
+ "User of VF that we can't transform to EVL.");
Plan.getVF().replaceUsesWithIf(&EVL, [](VPUser &U, unsigned Idx) {
return isa<VPWidenIntOrFpInductionRecipe, VPScalarIVStepsRecipe>(U);
});
diff --git a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp
index 9098b9ce8562d..8943652f3b281 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp
@@ -168,7 +168,8 @@ bool VPlanVerifier::verifyEVLRecipe(const VPInstruction &EVL) const {
return VerifyEVLUse(*S, S->getNumOperands() - 1);
})
.Case<VPWidenStoreEVLRecipe, VPReductionEVLRecipe,
- VPWidenIntOrFpInductionRecipe, VPWidenPointerInductionRecipe>(
+ VPWidenIntOrFpInductionRecipe, VPWidenPointerInductionRecipe,
+ VPWidenStridedLoadRecipe>(
[&](const VPRecipeBase *S) { return VerifyEVLUse(*S, 2); })
.Case([&](const VPScalarIVStepsRecipe *R) {
if (R->getNumOperands() != 3) {
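For reference, a minimal sketch of the IR this recipe emits (illustrative names; assumes VF = vscale x 2, i32 elements, a 64-byte stride, and 4-byte alignment, mirroring VPWidenStridedLoadRecipe::execute above):

  %wide.strided.load = call <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0.i64(ptr align 4 %base, i64 64, <vscale x 2 x i1> %mask, i32 %evl)
  ; lane i loads from %base + i * 64 bytes; the align parameter attribute on
  ; the pointer argument carries over the alignment of the original scalar load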
From 031041f4d2c51585303b2bdb3348a7796a9f2d9f Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Mon, 7 Jul 2025 01:02:03 -0700
Subject: [PATCH 02/30] Expand VPVectorPointerRecipe to support stride
---
.../Transforms/Vectorize/LoopVectorize.cpp | 5 ++-
llvm/lib/Transforms/Vectorize/VPlan.h | 17 ++++---
llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp | 8 +++-
.../LoopVectorize/vplan-dot-printing.ll | 4 +-
.../vplan-printing-before-execute.ll | 2 +-
.../LoopVectorize/vplan-printing.ll | 44 +++++++++----------
6 files changed, 47 insertions(+), 33 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index ad658018c5880..e0bc6a9886d8e 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -7746,7 +7746,10 @@ VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(VPInstruction *VPI,
Ptr, &Plan.getVF(), getLoadStoreType(I),
/*Stride*/ -1, Flags, VPI->getDebugLoc());
} else {
- VectorPtr = new VPVectorPointerRecipe(Ptr, getLoadStoreType(I),
+ const DataLayout &DL = I->getDataLayout();
+ auto *StrideTy = DL.getIndexType(Ptr->getUnderlyingValue()->getType());
+ VPValue *StrideOne = Plan.getOrAddLiveIn(ConstantInt::get(StrideTy, 1));
+ VectorPtr = new VPVectorPointerRecipe(Ptr, getLoadStoreType(I), StrideOne,
GEP ? GEP->getNoWrapFlags()
: GEPNoWrapFlags::none(),
VPI->getDebugLoc());
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index 181baea45d287..8400378ae8a7b 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -2128,21 +2128,25 @@ class VPVectorEndPointerRecipe : public VPRecipeWithIRFlags,
};
/// A recipe to compute the pointers for widened memory accesses of \p
+/// SourceElementTy, with the \p Stride expressed in units of \p
/// SourceElementTy. Unrolling adds an extra offset operand for unrolled parts >
/// 0 and it produces `GEP Ptr, Offset`. The offset for unrolled part 0 is 0.
class VPVectorPointerRecipe : public VPRecipeWithIRFlags {
Type *SourceElementTy;
public:
- VPVectorPointerRecipe(VPValue *Ptr, Type *SourceElementTy,
+ VPVectorPointerRecipe(VPValue *Ptr, Type *SourceElementTy, VPValue *Stride,
GEPNoWrapFlags GEPFlags, DebugLoc DL)
- : VPRecipeWithIRFlags(VPRecipeBase::VPVectorPointerSC, Ptr, GEPFlags, DL),
+ : VPRecipeWithIRFlags(VPRecipeBase::VPVectorPointerSC,
+ ArrayRef<VPValue *>({Ptr, Stride}), GEPFlags, DL),
SourceElementTy(SourceElementTy) {}
VP_CLASSOF_IMPL(VPRecipeBase::VPVectorPointerSC)
+ VPValue *getStride() const { return getOperand(1); }
+
VPValue *getOffset() {
- return getNumOperands() == 2 ? getOperand(1) : nullptr;
+ return getNumOperands() > 2 ? getOperand(2) : nullptr;
}
void execute(VPTransformState &State) override;
@@ -2164,8 +2168,9 @@ class VPVectorPointerRecipe : public VPRecipeWithIRFlags {
}
VPVectorPointerRecipe *clone() override {
- auto *Clone = new VPVectorPointerRecipe(getOperand(0), SourceElementTy,
- getGEPNoWrapFlags(), getDebugLoc());
+ auto *Clone =
+ new VPVectorPointerRecipe(getOperand(0), SourceElementTy, getStride(),
+ getGEPNoWrapFlags(), getDebugLoc());
if (auto *Off = getOffset())
Clone->addOperand(Off);
return Clone;
@@ -3636,7 +3641,7 @@ struct VPWidenStridedLoadRecipe final : public VPWidenMemoryRecipe,
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void printRecipe(raw_ostream &O, const Twine &Indent,
- VPSlotTracker &SlotTracker) const override;
+ VPSlotTracker &SlotTracker) const override;
#endif
/// Returns true if the recipe only uses the first lane of operand \p Op.
diff --git a/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp b/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp
index 53cac9fcd80d6..03356f36e476b 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp
@@ -344,8 +344,14 @@ void UnrollState::unrollRecipeByUF(VPRecipeBase &R) {
VPValue *VFxPart = Builder.createOverflowingOp(
Instruction::Mul, {VF, Plan.getConstantInt(IndexTy, Part)},
{true, true});
+ VPValue *Stride = Builder.createScalarZExtOrTrunc(
+ VPR->getStride(), IndexTy, TypeInfo.inferScalarType(VPR->getStride()),
+ DebugLoc::getUnknown());
+ VPValue *Offset =
+ Builder.createOverflowingOp(Instruction::Mul, {VFxPart, Stride});
Copy->setOperand(0, VPR->getOperand(0));
- Copy->addOperand(VFxPart);
+ Copy->setOperand(1, VPR->getOperand(1));
+ Copy->addOperand(Offset);
continue;
}
if (auto *Red = dyn_cast<VPReductionRecipe>(&R)) {
diff --git a/llvm/test/Transforms/LoopVectorize/vplan-dot-printing.ll b/llvm/test/Transforms/LoopVectorize/vplan-dot-printing.ll
index ee3564bc87be4..75d2b29e180a9 100644
--- a/llvm/test/Transforms/LoopVectorize/vplan-dot-printing.ll
+++ b/llvm/test/Transforms/LoopVectorize/vplan-dot-printing.ll
@@ -42,11 +42,11 @@ define void @print_call_and_memory(i64 %n, ptr noalias %y, ptr noalias %x) nounw
; CHECK-NEXT: " EMIT vp\<[[CAN_IV:%.+]]\> = CANONICAL-INDUCTION ir\<0\>, vp\<[[CAN_IV_NEXT:%.+]]\>\l" +
; CHECK-NEXT: " vp\<[[STEPS:%.+]]\> = SCALAR-STEPS vp\<[[CAN_IV]]\>, ir\<1\>, vp\<[[VF]]\>\l" +
; CHECK-NEXT: " CLONE ir\<%arrayidx\> = getelementptr inbounds ir\<%y\>, vp\<[[STEPS]]\>\l" +
-; CHECK-NEXT: " vp\<[[VEC_PTR:%.+]]\> = vector-pointer inbounds ir\<%arrayidx\>\l" +
+; CHECK-NEXT: " vp\<[[VEC_PTR:%.+]]\> = vector-pointer inbounds ir\<%arrayidx\>, ir\<1\>\l" +
; CHECK-NEXT: " WIDEN ir\<%lv\> = load vp\<[[VEC_PTR]]\>\l" +
; CHECK-NEXT: " WIDEN-INTRINSIC ir\<%call\> = call llvm.sqrt(ir\<%lv\>)\l" +
; CHECK-NEXT: " CLONE ir\<%arrayidx2\> = getelementptr inbounds ir\<%x\>, vp\<[[STEPS]]\>\l" +
-; CHECK-NEXT: " vp\<[[VEC_PTR2:%.+]]\> = vector-pointer inbounds ir\<%arrayidx2\>\l" +
+; CHECK-NEXT: " vp\<[[VEC_PTR2:%.+]]\> = vector-pointer inbounds ir\<%arrayidx2\>, ir\<1\>\l" +
; CHECK-NEXT: " WIDEN store vp\<[[VEC_PTR2]]\>, ir\<%call\>\l" +
; CHECK-NEXT: " EMIT vp\<[[CAN_IV_NEXT]]\> = add nuw vp\<[[CAN_IV]]\>, vp\<[[VFxUF]]\>\l" +
; CHECK-NEXT: " EMIT branch-on-count vp\<[[CAN_IV_NEXT]]\>, vp\<[[VEC_TC]]\>\l" +
diff --git a/llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll b/llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll
index 9ef3fd48efcd5..5e8852e351a91 100644
--- a/llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll
+++ b/llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll
@@ -69,7 +69,7 @@ define void @test_tc_less_than_16(ptr %A, i64 %N) {
; CHECK-NEXT: Successor(s): vector.body
; CHECK-EMPTY:
; CHECK-NEXT: vector.body:
-; CHECK-NEXT: vp<[[VPTR2:%.]]> = vector-pointer ir<%A>, ir<8>
+; CHECK-NEXT: vp<[[VPTR2:%.]]> = vector-pointer ir<%A>, ir<1>, ir<8>
; CHECK-NEXT: WIDEN ir<%l> = load ir<%A>
; CHECK-NEXT: WIDEN ir<%l>.1 = load vp<[[VPTR2]]>
; CHECK-NEXT: WIDEN ir<%add> = add nsw ir<%l>, ir<10>
diff --git a/llvm/test/Transforms/LoopVectorize/vplan-printing.ll b/llvm/test/Transforms/LoopVectorize/vplan-printing.ll
index 1d5d71b0a64c0..0a4249c0331c3 100644
--- a/llvm/test/Transforms/LoopVectorize/vplan-printing.ll
+++ b/llvm/test/Transforms/LoopVectorize/vplan-printing.ll
@@ -26,11 +26,11 @@ define void @print_call_and_memory(i64 %n, ptr noalias %y, ptr noalias %x) nounw
; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
; CHECK-NEXT: vp<[[VP4:%[0-9]+]]> = SCALAR-STEPS vp<[[VP3]]>, ir<1>, vp<[[VP0]]>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%y>, vp<[[VP4]]>
-; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = vector-pointer inbounds ir<%arrayidx>
+; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = vector-pointer inbounds ir<%arrayidx>, ir<1>
; CHECK-NEXT: WIDEN ir<%lv> = load vp<[[VP5]]>
; CHECK-NEXT: WIDEN-INTRINSIC ir<%call> = call llvm.sqrt(ir<%lv>)
; CHECK-NEXT: CLONE ir<%arrayidx2> = getelementptr inbounds ir<%x>, vp<[[VP4]]>
-; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer inbounds ir<%arrayidx2>
+; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer inbounds ir<%arrayidx2>, ir<1>
; CHECK-NEXT: WIDEN store vp<[[VP6]]>, ir<%call>
; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]>
; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]>
@@ -101,13 +101,13 @@ define void @print_widen_gep_and_select(i64 %n, ptr noalias %y, ptr noalias %x,
; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION ir<0>, ir<1>, vp<[[VP0]]>
; CHECK-NEXT: vp<[[VP4:%[0-9]+]]> = SCALAR-STEPS vp<[[VP3]]>, ir<1>, vp<[[VP0]]>
; CHECK-NEXT: WIDEN-GEP Inv[Var] ir<%arrayidx> = getelementptr inbounds ir<%y>, ir<%iv>
-; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = vector-pointer inbounds ir<%arrayidx>
+; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = vector-pointer inbounds ir<%arrayidx>, ir<1>
; CHECK-NEXT: WIDEN ir<%lv> = load vp<[[VP5]]>
; CHECK-NEXT: WIDEN ir<%cmp> = icmp eq ir<%arrayidx>, ir<%z>
; CHECK-NEXT: WIDEN ir<%sel> = select ir<%cmp>, ir<1.000000e+01>, ir<2.000000e+01>
; CHECK-NEXT: WIDEN ir<%add> = fadd ir<%lv>, ir<%sel>
; CHECK-NEXT: CLONE ir<%arrayidx2> = getelementptr inbounds ir<%x>, vp<[[VP4]]>
-; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer inbounds ir<%arrayidx2>
+; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer inbounds ir<%arrayidx2>, ir<1>
; CHECK-NEXT: WIDEN store vp<[[VP6]]>, ir<%add>
; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]>
; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]>
@@ -204,7 +204,7 @@ define void @print_replicate_predicated_phi(i64 %n, ptr %x) {
; CHECK-NEXT: if.then.0:
; CHECK-NEXT: BLEND ir<%d> = ir<0> vp<%7>/ir<%cmp>
; CHECK-NEXT: CLONE ir<%idx> = getelementptr ir<%x>, vp<[[VP5]]>
-; CHECK-NEXT: vp<[[VP8:%[0-9]+]]> = vector-pointer ir<%idx>
+; CHECK-NEXT: vp<[[VP8:%[0-9]+]]> = vector-pointer ir<%idx>, ir<1>
; CHECK-NEXT: WIDEN store vp<[[VP8]]>, ir<%d>
; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP4]]>, vp<[[VP1]]>
; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]>
@@ -382,7 +382,7 @@ define void @recipe_debug_loc_location(ptr nocapture %src) !dbg !5 {
; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
; CHECK-NEXT: vp<[[VP4:%[0-9]+]]> = SCALAR-STEPS vp<[[VP3]]>, ir<1>, vp<[[VP0]]>
; CHECK-NEXT: CLONE ir<%isd> = getelementptr inbounds ir<%src>, vp<[[VP4]]>, !dbg /tmp/s.c:5:3
-; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = vector-pointer inbounds ir<%isd>, !dbg /tmp/s.c:6:3
+; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = vector-pointer inbounds ir<%isd>, ir<1>, !dbg /tmp/s.c:6:3
; CHECK-NEXT: WIDEN ir<%lsd> = load vp<[[VP5]]>, !dbg /tmp/s.c:6:3
; CHECK-NEXT: WIDEN ir<%psd> = add nuw nsw ir<%lsd>, ir<23>, !dbg /tmp/s.c:7:3
; CHECK-NEXT: WIDEN ir<%cmp1> = icmp slt ir<%lsd>, ir<100>, !dbg /tmp/s.c:8:3
@@ -409,7 +409,7 @@ define void @recipe_debug_loc_location(ptr nocapture %src) !dbg !5 {
; CHECK-EMPTY:
; CHECK-NEXT: if.then.0:
; CHECK-NEXT: BLEND ir<%ysd.0> = ir<%psd> vp<%9>/vp<[[VP8]]>, !dbg /tmp/s.c:14:3
-; CHECK-NEXT: vp<[[VP10:%[0-9]+]]> = vector-pointer inbounds ir<%isd>, !dbg /tmp/s.c:15:3
+; CHECK-NEXT: vp<[[VP10:%[0-9]+]]> = vector-pointer inbounds ir<%isd>, ir<1>, !dbg /tmp/s.c:15:3
; CHECK-NEXT: WIDEN store vp<[[VP10]]>, ir<%ysd.0>, !dbg /tmp/s.c:15:3
; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]>
; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]>
@@ -568,7 +568,7 @@ define i32 @print_exit_value(ptr %ptr, i32 %off) {
; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]>
; CHECK-NEXT: vp<[[VP4:%[0-9]+]]> = SCALAR-STEPS vp<[[VP3]]>, ir<1>, vp<[[VP0]]>
; CHECK-NEXT: CLONE ir<%gep> = getelementptr inbounds ir<%ptr>, vp<[[VP4]]>
-; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = vector-pointer inbounds ir<%gep>
+; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = vector-pointer inbounds ir<%gep>, ir<1>
; CHECK-NEXT: WIDEN store vp<[[VP5]]>, ir<0>
; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]>
; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]>
@@ -638,13 +638,13 @@ define void @print_fast_math_flags(i64 %n, ptr noalias %y, ptr noalias %x, ptr %
; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
; CHECK-NEXT: vp<[[VP4:%[0-9]+]]> = SCALAR-STEPS vp<[[VP3]]>, ir<1>, vp<[[VP0]]>
; CHECK-NEXT: CLONE ir<%gep.y> = getelementptr inbounds ir<%y>, vp<[[VP4]]>
-; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = vector-pointer inbounds ir<%gep.y>
+; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = vector-pointer inbounds ir<%gep.y>, ir<1>
; CHECK-NEXT: WIDEN ir<%lv> = load vp<[[VP5]]>
; CHECK-NEXT: WIDEN ir<%add> = fadd nnan ir<%lv>, ir<1.000000e+00>
; CHECK-NEXT: WIDEN ir<%mul> = fmul fast ir<%add>, ir<2.000000e+00>
; CHECK-NEXT: WIDEN ir<%div> = fdiv reassoc nsz contract ir<%mul>, ir<2.000000e+00>
; CHECK-NEXT: CLONE ir<%gep.x> = getelementptr inbounds ir<%x>, vp<[[VP4]]>
-; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer inbounds ir<%gep.x>
+; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer inbounds ir<%gep.x>, ir<1>
; CHECK-NEXT: WIDEN store vp<[[VP6]]>, ir<%div>
; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]>
; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]>
@@ -717,12 +717,12 @@ define void @print_exact_flags(i64 %n, ptr noalias %x) {
; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
; CHECK-NEXT: vp<[[VP4:%[0-9]+]]> = SCALAR-STEPS vp<[[VP3]]>, ir<1>, vp<[[VP0]]>
; CHECK-NEXT: CLONE ir<%gep.x> = getelementptr inbounds ir<%x>, vp<[[VP4]]>
-; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = vector-pointer inbounds ir<%gep.x>
+; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = vector-pointer inbounds ir<%gep.x>, ir<1>
; CHECK-NEXT: WIDEN ir<%lv> = load vp<[[VP5]]>
; CHECK-NEXT: WIDEN ir<%div.1> = udiv exact ir<%lv>, ir<20>
; CHECK-NEXT: WIDEN ir<%div.2> = udiv ir<%lv>, ir<60>
; CHECK-NEXT: WIDEN ir<%add> = add nuw nsw ir<%div.1>, ir<%div.2>
-; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer inbounds ir<%gep.x>
+; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer inbounds ir<%gep.x>, ir<1>
; CHECK-NEXT: WIDEN store vp<[[VP6]]>, ir<%add>
; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]>
; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]>
@@ -793,7 +793,7 @@ define void @print_call_flags(ptr readonly %src, ptr noalias %dest, i64 %n) {
; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
; CHECK-NEXT: vp<[[VP4:%[0-9]+]]> = SCALAR-STEPS vp<[[VP3]]>, ir<1>, vp<[[VP0]]>
; CHECK-NEXT: CLONE ir<%ld.addr> = getelementptr inbounds ir<%src>, vp<[[VP4]]>
-; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = vector-pointer inbounds ir<%ld.addr>
+; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = vector-pointer inbounds ir<%ld.addr>, ir<1>
; CHECK-NEXT: WIDEN ir<%ld.value> = load vp<[[VP5]]>
; CHECK-NEXT: WIDEN ir<%ifcond> = fcmp oeq ir<%ld.value>, ir<5.000000e+00>
; CHECK-NEXT: Successor(s): pred.call
@@ -819,7 +819,7 @@ define void @print_call_flags(ptr readonly %src, ptr noalias %dest, i64 %n) {
; CHECK-NEXT: WIDEN ir<%fadd> = fadd vp<[[VP6]]>, vp<[[VP7]]>
; CHECK-NEXT: BLEND ir<%st.value> = ir<%ld.value> ir<%fadd>/ir<%ifcond>
; CHECK-NEXT: CLONE ir<%st.addr> = getelementptr inbounds ir<%dest>, vp<[[VP4]]>
-; CHECK-NEXT: vp<[[VP8:%[0-9]+]]> = vector-pointer inbounds ir<%st.addr>
+; CHECK-NEXT: vp<[[VP8:%[0-9]+]]> = vector-pointer inbounds ir<%st.addr>, ir<1>
; CHECK-NEXT: WIDEN store vp<[[VP8]]>, ir<%st.value>
; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]>
; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]>
@@ -895,12 +895,12 @@ define void @print_disjoint_flags(i64 %n, ptr noalias %x) {
; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
; CHECK-NEXT: vp<[[VP4:%[0-9]+]]> = SCALAR-STEPS vp<[[VP3]]>, ir<1>, vp<[[VP0]]>
; CHECK-NEXT: CLONE ir<%gep.x> = getelementptr inbounds ir<%x>, vp<[[VP4]]>
-; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = vector-pointer inbounds ir<%gep.x>
+; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = vector-pointer inbounds ir<%gep.x>, ir<1>
; CHECK-NEXT: WIDEN ir<%lv> = load vp<[[VP5]]>
; CHECK-NEXT: WIDEN ir<%or.1> = or disjoint ir<%lv>, ir<1>
; CHECK-NEXT: WIDEN ir<%or.2> = or ir<%lv>, ir<3>
; CHECK-NEXT: WIDEN ir<%add> = add nuw nsw ir<%or.1>, ir<%or.2>
-; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer inbounds ir<%gep.x>
+; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer inbounds ir<%gep.x>, ir<1>
; CHECK-NEXT: WIDEN store vp<[[VP6]]>, ir<%add>
; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]>
; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]>
@@ -971,7 +971,7 @@ define void @zext_nneg(ptr noalias %p, ptr noalias %p1) {
; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
; CHECK-NEXT: vp<[[VP4:%[0-9]+]]> = SCALAR-STEPS vp<[[VP3]]>, ir<1>, vp<[[VP0]]>
; CHECK-NEXT: CLONE ir<%idx> = getelementptr ir<%p>, vp<[[VP4]]>
-; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = vector-pointer ir<%idx>
+; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = vector-pointer ir<%idx>, ir<1>
; CHECK-NEXT: WIDEN ir<%l> = load vp<[[VP5]]>
; CHECK-NEXT: WIDEN-CAST ir<%zext> = zext nneg ir<%l> to i64
; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = extract-last-part ir<%zext>
@@ -1043,11 +1043,11 @@ define i16 @print_first_order_recurrence_and_result(ptr %ptr) {
; CHECK-NEXT: FIRST-ORDER-RECURRENCE-PHI ir<%for.1> = phi ir<22>, ir<%for.1.next>
; CHECK-NEXT: vp<[[VP4:%[0-9]+]]> = SCALAR-STEPS vp<[[VP3]]>, ir<1>, vp<[[VP0]]>
; CHECK-NEXT: CLONE ir<%gep.ptr> = getelementptr inbounds ir<%ptr>, vp<[[VP4]]>
-; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = vector-pointer inbounds ir<%gep.ptr>
+; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = vector-pointer inbounds ir<%gep.ptr>, ir<1>
; CHECK-NEXT: WIDEN ir<%for.1.next> = load vp<[[VP5]]>
; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = first-order splice ir<%for.1>, ir<%for.1.next>
; CHECK-NEXT: WIDEN ir<%add> = add vp<[[VP6]]>, ir<1>
-; CHECK-NEXT: vp<[[VP7:%[0-9]+]]> = vector-pointer inbounds ir<%gep.ptr>
+; CHECK-NEXT: vp<[[VP7:%[0-9]+]]> = vector-pointer inbounds ir<%gep.ptr>, ir<1>
; CHECK-NEXT: WIDEN store vp<[[VP7]]>, ir<%add>
; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]>
; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]>
@@ -1121,16 +1121,16 @@ define void @print_select_with_fastmath_flags(ptr noalias %a, ptr noalias %b, pt
; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
; CHECK-NEXT: vp<[[VP4:%[0-9]+]]> = SCALAR-STEPS vp<[[VP3]]>, ir<1>, vp<[[VP0]]>
; CHECK-NEXT: CLONE ir<%gep> = getelementptr inbounds nuw ir<%b>, vp<[[VP4]]>
-; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = vector-pointer inbounds nuw ir<%gep>
+; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = vector-pointer inbounds nuw ir<%gep>, ir<1>
; CHECK-NEXT: WIDEN ir<%0> = load vp<[[VP5]]>
; CHECK-NEXT: CLONE ir<%gep3> = getelementptr inbounds nuw ir<%c>, vp<[[VP4]]>
-; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer inbounds nuw ir<%gep3>
+; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer inbounds nuw ir<%gep3>, ir<1>
; CHECK-NEXT: WIDEN ir<%1> = load vp<[[VP6]]>
; CHECK-NEXT: WIDEN ir<%cmp4> = fcmp ogt fast ir<%0>, ir<%1>
; CHECK-NEXT: WIDEN ir<%add> = fadd fast ir<%0>, ir<1.000000e+01>
; CHECK-NEXT: WIDEN ir<%cond> = select fast ir<%cmp4>, ir<%add>, ir<%1>
; CHECK-NEXT: CLONE ir<%gep11> = getelementptr inbounds nuw ir<%a>, vp<[[VP4]]>
-; CHECK-NEXT: vp<[[VP7:%[0-9]+]]> = vector-pointer inbounds nuw ir<%gep11>
+; CHECK-NEXT: vp<[[VP7:%[0-9]+]]> = vector-pointer inbounds nuw ir<%gep11>, ir<1>
; CHECK-NEXT: WIDEN store vp<[[VP7]]>, ir<%cond>
; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]>
; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]>
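A minimal sketch of what the extra stride operand changes once the plan is unrolled (assumed values: fixed VF = 8, i8 elements, stride 1, matching the `vector-pointer ir<%A>, ir<1>, ir<8>` line in vplan-printing-before-execute.ll above). The unroller now scales VF * Part by the stride before adding it as the offset operand, so part 0 stays at offset 0 while part 1 computes:

  %offset = mul i64 8, 1                            ; (VF * Part) * Stride
  %part.ptr = getelementptr i8, ptr %A, i64 %offset ; the documented `GEP Ptr, Offset`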
From a652d6ae3397d2ee99fdae29b8a0d05e6d4f92a5 Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Mon, 30 Jun 2025 20:38:38 -0700
Subject: [PATCH 03/30] Transform the gather to stride load
---
.../Transforms/Vectorize/LoopVectorize.cpp | 7 +
llvm/lib/Transforms/Vectorize/VPlan.h | 27 ++-
.../Transforms/Vectorize/VPlanTransforms.cpp | 189 ++++++++++++++++++
.../Transforms/Vectorize/VPlanTransforms.h | 6 +
.../RISCV/blocks-with-dead-instructions.ll | 13 +-
.../RISCV/masked_gather_scatter.ll | 82 +++++---
.../LoopVectorize/RISCV/strided-accesses.ll | 158 +++++++++------
.../RISCV/tail-folding-gather-scatter.ll | 80 ++++++--
.../RISCV/tail-folding-interleave.ll | 166 +++++++--------
.../LoopVectorize/vplan-print-after-all.ll | 1 +
10 files changed, 540 insertions(+), 189 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index e0bc6a9886d8e..a12aec11df29b 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -8355,6 +8355,8 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(
if (!RUN_VPLAN_PASS(VPlanTransforms::handleFindLastReductions, *Plan))
return nullptr;
+ VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind, CM.PSE,
+ OrigLoop);
// Create partial reduction recipes for scaled reductions and transform
// recipes to abstract recipes if it is legal and beneficial and clamp the
// range for better cost estimation.
@@ -8369,6 +8371,11 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(
Range);
}
+ // Convert memory recipes to strided access recipes if the strided access is
+ // legal and profitable.
+ RUN_VPLAN_PASS(VPlanTransforms::convertToStridedAccesses, *Plan, CostCtx,
+ Range);
+
for (ElementCount VF : Range)
Plan->addVF(VF);
Plan->setName("Initial VPlan");
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index 8400378ae8a7b..f6b916ce3fd32 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -2008,10 +2008,6 @@ class VPHistogramRecipe : public VPRecipeBase {
class LLVM_ABI_FOR_TEST VPWidenGEPRecipe : public VPRecipeWithIRFlags {
Type *SourceElementTy;
- bool isPointerLoopInvariant() const {
- return getOperand(0)->isDefinedOutsideLoopRegions();
- }
-
bool isIndexLoopInvariant(unsigned I) const {
return getOperand(I + 1)->isDefinedOutsideLoopRegions();
}
@@ -2041,6 +2037,29 @@ class LLVM_ABI_FOR_TEST VPWidenGEPRecipe : public VPRecipeWithIRFlags {
/// This recipe generates a GEP instruction.
unsigned getOpcode() const { return Instruction::GetElementPtr; }
+ bool isPointerLoopInvariant() const {
+ return getOperand(0)->isDefinedOutsideLoopRegions();
+ }
+
+ std::optional<unsigned> getUniqueVariantIndex() const {
+ std::optional<unsigned> VarIdx;
+ for (unsigned I = 0, E = getNumOperands() - 1; I < E; ++I) {
+ if (isIndexLoopInvariant(I))
+ continue;
+
+ if (VarIdx)
+ return std::nullopt;
+ VarIdx = I;
+ }
+ return VarIdx;
+ }
+
+ Type *getIndexedType(unsigned I) const {
+ auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr());
+ SmallVector<Value *, 4> Ops(GEP->idx_begin(), GEP->idx_begin() + I);
+ return GetElementPtrInst::getIndexedType(SourceElementTy, Ops);
+ }
+
/// Generate the gep nodes.
void execute(VPTransformState &State) override;
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index a793cb95d7e36..613c65c8b7d4f 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -5891,3 +5891,192 @@ void VPlanTransforms::createPartialReductions(VPlan &Plan,
for (const VPPartialReductionChain &Chain : Chains)
transformToPartialReduction(Chain, Range, CostCtx, Plan);
}
+
+static std::pair<VPValue *, VPValue *> matchStridedStart(VPValue *CurIndex) {
+ // TODO: Support VPWidenPointerInductionRecipe.
+ if (auto *WidenIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(CurIndex))
+ return {WidenIV, WidenIV->getStepValue()};
+
+ auto *WidenR = dyn_cast<VPWidenRecipe>(CurIndex);
+ if (!WidenR || !CurIndex->getUnderlyingValue())
+ return {nullptr, nullptr};
+
+ unsigned Opcode = WidenR->getOpcode();
+ // TODO: Support Instruction::Add and Instruction::Or.
+ if (Opcode != Instruction::Shl && Opcode != Instruction::Mul)
+ return {nullptr, nullptr};
+
+ // Match the pattern binop(variant, invariant), or binop(invariant, variant)
+ // if the binary operator is commutative.
+ bool IsLHSUniform = vputils::isSingleScalar(WidenR->getOperand(0));
+ if (IsLHSUniform == vputils::isSingleScalar(WidenR->getOperand(1)) ||
+ (IsLHSUniform && !Instruction::isCommutative(Opcode)))
+ return {nullptr, nullptr};
+ unsigned VarIdx = IsLHSUniform ? 1 : 0;
+
+ auto [Start, Stride] = matchStridedStart(WidenR->getOperand(VarIdx));
+ if (!Start)
+ return {nullptr, nullptr};
+
+ SmallVector<VPValue *> StartOps(WidenR->operands());
+ StartOps[VarIdx] = Start;
+ auto *StartR = new VPReplicateRecipe(WidenR->getUnderlyingInstr(), StartOps,
+ /*IsUniform*/ true, /*Mask*/ nullptr,
+ /*Flags*/ *WidenR, /*Metadata*/ *WidenR,
+ WidenR->getDebugLoc());
+ StartR->insertBefore(WidenR);
+
+ unsigned InvIdx = VarIdx == 0 ? 1 : 0;
+ auto *StrideR =
+ new VPInstruction(Opcode, {Stride, WidenR->getOperand(InvIdx)},
+ VPRecipeWithIRFlags::WrapFlagsTy{false, false});
+ StrideR->insertBefore(WidenR);
+ return {StartR, StrideR};
+}
+
+static std::tuple<VPValue *, VPValue *, Type *>
+determineBaseAndStride(VPWidenGEPRecipe *WidenGEP) {
+ // TODO: Check if the base pointer is strided.
+ if (!WidenGEP->isPointerLoopInvariant())
+ return {nullptr, nullptr, nullptr};
+
+ // Find the unique variant index.
+ std::optional<unsigned> VarIndex = WidenGEP->getUniqueVariantIndex();
+ if (!VarIndex)
+ return {nullptr, nullptr, nullptr};
+
+ Type *ElementTy = WidenGEP->getIndexedType(*VarIndex);
+ if (ElementTy->isScalableTy() || ElementTy->isStructTy() ||
+ ElementTy->isVectorTy())
+ return {nullptr, nullptr, nullptr};
+
+ unsigned VarOp = *VarIndex + 1;
+ VPValue *IndexVPV = WidenGEP->getOperand(VarOp);
+ auto [Start, Stride] = matchStridedStart(IndexVPV);
+ if (!Start)
+ return {nullptr, nullptr, nullptr};
+
+ SmallVector<VPValue *> Ops(WidenGEP->operands());
+ Ops[VarOp] = Start;
+ auto *BasePtr = new VPReplicateRecipe(
+ WidenGEP->getUnderlyingInstr(), Ops,
+ /*IsUniform*/ true, /*Mask*/ nullptr, /*Flags*/ *WidenGEP,
+ /*Metadata*/ {}, WidenGEP->getDebugLoc());
+ BasePtr->insertBefore(WidenGEP);
+
+ return {BasePtr, Stride, ElementTy};
+}
+
+void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
+ VFRange &Range) {
+ if (Plan.hasScalarVFOnly())
+ return;
+
+ VPTypeAnalysis TypeInfo(Plan);
+ DenseMap<VPWidenGEPRecipe *, std::tuple<VPValue *, VPValue *, Type *>>
+ StrideCache;
+ SmallVector<VPRecipeBase *> ToErase;
+ SmallPtrSet<VPValue *, 4> PossiblyDead;
+ for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
+ vp_depth_first_shallow(Plan.getVectorLoopRegion()->getEntry()))) {
+ for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
+ auto *MemR = dyn_cast<VPWidenMemoryRecipe>(&R);
+ // TODO: Support strided store.
+ // TODO: Transform reverse access into strided access with -1 stride.
+ // TODO: Transform gather/scatter with uniform address into strided access
+ // with 0 stride.
+ // TODO: Transform interleave access into multiple strided accesses.
+ if (!MemR || !isa<VPWidenLoadRecipe>(MemR) || MemR->isConsecutive())
+ continue;
+
+ auto *Ptr = dyn_cast<VPWidenGEPRecipe>(MemR->getAddr());
+ if (!Ptr)
+ continue;
+
+ // The memory cost model requires the pointer operand of the memory access
+ // instruction.
+ Value *PtrUV = Ptr->getUnderlyingValue();
+ if (!PtrUV)
+ continue;
+
+ // Try to get base and stride here.
+ VPValue *BasePtr, *StrideInElement;
+ Type *ElementTy;
+ auto It = StrideCache.find(Ptr);
+ if (It != StrideCache.end())
+ std::tie(BasePtr, StrideInElement, ElementTy) = It->second;
+ else
+ std::tie(BasePtr, StrideInElement, ElementTy) = StrideCache[Ptr] =
+ determineBaseAndStride(Ptr);
+
+ // Skip if the memory access is not a strided access.
+ if (!BasePtr) {
+ assert(!StrideInElement && !ElementTy);
+ continue;
+ }
+ assert(StrideInElement && ElementTy);
+
+ Instruction &Ingredient = MemR->getIngredient();
+ auto IsProfitable = [&](ElementCount VF) -> bool {
+ Type *DataTy = toVectorTy(getLoadStoreType(&Ingredient), VF);
+ const Align Alignment = getLoadStoreAlignment(&Ingredient);
+ if (!Ctx.TTI.isLegalStridedLoadStore(DataTy, Alignment))
+ return false;
+ const InstructionCost CurrentCost = MemR->computeCost(VF, Ctx);
+ const InstructionCost StridedLoadStoreCost =
+ Ctx.TTI.getMemIntrinsicInstrCost(
+ MemIntrinsicCostAttributes(
+ Intrinsic::experimental_vp_strided_load, DataTy, PtrUV,
+ MemR->isMasked(), Alignment, &Ingredient),
+ Ctx.CostKind);
+ return StridedLoadStoreCost < CurrentCost;
+ };
+
+ if (!LoopVectorizationPlanner::getDecisionAndClampRange(IsProfitable,
+ Range)) {
+ PossiblyDead.insert(BasePtr);
+ PossiblyDead.insert(StrideInElement);
+ continue;
+ }
+ PossiblyDead.insert(Ptr);
+
+ // Create a new vector pointer for strided access.
+ auto *GEP = dyn_cast<GetElementPtrInst>(PtrUV->stripPointerCasts());
+ auto *NewPtr = new VPVectorPointerRecipe(
+ BasePtr, ElementTy, StrideInElement,
+ GEP ? GEP->getNoWrapFlags() : GEPNoWrapFlags::none(),
+ Ptr->getDebugLoc());
+ NewPtr->insertBefore(MemR);
+
+ const DataLayout &DL = Ingredient.getDataLayout();
+ TypeSize TS = DL.getTypeAllocSize(ElementTy);
+ unsigned TypeScale = TS.getFixedValue();
+ VPValue *StrideInBytes = StrideInElement;
+ // Scale the stride by the size of the indexed type.
+ if (TypeScale != 1) {
+ VPValue *ScaleVPV = Plan.getOrAddLiveIn(ConstantInt::get(
+ TypeInfo.inferScalarType(StrideInElement), TypeScale));
+ auto *ScaledStride =
+ new VPInstruction(Instruction::Mul, {StrideInElement, ScaleVPV},
+ VPRecipeWithIRFlags::WrapFlagsTy{false, false});
+ ScaledStride->insertBefore(MemR);
+ StrideInBytes = ScaledStride;
+ }
+
+ auto *LoadR = cast<VPWidenLoadRecipe>(MemR);
+ auto *StridedLoad = new VPWidenStridedLoadRecipe(
+ *cast<LoadInst>(&Ingredient), NewPtr, StrideInBytes, &Plan.getVF(),
+ LoadR->getMask(), *LoadR, LoadR->getDebugLoc());
+ StridedLoad->insertBefore(LoadR);
+ LoadR->replaceAllUsesWith(StridedLoad);
+
+ ToErase.push_back(LoadR);
+ }
+ }
+
+ // Clean up dead memory access recipes, and unused base address and stride.
+ for (auto *R : ToErase)
+ R->eraseFromParent();
+ for (auto *V : PossiblyDead)
+ recursivelyDeleteDeadRecipes(V);
+}
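A worked example of the stride computation above, using values from the masked_gather_scatter.ll update below:

  ; widened index: shl (mul stepvector, 16), 1. matchStridedStart peels the
  ; shl/mul pair and returns an element stride of 16 << 1 = 32.
  ; byte stride: 32 * 8 (TypeScale for double) = 256, which becomes the
  ; `i64 256` operand of the emitted llvm.experimental.vp.strided.load call.

determineBaseAndStride rebuilds the base pointer as a uniform GEP at the induction's start value, so a single scalar address is computed per vector iteration instead of one per lane.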
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
index dec8b4fd6a1d8..1bccc4317ff95 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
@@ -299,6 +299,12 @@ struct VPlanTransforms {
&InterleaveGroups,
VPRecipeBuilder &RecipeBuilder, const bool &ScalarEpilogueAllowed);
+ /// Transform widen memory recipes into strided access recipes when legal
+ /// and profitable. Clamps \p Range to maintain consistency with widen
+ /// decisions of \p Plan, and uses \p Ctx to evaluate the cost.
+ static void convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
+ VFRange &Range);
+
/// Remove dead recipes from \p Plan.
static void removeDeadRecipes(VPlan &Plan);
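Concretely, the transform rewrites a gather such as the one in blocks-with-dead-instructions.ll below into a strided load. A hedged before/after fragment (lane shapes illustrative; the mask and VF operands follow the EVL-folded form in the test):

  ; before: one pointer per lane, stepping the widened IV by 3 elements
  %ptrs = getelementptr i16, ptr %src, <vscale x 8 x i64> %vec.ind
  %g = call <vscale x 8 x i16> @llvm.vp.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr> align 2 %ptrs, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ; after: one scalar base plus a 6-byte stride (3 elements x 2 bytes per i16)
  %base = getelementptr i16, ptr %src, i64 %offset.idx
  %s = call <vscale x 8 x i16> @llvm.experimental.vp.strided.load.nxv8i16.p0.i64(ptr align 2 %base, i64 6, <vscale x 8 x i1> %mask, i32 %vf)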
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll b/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll
index 263c200c28801..fe233e661dafc 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll
@@ -310,6 +310,8 @@ define void @multiple_blocks_with_dead_inst_multiple_successors_6(ptr %src, i1 %
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
; CHECK-NEXT: br label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP4:%.*]] = shl nuw i64 [[TMP3]], 3
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i1> poison, i1 [[IC]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i1> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP8:%.*]] = xor <vscale x 8 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
@@ -317,15 +319,23 @@ define void @multiple_blocks_with_dead_inst_multiple_successors_6(ptr %src, i1 %
; CHECK-NEXT: [[TMP5:%.*]] = mul nsw <vscale x 8 x i64> [[TMP11]], splat (i64 3)
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[TMP5]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP2]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP27:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 8 x i32> poison, i32 [[TMP27]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 8 x i32> [[BROADCAST_SPLATINSERT3]], <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP27]] to i64
; CHECK-NEXT: [[TMP16:%.*]] = mul nsw i64 3, [[TMP12]]
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP16]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP18:%.*]] = call <vscale x 8 x i32> @llvm.stepvector.nxv8i32()
+; CHECK-NEXT: [[TMP19:%.*]] = icmp ult <vscale x 8 x i32> [[TMP18]], [[BROADCAST_SPLAT4]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[EVL_BASED_IV]], 3
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 8 x i64> [[VEC_IND]]
-; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 8 x i16> @llvm.vp.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr> align 2 [[TMP20]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP27]])
+; CHECK-NEXT: [[TMP15:%.*]] = trunc i64 [[TMP4]] to i32
+; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vp.strided.load.nxv8i16.p0.i64(ptr align 2 [[TMP21]], i64 6, <vscale x 8 x i1> [[TMP19]], i32 [[TMP15]])
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq <vscale x 8 x i16> [[WIDE_MASKED_GATHER]], zeroinitializer
; CHECK-NEXT: [[TMP14:%.*]] = select <vscale x 8 x i1> [[TMP17]], <vscale x 8 x i1> [[TMP8]], <vscale x 8 x i1> zeroinitializer
; CHECK-NEXT: [[TMP28:%.*]] = xor <vscale x 8 x i1> [[TMP17]], splat (i1 true)
@@ -333,6 +343,7 @@ define void @multiple_blocks_with_dead_inst_multiple_successors_6(ptr %src, i1 %
; CHECK-NEXT: [[TMP23:%.*]] = select <vscale x 8 x i1> [[TMP17]], <vscale x 8 x i1> [[BROADCAST_SPLAT]], <vscale x 8 x i1> zeroinitializer
; CHECK-NEXT: [[TMP24:%.*]] = or <vscale x 8 x i1> [[TMP22]], [[TMP23]]
; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> align 2 [[TMP20]], <vscale x 8 x i1> [[TMP24]], i32 [[TMP27]])
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP12]], [[EVL_BASED_IV]]
; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add nsw <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll b/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll
index 9584076ecb709..d159e072fc12b 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll
@@ -16,7 +16,7 @@
define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture readonly %trigger) local_unnamed_addr #0 {
; RV32-LABEL: @foo4(
; RV32-NEXT: entry:
-; RV32-NEXT: br label [[VECTOR_MEMCHECK:%.*]]
+; RV32-NEXT: br label [[VECTOR_SCEVCHECK:%.*]]
; RV32: vector.scevcheck:
; RV32-NEXT: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 128, i32 624)
; RV32-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL]], 0
@@ -31,41 +31,53 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea
; RV32-NEXT: [[TMP4:%.*]] = icmp ult ptr [[TMP3]], [[B]]
; RV32-NEXT: [[TMP5:%.*]] = or i1 [[TMP4]], [[MUL_OVERFLOW3]]
; RV32-NEXT: [[TMP6:%.*]] = or i1 [[TMP2]], [[TMP5]]
-; RV32-NEXT: br i1 [[TMP6]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK1:%.*]]
+; RV32-NEXT: br i1 [[TMP6]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; RV32: vector.memcheck:
-; RV32-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[TRIGGER:%.*]], i32 39940
-; RV32-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i32 79880
-; RV32-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[B]], i32 159752
-; RV32-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[TRIGGER]], [[SCEVGEP]]
-; RV32-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
+; RV32-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[TRIGGER:%.*]], i32 39940
+; RV32-NEXT: [[SCEVGEP4:%.*]] = getelementptr i8, ptr [[A]], i32 79880
+; RV32-NEXT: [[SCEVGEP5:%.*]] = getelementptr i8, ptr [[B]], i32 159752
+; RV32-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[TRIGGER]], [[SCEVGEP4]]
+; RV32-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]]
; RV32-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
-; RV32-NEXT: [[BOUND03:%.*]] = icmp ult ptr [[A]], [[SCEVGEP2]]
-; RV32-NEXT: [[BOUND14:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]]
-; RV32-NEXT: [[FOUND_CONFLICT5:%.*]] = and i1 [[BOUND03]], [[BOUND14]]
-; RV32-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT5]]
+; RV32-NEXT: [[BOUND06:%.*]] = icmp ult ptr [[A]], [[SCEVGEP5]]
+; RV32-NEXT: [[BOUND17:%.*]] = icmp ult ptr [[B]], [[SCEVGEP4]]
+; RV32-NEXT: [[FOUND_CONFLICT8:%.*]] = and i1 [[BOUND06]], [[BOUND17]]
+; RV32-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT8]]
; RV32-NEXT: br i1 [[CONFLICT_RDX]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; RV32: vector.ph:
+; RV32-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
+; RV32-NEXT: [[TMP13:%.*]] = shl nuw i64 [[TMP12]], 1
; RV32-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; RV32-NEXT: [[TMP9:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP7]], splat (i64 16)
; RV32-NEXT: br label [[VECTOR_BODY:%.*]]
; RV32: vector.body:
+; RV32-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; RV32-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[TMP9]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; RV32-NEXT: [[AVL:%.*]] = phi i64 [ 625, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; RV32-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; RV32-NEXT: [[BROADCAST_SPLATINSERT9:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP10]], i64 0
+; RV32-NEXT: [[BROADCAST_SPLAT10:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT9]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
; RV32-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64
; RV32-NEXT: [[TMP11:%.*]] = shl nuw nsw i64 [[TMP8]], 4
; RV32-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP11]], i64 0
; RV32-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-; RV32-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], <vscale x 2 x i64> [[VEC_IND]]
-; RV32-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i32> @llvm.vp.gather.nxv2i32.nxv2p0(<vscale x 2 x ptr> align 4 [[TMP13]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]]), !alias.scope [[META0:![0-9]+]], !noalias [[META3:![0-9]+]]
+; RV32-NEXT: [[TMP23:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; RV32-NEXT: [[TMP15:%.*]] = icmp ult <vscale x 2 x i32> [[TMP23]], [[BROADCAST_SPLAT10]]
+; RV32-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[EVL_BASED_IV]], 16
+; RV32-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], i64 [[OFFSET_IDX]]
+; RV32-NEXT: [[TMP25:%.*]] = trunc i64 [[TMP13]] to i32
+; RV32-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0.i64(ptr align 4 [[TMP16]], i64 64, <vscale x 2 x i1> [[TMP15]], i32 [[TMP25]]), !alias.scope [[META0:![0-9]+]], !noalias [[META3:![0-9]+]]
; RV32-NEXT: [[TMP14:%.*]] = icmp slt <vscale x 2 x i32> [[WIDE_MASKED_GATHER]], splat (i32 100)
-; RV32-NEXT: [[TMP15:%.*]] = shl nuw nsw <vscale x 2 x i64> [[VEC_IND]], splat (i64 1)
-; RV32-NEXT: [[TMP16:%.*]] = getelementptr inbounds double, ptr [[B]], <vscale x 2 x i64> [[TMP15]]
-; RV32-NEXT: [[WIDE_MASKED_GATHER6:%.*]] = call <vscale x 2 x double> @llvm.vp.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> align 8 [[TMP16]], <vscale x 2 x i1> [[TMP14]], i32 [[TMP10]]), !alias.scope [[META5:![0-9]+]]
+; RV32-NEXT: [[TMP26:%.*]] = select <vscale x 2 x i1> [[TMP15]], <vscale x 2 x i1> [[TMP14]], <vscale x 2 x i1> zeroinitializer
+; RV32-NEXT: [[TMP20:%.*]] = shl nuw nsw i64 [[OFFSET_IDX]], 1
+; RV32-NEXT: [[TMP21:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[TMP20]]
+; RV32-NEXT: [[TMP22:%.*]] = trunc i64 [[TMP13]] to i32
+; RV32-NEXT: [[WIDE_MASKED_GATHER6:%.*]] = call <vscale x 2 x double> @llvm.experimental.vp.strided.load.nxv2f64.p0.i64(ptr align 8 [[TMP21]], i64 256, <vscale x 2 x i1> [[TMP26]], i32 [[TMP22]]), !alias.scope [[META5:![0-9]+]]
; RV32-NEXT: [[TMP17:%.*]] = sitofp <vscale x 2 x i32> [[WIDE_MASKED_GATHER]] to <vscale x 2 x double>
; RV32-NEXT: [[TMP18:%.*]] = fadd <vscale x 2 x double> [[WIDE_MASKED_GATHER6]], [[TMP17]]
; RV32-NEXT: [[TMP19:%.*]] = getelementptr inbounds double, ptr [[A]], <vscale x 2 x i64> [[VEC_IND]]
; RV32-NEXT: call void @llvm.vp.scatter.nxv2f64.nxv2p0(<vscale x 2 x double> [[TMP18]], <vscale x 2 x ptr> align 8 [[TMP19]], <vscale x 2 x i1> [[TMP14]], i32 [[TMP10]]), !alias.scope [[META3]], !noalias [[META5]]
+; RV32-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[EVL_BASED_IV]]
; RV32-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
; RV32-NEXT: [[VEC_IND_NEXT]] = add nuw nsw <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; RV32-NEXT: [[TMP24:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
@@ -73,20 +85,20 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea
; RV32: middle.block:
; RV32-NEXT: br label [[FOR_END:%.*]]
; RV32: scalar.ph:
-; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[VECTOR_MEMCHECK]] ], [ 0, [[VECTOR_MEMCHECK1]] ]
+; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; RV32-NEXT: br label [[FOR_BODY:%.*]]
; RV32: for.body:
; RV32-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
; RV32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], i64 [[INDVARS_IV]]
-; RV32-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; RV32-NEXT: [[CMP1:%.*]] = icmp slt i32 [[TMP21]], 100
+; RV32-NEXT: [[TMP27:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; RV32-NEXT: [[CMP1:%.*]] = icmp slt i32 [[TMP27]], 100
; RV32-NEXT: br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]]
; RV32: if.then:
-; RV32-NEXT: [[TMP22:%.*]] = shl nuw nsw i64 [[INDVARS_IV]], 1
-; RV32-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[TMP22]]
-; RV32-NEXT: [[TMP23:%.*]] = load double, ptr [[ARRAYIDX3]], align 8
-; RV32-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP21]] to double
-; RV32-NEXT: [[ADD:%.*]] = fadd double [[TMP23]], [[CONV]]
+; RV32-NEXT: [[TMP28:%.*]] = shl nuw nsw i64 [[INDVARS_IV]], 1
+; RV32-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[TMP28]]
+; RV32-NEXT: [[TMP29:%.*]] = load double, ptr [[ARRAYIDX3]], align 8
+; RV32-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP27]] to double
+; RV32-NEXT: [[ADD:%.*]] = fadd double [[TMP29]], [[CONV]]
; RV32-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[INDVARS_IV]]
; RV32-NEXT: store double [[ADD]], ptr [[ARRAYIDX7]], align 8
; RV32-NEXT: br label [[FOR_INC]]
@@ -113,27 +125,39 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea
; RV64-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT5]]
; RV64-NEXT: br i1 [[CONFLICT_RDX]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; RV64: vector.ph:
+; RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-NEXT: [[TMP2:%.*]] = shl nuw i64 [[TMP0]], 1
; RV64-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; RV64-NEXT: [[TMP1:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP7]], splat (i64 16)
; RV64-NEXT: br label [[VECTOR_BODY:%.*]]
; RV64: vector.body:
+; RV64-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; RV64-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[TMP1]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; RV64-NEXT: [[AVL:%.*]] = phi i64 [ 625, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; RV64-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; RV64-NEXT: [[BROADCAST_SPLATINSERT6:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP10]], i64 0
+; RV64-NEXT: [[BROADCAST_SPLAT7:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT6]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
; RV64-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64
; RV64-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP8]], 4
; RV64-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP4]], i64 0
; RV64-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-; RV64-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], <vscale x 2 x i64> [[VEC_IND]]
-; RV64-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i32> @llvm.vp.gather.nxv2i32.nxv2p0(<vscale x 2 x ptr> align 4 [[TMP13]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]]), !alias.scope [[META0:![0-9]+]]
+; RV64-NEXT: [[TMP11:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; RV64-NEXT: [[TMP25:%.*]] = icmp ult <vscale x 2 x i32> [[TMP11]], [[BROADCAST_SPLAT7]]
+; RV64-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[EVL_BASED_IV]], 16
+; RV64-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], i64 [[OFFSET_IDX]]
+; RV64-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP2]] to i32
+; RV64-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0.i64(ptr align 4 [[TMP9]], i64 64, <vscale x 2 x i1> [[TMP25]], i32 [[TMP16]]), !alias.scope [[META0:![0-9]+]]
; RV64-NEXT: [[TMP14:%.*]] = icmp slt <vscale x 2 x i32> [[WIDE_MASKED_GATHER]], splat (i32 100)
-; RV64-NEXT: [[TMP15:%.*]] = shl nuw nsw <vscale x 2 x i64> [[VEC_IND]], splat (i64 1)
-; RV64-NEXT: [[TMP16:%.*]] = getelementptr inbounds double, ptr [[B]], <vscale x 2 x i64> [[TMP15]]
-; RV64-NEXT: [[WIDE_MASKED_GATHER6:%.*]] = call <vscale x 2 x double> @llvm.vp.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> align 8 [[TMP16]], <vscale x 2 x i1> [[TMP14]], i32 [[TMP10]]), !alias.scope [[META3:![0-9]+]]
+; RV64-NEXT: [[TMP12:%.*]] = select <vscale x 2 x i1> [[TMP25]], <vscale x 2 x i1> [[TMP14]], <vscale x 2 x i1> zeroinitializer
+; RV64-NEXT: [[TMP13:%.*]] = shl nuw nsw i64 [[OFFSET_IDX]], 1
+; RV64-NEXT: [[TMP28:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[TMP13]]
+; RV64-NEXT: [[TMP15:%.*]] = trunc i64 [[TMP2]] to i32
+; RV64-NEXT: [[WIDE_MASKED_GATHER6:%.*]] = call <vscale x 2 x double> @llvm.experimental.vp.strided.load.nxv2f64.p0.i64(ptr align 8 [[TMP28]], i64 256, <vscale x 2 x i1> [[TMP12]], i32 [[TMP15]]), !alias.scope [[META3:![0-9]+]]
; RV64-NEXT: [[TMP17:%.*]] = sitofp <vscale x 2 x i32> [[WIDE_MASKED_GATHER]] to <vscale x 2 x double>
; RV64-NEXT: [[TMP18:%.*]] = fadd <vscale x 2 x double> [[WIDE_MASKED_GATHER6]], [[TMP17]]
; RV64-NEXT: [[TMP19:%.*]] = getelementptr inbounds double, ptr [[A]], <vscale x 2 x i64> [[VEC_IND]]
; RV64-NEXT: call void @llvm.vp.scatter.nxv2f64.nxv2p0(<vscale x 2 x double> [[TMP18]], <vscale x 2 x ptr> align 8 [[TMP19]], <vscale x 2 x i1> [[TMP14]], i32 [[TMP10]]), !alias.scope [[META5:![0-9]+]], !noalias [[META7:![0-9]+]]
+; RV64-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[EVL_BASED_IV]]
; RV64-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
; RV64-NEXT: [[VEC_IND_NEXT]] = add nuw nsw <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; RV64-NEXT: [[TMP24:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
index 0da00bf0dbd0f..3f9c44ebdbf99 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
@@ -10,20 +10,31 @@ define void @single_constant_stride_int_scaled(ptr %p) {
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 2
; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP8]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP11]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP12]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; CHECK-NEXT: [[TMP6:%.*]] = icmp ult <vscale x 4 x i32> [[TMP5]], [[BROADCAST_SPLAT2]]
+; CHECK-NEXT: [[TMP7:%.*]] = shl nuw nsw i64 [[EVL_BASED_IV]], 3
; CHECK-NEXT: [[TMP14:%.*]] = shl nuw nsw <vscale x 4 x i64> [[VEC_IND]], splat (i64 3)
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[P:%.*]], <vscale x 4 x i64> [[TMP14]]
-; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP7]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[TMP14]]
+; CHECK-NEXT: [[TMP13:%.*]] = trunc i64 [[TMP1]] to i32
+; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP9]], i64 32, <vscale x 4 x i1> [[TMP6]], i32 [[TMP13]])
; CHECK-NEXT: [[TMP16:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP16]], <vscale x 4 x ptr> align 4 [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP12]], [[EVL_BASED_IV]]
; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
@@ -55,12 +66,18 @@ define void @single_constant_stride_int_scaled(ptr %p) {
; CHECK-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-UF2-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP7]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-UF2-NEXT: [[STEP_ADD:%.*]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; CHECK-UF2-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[INDEX]], 3
; CHECK-UF2-NEXT: [[TMP9:%.*]] = shl nuw nsw <vscale x 4 x i64> [[VEC_IND]], splat (i64 3)
; CHECK-UF2-NEXT: [[TMP10:%.*]] = shl nuw nsw <vscale x 4 x i64> [[STEP_ADD]], splat (i64 3)
-; CHECK-UF2-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[P:%.*]], <vscale x 4 x i64> [[TMP9]]
+; CHECK-UF2-NEXT: [[TMP22:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP8]]
+; CHECK-UF2-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[TMP9]]
; CHECK-UF2-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[TMP10]]
-; CHECK-UF2-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
-; CHECK-UF2-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
+; CHECK-UF2-NEXT: [[TMP17:%.*]] = shl i64 [[TMP3]], 3
+; CHECK-UF2-NEXT: [[TMP18:%.*]] = getelementptr i32, ptr [[TMP22]], i64 [[TMP17]]
+; CHECK-UF2-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP3]] to i32
+; CHECK-UF2-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP22]], i64 32, <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
+; CHECK-UF2-NEXT: [[TMP20:%.*]] = trunc i64 [[TMP3]] to i32
+; CHECK-UF2-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP18]], i64 32, <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]])
; CHECK-UF2-NEXT: [[TMP13:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
; CHECK-UF2-NEXT: [[TMP14:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER1]], splat (i32 1)
; CHECK-UF2-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP13]], <vscale x 4 x ptr> align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true))
@@ -110,21 +127,32 @@ define void @single_constant_stride_int_iv(ptr %p) {
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP2:%.*]] = shl nuw i64 [[TMP0]], 2
; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul nuw nsw <vscale x 4 x i64> [[TMP6]], splat (i64 64)
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP1]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP7]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP7]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP11]], 6
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP4]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[P:%.*]], <vscale x 4 x i64> [[VEC_IND]]
-; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
+; CHECK-NEXT: [[TMP14:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; CHECK-NEXT: [[TMP8:%.*]] = icmp ult <vscale x 4 x i32> [[TMP14]], [[BROADCAST_SPLAT2]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[EVL_BASED_IV]], 64
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP15:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP9]], i64 256, <vscale x 4 x i1> [[TMP8]], i32 [[TMP15]])
; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP13]], <vscale x 4 x ptr> align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[EVL_BASED_IV]]
; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add nuw nsw <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
@@ -157,10 +185,16 @@ define void @single_constant_stride_int_iv(ptr %p) {
; CHECK-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-UF2-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP8]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-UF2-NEXT: [[STEP_ADD:%.*]] = add <vscale x 4 x i64> [[VEC_IND]], [[TMP6]]
-; CHECK-UF2-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[P:%.*]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-UF2-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 64
+; CHECK-UF2-NEXT: [[TMP18:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-UF2-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-UF2-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[STEP_ADD]]
-; CHECK-UF2-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP9]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
-; CHECK-UF2-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP10]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
+; CHECK-UF2-NEXT: [[TMP14:%.*]] = shl i64 [[TMP3]], 6
+; CHECK-UF2-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[TMP18]], i64 [[TMP14]]
+; CHECK-UF2-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP3]] to i32
+; CHECK-UF2-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP18]], i64 256, <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
+; CHECK-UF2-NEXT: [[TMP17:%.*]] = trunc i64 [[TMP3]] to i32
+; CHECK-UF2-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP15]], i64 256, <vscale x 4 x i1> splat (i1 true), i32 [[TMP17]])
; CHECK-UF2-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
; CHECK-UF2-NEXT: [[TMP12:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER1]], splat (i32 1)
; CHECK-UF2-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP11]], <vscale x 4 x ptr> align 4 [[TMP9]], <vscale x 4 x i1> splat (i1 true))
@@ -764,23 +798,34 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) {
; STRIDED-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; STRIDED-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; STRIDED: vector.ph:
+; STRIDED-NEXT: [[TMP42:%.*]] = call i64 @llvm.vscale.i64()
+; STRIDED-NEXT: [[TMP45:%.*]] = shl nuw i64 [[TMP42]], 2
+; STRIDED-NEXT: [[TMP47:%.*]] = shl i64 [[STRIDE]], 2
; STRIDED-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[STRIDE]], i64 0
; STRIDED-NEXT: [[BROADCAST_SPLAT1:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; STRIDED-NEXT: [[TMP12:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; STRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; STRIDED: vector.body:
+; STRIDED-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP12]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[TMP43:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; STRIDED-NEXT: [[BROADCAST_SPLATINSERT11:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP43]], i64 0
+; STRIDED-NEXT: [[BROADCAST_SPLAT12:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT11]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; STRIDED-NEXT: [[TMP44:%.*]] = zext i32 [[TMP43]] to i64
; STRIDED-NEXT: [[BROADCAST_SPLATINSERT9:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP44]], i64 0
; STRIDED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT9]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; STRIDED-NEXT: [[TMP48:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; STRIDED-NEXT: [[TMP49:%.*]] = icmp ult <vscale x 4 x i32> [[TMP48]], [[BROADCAST_SPLAT12]]
+; STRIDED-NEXT: [[TMP50:%.*]] = mul nuw nsw i64 [[EVL_BASED_IV]], [[STRIDE]]
; STRIDED-NEXT: [[TMP18:%.*]] = mul nuw nsw <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT1]]
-; STRIDED-NEXT: [[TMP19:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[TMP18]]
-; STRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP19]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP43]]), !alias.scope [[META5:![0-9]+]]
+; STRIDED-NEXT: [[TMP46:%.*]] = getelementptr i32, ptr [[P]], i64 [[TMP50]]
+; STRIDED-NEXT: [[TMP51:%.*]] = trunc i64 [[TMP45]] to i32
+; STRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP46]], i64 [[TMP47]], <vscale x 4 x i1> [[TMP49]], i32 [[TMP51]]), !alias.scope [[META5:![0-9]+]]
; STRIDED-NEXT: [[TMP20:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
; STRIDED-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[P2]], <vscale x 4 x i64> [[TMP18]]
; STRIDED-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP20]], <vscale x 4 x ptr> align 4 [[TMP21]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP43]]), !alias.scope [[META8:![0-9]+]], !noalias [[META5]]
+; STRIDED-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP44]], [[EVL_BASED_IV]]
; STRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP44]]
; STRIDED-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; STRIDED-NEXT: [[TMP41:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
@@ -866,6 +911,7 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) {
; STRIDED-UF2-NEXT: [[TMP30:%.*]] = shl nuw i64 [[TMP29]], 1
; STRIDED-UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP30]]
; STRIDED-UF2-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; STRIDED-UF2-NEXT: [[TMP32:%.*]] = shl i64 [[STRIDE]], 2
; STRIDED-UF2-NEXT: [[BROADCAST_SPLATINSERT10:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[STRIDE]], i64 0
; STRIDED-UF2-NEXT: [[BROADCAST_SPLAT11:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT10]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; STRIDED-UF2-NEXT: [[TMP31:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
@@ -874,12 +920,16 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) {
; STRIDED-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-UF2-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP31]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-UF2-NEXT: [[STEP_ADD:%.*]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; STRIDED-UF2-NEXT: [[TMP36:%.*]] = mul nuw nsw i64 [[INDEX]], [[STRIDE]]
; STRIDED-UF2-NEXT: [[TMP33:%.*]] = mul nuw nsw <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT11]]
; STRIDED-UF2-NEXT: [[TMP34:%.*]] = mul nuw nsw <vscale x 4 x i64> [[STEP_ADD]], [[BROADCAST_SPLAT11]]
-; STRIDED-UF2-NEXT: [[TMP35:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[TMP33]]
-; STRIDED-UF2-NEXT: [[TMP36:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[TMP34]]
-; STRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP35]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison), !alias.scope [[META8:![0-9]+]]
-; STRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER12:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP36]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison), !alias.scope [[META8]]
+; STRIDED-UF2-NEXT: [[TMP44:%.*]] = getelementptr i32, ptr [[P]], i64 [[TMP36]]
+; STRIDED-UF2-NEXT: [[TMP47:%.*]] = mul i64 [[TMP29]], [[STRIDE]]
+; STRIDED-UF2-NEXT: [[TMP48:%.*]] = getelementptr i32, ptr [[TMP44]], i64 [[TMP47]]
+; STRIDED-UF2-NEXT: [[TMP42:%.*]] = trunc i64 [[TMP29]] to i32
+; STRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP44]], i64 [[TMP32]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP42]]), !alias.scope [[META8:![0-9]+]]
+; STRIDED-UF2-NEXT: [[TMP43:%.*]] = trunc i64 [[TMP29]] to i32
+; STRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER12:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP48]], i64 [[TMP32]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP43]]), !alias.scope [[META8]]
; STRIDED-UF2-NEXT: [[TMP37:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
; STRIDED-UF2-NEXT: [[TMP38:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER12]], splat (i32 1)
; STRIDED-UF2-NEXT: [[TMP39:%.*]] = getelementptr i32, ptr [[P2]], <vscale x 4 x i64> [[TMP33]]
@@ -1282,23 +1332,25 @@ define void @constant_stride_reinterpret(ptr noalias %in, ptr noalias %out) {
; NOSTRIDED-NEXT: entry:
; NOSTRIDED-NEXT: br label [[VECTOR_PH:%.*]]
; NOSTRIDED: vector.ph:
-; NOSTRIDED-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
+; NOSTRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; NOSTRIDED-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 1
; NOSTRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; NOSTRIDED: vector.body:
; NOSTRIDED-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; NOSTRIDED-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[TMP0]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; NOSTRIDED-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; NOSTRIDED-NEXT: [[TMP2:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
-; NOSTRIDED-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
-; NOSTRIDED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP3]], i64 0
-; NOSTRIDED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-; NOSTRIDED-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw i32, ptr [[IN:%.*]], <vscale x 2 x i64> [[VEC_IND]]
-; NOSTRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.vp.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> align 8 [[TMP4]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP2]])
+; NOSTRIDED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP2]], i64 0
+; NOSTRIDED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; NOSTRIDED-NEXT: [[TMP3:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; NOSTRIDED-NEXT: [[TMP4:%.*]] = icmp ult <vscale x 2 x i32> [[TMP3]], [[BROADCAST_SPLAT]]
+; NOSTRIDED-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[IN:%.*]], i64 [[EVL_BASED_IV]]
+; NOSTRIDED-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP1]] to i32
+; NOSTRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[TMP9]], i64 4, <vscale x 2 x i1> [[TMP4]], i32 [[TMP10]])
; NOSTRIDED-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i64, ptr [[OUT:%.*]], i64 [[EVL_BASED_IV]]
; NOSTRIDED-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[WIDE_MASKED_GATHER]], ptr align 8 [[TMP5]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP2]])
-; NOSTRIDED-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP3]], [[EVL_BASED_IV]]
-; NOSTRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP3]]
-; NOSTRIDED-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; NOSTRIDED-NEXT: [[TMP11:%.*]] = zext i32 [[TMP2]] to i64
+; NOSTRIDED-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[EVL_BASED_IV]]
+; NOSTRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
; NOSTRIDED-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; NOSTRIDED-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; NOSTRIDED: middle.block:
@@ -1315,27 +1367,23 @@ define void @constant_stride_reinterpret(ptr noalias %in, ptr noalias %out) {
; NOSTRIDED-UF2: vector.ph:
; NOSTRIDED-UF2-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-UF2-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 1
-; NOSTRIDED-UF2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP3]], i64 0
-; NOSTRIDED-UF2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; NOSTRIDED-UF2-NEXT: [[TMP4:%.*]] = shl nuw i64 [[TMP3]], 1
; NOSTRIDED-UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
; NOSTRIDED-UF2-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; NOSTRIDED-UF2-NEXT: [[TMP5:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; NOSTRIDED-UF2-NEXT: br label [[VECTOR_BODY:%.*]]
; NOSTRIDED-UF2: vector.body:
; NOSTRIDED-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; NOSTRIDED-UF2-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[TMP5]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; NOSTRIDED-UF2-NEXT: [[STEP_ADD:%.*]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; NOSTRIDED-UF2-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i32, ptr [[IN:%.*]], <vscale x 2 x i64> [[VEC_IND]]
-; NOSTRIDED-UF2-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i32, ptr [[IN]], <vscale x 2 x i64> [[STEP_ADD]]
-; NOSTRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> poison)
-; NOSTRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> poison)
+; NOSTRIDED-UF2-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i32, ptr [[IN:%.*]], i64 [[INDEX]]
+; NOSTRIDED-UF2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP5]], i64 [[TMP3]]
+; NOSTRIDED-UF2-NEXT: [[TMP7:%.*]] = trunc i64 [[TMP3]] to i32
+; NOSTRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[TMP5]], i64 4, <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]])
+; NOSTRIDED-UF2-NEXT: [[TMP8:%.*]] = trunc i64 [[TMP3]] to i32
+; NOSTRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[TMP6]], i64 4, <vscale x 2 x i1> splat (i1 true), i32 [[TMP8]])
; NOSTRIDED-UF2-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i64, ptr [[OUT:%.*]], i64 [[INDEX]]
; NOSTRIDED-UF2-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i64, ptr [[TMP9]], i64 [[TMP3]]
; NOSTRIDED-UF2-NEXT: store <vscale x 2 x i64> [[WIDE_MASKED_GATHER]], ptr [[TMP9]], align 8
; NOSTRIDED-UF2-NEXT: store <vscale x 2 x i64> [[WIDE_MASKED_GATHER1]], ptr [[TMP12]], align 8
; NOSTRIDED-UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
-; NOSTRIDED-UF2-NEXT: [[VEC_IND_NEXT]] = add nuw nsw <vscale x 2 x i64> [[STEP_ADD]], [[BROADCAST_SPLAT]]
; NOSTRIDED-UF2-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NOSTRIDED-UF2-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; NOSTRIDED-UF2: middle.block:
@@ -1360,23 +1408,25 @@ define void @constant_stride_reinterpret(ptr noalias %in, ptr noalias %out) {
; STRIDED-NEXT: entry:
; STRIDED-NEXT: br label [[VECTOR_PH:%.*]]
; STRIDED: vector.ph:
-; STRIDED-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
+; STRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; STRIDED-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 1
; STRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; STRIDED: vector.body:
; STRIDED-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; STRIDED-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[TMP0]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[TMP2:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
-; STRIDED-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
-; STRIDED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP3]], i64 0
-; STRIDED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-; STRIDED-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw i32, ptr [[IN:%.*]], <vscale x 2 x i64> [[VEC_IND]]
-; STRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.vp.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> align 8 [[TMP4]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP2]])
+; STRIDED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP2]], i64 0
+; STRIDED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; STRIDED-NEXT: [[TMP3:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; STRIDED-NEXT: [[TMP4:%.*]] = icmp ult <vscale x 2 x i32> [[TMP3]], [[BROADCAST_SPLAT]]
+; STRIDED-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[IN:%.*]], i64 [[EVL_BASED_IV]]
+; STRIDED-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP1]] to i32
+; STRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[TMP9]], i64 4, <vscale x 2 x i1> [[TMP4]], i32 [[TMP10]])
; STRIDED-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i64, ptr [[OUT:%.*]], i64 [[EVL_BASED_IV]]
; STRIDED-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[WIDE_MASKED_GATHER]], ptr align 8 [[TMP5]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP2]])
-; STRIDED-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP3]], [[EVL_BASED_IV]]
-; STRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP3]]
-; STRIDED-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; STRIDED-NEXT: [[TMP11:%.*]] = zext i32 [[TMP2]] to i64
+; STRIDED-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[EVL_BASED_IV]]
+; STRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
; STRIDED-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; STRIDED-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; STRIDED: middle.block:
@@ -1393,27 +1443,23 @@ define void @constant_stride_reinterpret(ptr noalias %in, ptr noalias %out) {
; STRIDED-UF2: vector.ph:
; STRIDED-UF2-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; STRIDED-UF2-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 1
-; STRIDED-UF2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP3]], i64 0
-; STRIDED-UF2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; STRIDED-UF2-NEXT: [[TMP4:%.*]] = shl nuw i64 [[TMP3]], 1
; STRIDED-UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
; STRIDED-UF2-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; STRIDED-UF2-NEXT: [[TMP5:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; STRIDED-UF2-NEXT: br label [[VECTOR_BODY:%.*]]
; STRIDED-UF2: vector.body:
; STRIDED-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; STRIDED-UF2-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[TMP5]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; STRIDED-UF2-NEXT: [[STEP_ADD:%.*]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; STRIDED-UF2-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i32, ptr [[IN:%.*]], <vscale x 2 x i64> [[VEC_IND]]
-; STRIDED-UF2-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i32, ptr [[IN]], <vscale x 2 x i64> [[STEP_ADD]]
-; STRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> poison)
-; STRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> poison)
+; STRIDED-UF2-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i32, ptr [[IN:%.*]], i64 [[INDEX]]
+; STRIDED-UF2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP5]], i64 [[TMP3]]
+; STRIDED-UF2-NEXT: [[TMP7:%.*]] = trunc i64 [[TMP3]] to i32
+; STRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[TMP5]], i64 4, <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]])
+; STRIDED-UF2-NEXT: [[TMP8:%.*]] = trunc i64 [[TMP3]] to i32
+; STRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[TMP6]], i64 4, <vscale x 2 x i1> splat (i1 true), i32 [[TMP8]])
; STRIDED-UF2-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i64, ptr [[OUT:%.*]], i64 [[INDEX]]
; STRIDED-UF2-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i64, ptr [[TMP9]], i64 [[TMP3]]
; STRIDED-UF2-NEXT: store <vscale x 2 x i64> [[WIDE_MASKED_GATHER]], ptr [[TMP9]], align 8
; STRIDED-UF2-NEXT: store <vscale x 2 x i64> [[WIDE_MASKED_GATHER1]], ptr [[TMP12]], align 8
; STRIDED-UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
-; STRIDED-UF2-NEXT: [[VEC_IND_NEXT]] = add nuw nsw <vscale x 2 x i64> [[STEP_ADD]], [[BROADCAST_SPLAT]]
; STRIDED-UF2-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; STRIDED-UF2-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; STRIDED-UF2: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
index ba7005f4f56dc..7482ddb62bfc5 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
@@ -10,35 +10,77 @@
define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %index, i64 %n) {
; IF-EVL-LABEL: @gather_scatter(
; IF-EVL-NEXT: entry:
+; IF-EVL-NEXT: br label [[VECTOR_PH:%.*]]
+; IF-EVL: vector.ph:
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; IF-EVL-NEXT: [[TMP12:%.*]] = shl nuw i64 [[TMP11]], 1
; IF-EVL-NEXT: br label [[FOR_BODY1:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[INDVARS_IV1:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT1:%.*]], [[FOR_BODY1]] ]
-; IF-EVL-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[INDEX:%.*]], i64 [[INDVARS_IV1]]
-; IF-EVL-NEXT: [[TMP0:%.*]] = load i64, ptr [[ARRAYIDX3]], align 8
-; IF-EVL-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[IN:%.*]], i64 [[TMP0]]
-; IF-EVL-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX5]], align 4
-; IF-EVL-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[OUT:%.*]], i64 [[TMP0]]
-; IF-EVL-NEXT: store float [[TMP1]], ptr [[ARRAYIDX7]], align 4
-; IF-EVL-NEXT: [[INDVARS_IV_NEXT1]] = add nuw nsw i64 [[INDVARS_IV1]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT1]], [[N:%.*]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY1]]
+; IF-EVL: vector.body:
+; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[FOR_BODY1]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[FOR_BODY1]] ]
+; IF-EVL-NEXT: [[TMP2:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP2]], i64 0
+; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; IF-EVL-NEXT: [[TMP3:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; IF-EVL-NEXT: [[TMP4:%.*]] = icmp ult <vscale x 2 x i32> [[TMP3]], [[BROADCAST_SPLAT]]
+; IF-EVL-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[INDEX:%.*]], i64 [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP12]] to i32
+; IF-EVL-NEXT: [[WIDE_STRIDED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[TMP5]], i64 4, <vscale x 2 x i1> [[TMP4]], i32 [[TMP6]])
+; IF-EVL-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[IN:%.*]], <vscale x 2 x i64> [[WIDE_STRIDED_LOAD]]
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x float> @llvm.vp.gather.nxv2f32.nxv2p0(<vscale x 2 x ptr> align 4 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP2]])
+; IF-EVL-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[OUT:%.*]], <vscale x 2 x i64> [[WIDE_STRIDED_LOAD]]
+; IF-EVL-NEXT: call void @llvm.vp.scatter.nxv2f32.nxv2p0(<vscale x 2 x float> [[WIDE_MASKED_GATHER]], <vscale x 2 x ptr> align 4 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP2]])
+; IF-EVL-NEXT: [[TMP9:%.*]] = zext i32 [[TMP2]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
+; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY1]], !llvm.loop [[LOOP0:![0-9]+]]
+; IF-EVL: middle.block:
+; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.end:
; IF-EVL-NEXT: ret void
;
; NO-VP-LABEL: @gather_scatter(
; NO-VP-NEXT: entry:
+; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP9:%.*]] = shl nuw i64 [[TMP4]], 1
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP9]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]]
+; NO-VP: vector.ph:
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 1
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; NO-VP-NEXT: br label [[FOR_BODY1:%.*]]
-; NO-VP: for.body:
-; NO-VP-NEXT: [[INDVARS_IV1:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT1:%.*]], [[FOR_BODY1]] ]
+; NO-VP: vector.body:
+; NO-VP-NEXT: [[INDVARS_IV1:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDVARS_IV_NEXT1:%.*]], [[FOR_BODY1]] ]
; NO-VP-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[INDEX:%.*]], i64 [[INDVARS_IV1]]
-; NO-VP-NEXT: [[TMP0:%.*]] = load i64, ptr [[ARRAYIDX3]], align 8
-; NO-VP-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[IN:%.*]], i64 [[TMP0]]
+; NO-VP-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP3]] to i32
+; NO-VP-NEXT: [[WIDE_STRIDED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[ARRAYIDX3]], i64 4, <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
+; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[IN:%.*]], <vscale x 2 x i64> [[WIDE_STRIDED_LOAD]]
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32.nxv2p0(<vscale x 2 x ptr> align 4 [[TMP6]], <vscale x 2 x i1> splat (i1 true), <vscale x 2 x float> poison)
+; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[OUT:%.*]], <vscale x 2 x i64> [[WIDE_STRIDED_LOAD]]
+; NO-VP-NEXT: call void @llvm.masked.scatter.nxv2f32.nxv2p0(<vscale x 2 x float> [[WIDE_MASKED_GATHER]], <vscale x 2 x ptr> align 4 [[TMP7]], <vscale x 2 x i1> splat (i1 true))
+; NO-VP-NEXT: [[INDVARS_IV_NEXT1]] = add nuw i64 [[INDVARS_IV1]], [[TMP3]]
+; NO-VP-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT1]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY1]], !llvm.loop [[LOOP0:![0-9]+]]
+; NO-VP: middle.block:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; NO-VP: scalar.ph:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ]
+; NO-VP-NEXT: br label [[FOR_BODY:%.*]]
+; NO-VP: for.body:
+; NO-VP-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; NO-VP-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[INDEX]], i64 [[INDVARS_IV]]
+; NO-VP-NEXT: [[TMP0:%.*]] = load i64, ptr [[ARRAYIDX4]], align 8
+; NO-VP-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[IN]], i64 [[TMP0]]
; NO-VP-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX5]], align 4
-; NO-VP-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[OUT:%.*]], i64 [[TMP0]]
+; NO-VP-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[OUT]], i64 [[TMP0]]
; NO-VP-NEXT: store float [[TMP1]], ptr [[ARRAYIDX7]], align 4
-; NO-VP-NEXT: [[INDVARS_IV_NEXT1]] = add nuw nsw i64 [[INDVARS_IV1]], 1
-; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT1]], [[N:%.*]]
-; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY1]]
+; NO-VP-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; NO-VP: for.end:
; NO-VP-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
index 4a12dacbacfc9..63c6e44768950 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
@@ -118,27 +118,33 @@ define i32 @load_factor_4_with_gap(i64 %n, ptr noalias %a) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; IF-EVL-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 2
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP2]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP4:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
-; IF-EVL-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
-; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP5]], i64 0
-; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-; IF-EVL-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i32], ptr [[A:%.*]], <vscale x 4 x i64> [[VEC_IND]], i32 0
-; IF-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP7]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP4]])
+; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP4]], i64 0
+; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; IF-EVL-NEXT: [[TMP3:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; IF-EVL-NEXT: [[TMP7:%.*]] = icmp ult <vscale x 4 x i32> [[TMP3]], [[BROADCAST_SPLAT]]
+; IF-EVL-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i32], ptr [[A:%.*]], i64 [[EVL_BASED_IV]], i32 0
+; IF-EVL-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP1]] to i32
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP5]], i64 16, <vscale x 4 x i1> [[TMP7]], i32 [[TMP6]])
; IF-EVL-NEXT: [[TMP8:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[WIDE_MASKED_GATHER]]
-; IF-EVL-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP7]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP4]])
+; IF-EVL-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP1]] to i32
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP5]], i64 16, <vscale x 4 x i1> [[TMP7]], i32 [[TMP19]])
; IF-EVL-NEXT: [[TMP9:%.*]] = add <vscale x 4 x i32> [[TMP8]], [[WIDE_MASKED_GATHER1]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], <vscale x 4 x i64> [[VEC_IND]], i32 3
-; IF-EVL-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP10]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP4]])
+; IF-EVL-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[EVL_BASED_IV]], i32 3
+; IF-EVL-NEXT: [[TMP20:%.*]] = trunc i64 [[TMP1]] to i32
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP10]], i64 16, <vscale x 4 x i1> [[TMP7]], i32 [[TMP20]])
; IF-EVL-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[TMP9]], [[WIDE_MASKED_GATHER2]]
; IF-EVL-NEXT: [[TMP12]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP11]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP4]])
-; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP5]]
-; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP4]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]]
; IF-EVL-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; IF-EVL-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; IF-EVL: middle.block:
@@ -160,24 +166,22 @@ define i32 @load_factor_4_with_gap(i64 %n, ptr noalias %a) {
; NO-VP-NEXT: [[TMP4:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; NO-VP-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 [[TMP3]], i64 [[N_MOD_VF]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[TMP5]]
-; NO-VP-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
-; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP3]], i64 0
-; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; NO-VP-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP6]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; NO-VP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ]
-; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i32], ptr [[A:%.*]], <vscale x 4 x i64> [[VEC_IND]], i32 0
-; NO-VP-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP9]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
+; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x i32], ptr [[A:%.*]], i64 [[INDEX]], i32 0
+; NO-VP-NEXT: [[TMP7:%.*]] = trunc i64 [[TMP3]] to i32
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP6]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
; NO-VP-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[WIDE_MASKED_GATHER]]
-; NO-VP-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP9]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
+; NO-VP-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP3]] to i32
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP6]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; NO-VP-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[TMP10]], [[WIDE_MASKED_GATHER1]]
-; NO-VP-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], <vscale x 4 x i64> [[VEC_IND]], i32 3
-; NO-VP-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
+; NO-VP-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[INDEX]], i32 3
+; NO-VP-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP3]] to i32
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP19]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
; NO-VP-NEXT: [[TMP13]] = add <vscale x 4 x i32> [[TMP11]], [[WIDE_MASKED_GATHER2]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
-; NO-VP-NEXT: [[VEC_IND_NEXT]] = add nuw nsw <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; NO-VP: middle.block:
@@ -350,27 +354,33 @@ define i32 @load_factor_4_with_tail_gap(i64 %n, ptr noalias %a) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; IF-EVL-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 2
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP2]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP4:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
-; IF-EVL-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
-; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP5]], i64 0
-; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-; IF-EVL-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i32], ptr [[A:%.*]], <vscale x 4 x i64> [[VEC_IND]], i32 0
-; IF-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP7]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP4]])
+; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP4]], i64 0
+; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; IF-EVL-NEXT: [[TMP3:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; IF-EVL-NEXT: [[TMP7:%.*]] = icmp ult <vscale x 4 x i32> [[TMP3]], [[BROADCAST_SPLAT]]
+; IF-EVL-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i32], ptr [[A:%.*]], i64 [[EVL_BASED_IV]], i32 0
+; IF-EVL-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP1]] to i32
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP5]], i64 16, <vscale x 4 x i1> [[TMP7]], i32 [[TMP6]])
; IF-EVL-NEXT: [[TMP8:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[WIDE_MASKED_GATHER]]
-; IF-EVL-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP7]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP4]])
+; IF-EVL-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP1]] to i32
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP5]], i64 16, <vscale x 4 x i1> [[TMP7]], i32 [[TMP19]])
; IF-EVL-NEXT: [[TMP9:%.*]] = add <vscale x 4 x i32> [[TMP8]], [[WIDE_MASKED_GATHER1]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], <vscale x 4 x i64> [[VEC_IND]], i32 2
-; IF-EVL-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP10]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP4]])
+; IF-EVL-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[EVL_BASED_IV]], i32 2
+; IF-EVL-NEXT: [[TMP20:%.*]] = trunc i64 [[TMP1]] to i32
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP10]], i64 16, <vscale x 4 x i1> [[TMP7]], i32 [[TMP20]])
; IF-EVL-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[TMP9]], [[WIDE_MASKED_GATHER2]]
; IF-EVL-NEXT: [[TMP12]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP11]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP4]])
-; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP5]]
-; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP4]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]]
; IF-EVL-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; IF-EVL-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL: middle.block:
@@ -392,24 +402,22 @@ define i32 @load_factor_4_with_tail_gap(i64 %n, ptr noalias %a) {
; NO-VP-NEXT: [[TMP4:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; NO-VP-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 [[TMP3]], i64 [[N_MOD_VF]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[TMP5]]
-; NO-VP-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
-; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP3]], i64 0
-; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; NO-VP-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP6]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; NO-VP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ]
-; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i32], ptr [[A:%.*]], <vscale x 4 x i64> [[VEC_IND]], i32 0
-; NO-VP-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP9]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
+; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x i32], ptr [[A:%.*]], i64 [[INDEX]], i32 0
+; NO-VP-NEXT: [[TMP7:%.*]] = trunc i64 [[TMP3]] to i32
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP6]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
; NO-VP-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[WIDE_MASKED_GATHER]]
-; NO-VP-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP9]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
+; NO-VP-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP3]] to i32
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP6]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; NO-VP-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[TMP10]], [[WIDE_MASKED_GATHER1]]
-; NO-VP-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], <vscale x 4 x i64> [[VEC_IND]], i32 2
-; NO-VP-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
+; NO-VP-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[INDEX]], i32 2
+; NO-VP-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP3]] to i32
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP19]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
; NO-VP-NEXT: [[TMP13]] = add <vscale x 4 x i32> [[TMP11]], [[WIDE_MASKED_GATHER2]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
-; NO-VP-NEXT: [[VEC_IND_NEXT]] = add nuw nsw <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; NO-VP: middle.block:
@@ -575,35 +583,38 @@ define i32 @load_factor_4_reverse(i64 %n, ptr noalias %a) {
; IF-EVL-NEXT: [[TMP1:%.*]] = sub i64 [[N]], [[SMIN]]
; IF-EVL-NEXT: br label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
-; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[N]], i64 0
-; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-; IF-EVL-NEXT: [[TMP3:%.*]] = mul nsw <vscale x 4 x i64> [[TMP4]], splat (i64 -1)
-; IF-EVL-NEXT: [[INDUCTION:%.*]] = add nsw <vscale x 4 x i64> [[BROADCAST_SPLAT]], [[TMP3]]
+; IF-EVL-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; IF-EVL-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 2
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP1]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
-; IF-EVL-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
-; IF-EVL-NEXT: [[TMP8:%.*]] = mul nsw i64 -1, [[TMP7]]
-; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP8]], i64 0
-; IF-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-; IF-EVL-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i32], ptr [[A:%.*]], <vscale x 4 x i64> [[VEC_IND]], i32 0
-; IF-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP9]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
+; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP6]], i64 0
+; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; IF-EVL-NEXT: [[TMP5:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; IF-EVL-NEXT: [[TMP9:%.*]] = icmp ult <vscale x 4 x i32> [[TMP5]], [[BROADCAST_SPLAT]]
+; IF-EVL-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i32], ptr [[A:%.*]], i64 [[OFFSET_IDX]], i32 0
+; IF-EVL-NEXT: [[TMP8:%.*]] = trunc i64 [[TMP3]] to i32
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP7]], i64 -16, <vscale x 4 x i1> [[TMP9]], i32 [[TMP8]])
; IF-EVL-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[WIDE_MASKED_GATHER]]
-; IF-EVL-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP9]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = trunc i64 [[TMP3]] to i32
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP7]], i64 -16, <vscale x 4 x i1> [[TMP9]], i32 [[TMP14]])
; IF-EVL-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[TMP10]], [[WIDE_MASKED_GATHER3]]
-; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], <vscale x 4 x i64> [[VEC_IND]], i32 2
-; IF-EVL-NEXT: [[WIDE_MASKED_GATHER4:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
+; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[OFFSET_IDX]], i32 2
+; IF-EVL-NEXT: [[TMP24:%.*]] = trunc i64 [[TMP3]] to i32
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER4:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP12]], i64 -16, <vscale x 4 x i1> [[TMP9]], i32 [[TMP24]])
; IF-EVL-NEXT: [[TMP13:%.*]] = add <vscale x 4 x i32> [[TMP11]], [[WIDE_MASKED_GATHER4]]
-; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], <vscale x 4 x i64> [[VEC_IND]], i32 3
-; IF-EVL-NEXT: [[WIDE_MASKED_GATHER5:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
+; IF-EVL-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[OFFSET_IDX]], i32 3
+; IF-EVL-NEXT: [[TMP26:%.*]] = trunc i64 [[TMP3]] to i32
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER5:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP25]], i64 -16, <vscale x 4 x i1> [[TMP9]], i32 [[TMP26]])
; IF-EVL-NEXT: [[TMP15:%.*]] = add <vscale x 4 x i32> [[TMP13]], [[WIDE_MASKED_GATHER5]]
; IF-EVL-NEXT: [[TMP16]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP15]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP6]])
-; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]]
-; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add nsw <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]]
+; IF-EVL-NEXT: [[TMP27:%.*]] = zext i32 [[TMP6]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP27]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP27]]
; IF-EVL-NEXT: [[TMP18:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; IF-EVL-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; IF-EVL: middle.block:
@@ -627,32 +638,27 @@ define i32 @load_factor_4_reverse(i64 %n, ptr noalias %a) {
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], [[TMP5]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]]
; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[N]], [[N_VEC]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
-; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[N]], i64 0
-; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nsw <vscale x 4 x i64> [[TMP7]], splat (i64 -1)
-; NO-VP-NEXT: [[INDUCTION:%.*]] = add nsw <vscale x 4 x i64> [[BROADCAST_SPLAT]], [[TMP8]]
-; NO-VP-NEXT: [[TMP9:%.*]] = mul nsw i64 -1, [[TMP5]]
-; NO-VP-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0
-; NO-VP-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; NO-VP-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; NO-VP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
-; NO-VP-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i32], ptr [[A:%.*]], <vscale x 4 x i64> [[VEC_IND]], i32 0
-; NO-VP-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP10]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
+; NO-VP-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[N]], [[INDEX]]
+; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i32], ptr [[A:%.*]], i64 [[OFFSET_IDX]], i32 0
+; NO-VP-NEXT: [[TMP8:%.*]] = trunc i64 [[TMP5]] to i32
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP7]], i64 -16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP8]])
; NO-VP-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[WIDE_MASKED_GATHER]]
-; NO-VP-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP10]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
+; NO-VP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP5]] to i32
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP7]], i64 -16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
; NO-VP-NEXT: [[TMP12:%.*]] = add <vscale x 4 x i32> [[TMP11]], [[WIDE_MASKED_GATHER3]]
-; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], <vscale x 4 x i64> [[VEC_IND]], i32 2
-; NO-VP-NEXT: [[WIDE_MASKED_GATHER4:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP13]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
+; NO-VP-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[OFFSET_IDX]], i32 2
+; NO-VP-NEXT: [[TMP13:%.*]] = trunc i64 [[TMP5]] to i32
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER4:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP23]], i64 -16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP13]])
; NO-VP-NEXT: [[TMP14:%.*]] = add <vscale x 4 x i32> [[TMP12]], [[WIDE_MASKED_GATHER4]]
-; NO-VP-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], <vscale x 4 x i64> [[VEC_IND]], i32 3
-; NO-VP-NEXT: [[WIDE_MASKED_GATHER5:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP15]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
+; NO-VP-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[OFFSET_IDX]], i32 3
+; NO-VP-NEXT: [[TMP24:%.*]] = trunc i64 [[TMP5]] to i32
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER5:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP15]], i64 -16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP24]])
; NO-VP-NEXT: [[TMP16]] = add <vscale x 4 x i32> [[TMP14]], [[WIDE_MASKED_GATHER5]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; NO-VP-NEXT: [[VEC_IND_NEXT]] = add nsw <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]]
; NO-VP-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; NO-VP: middle.block:
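
For readers skimming the FileCheck updates above: the net effect of the transform on these interleave tests is to replace each masked gather over a vector of pointers with a single strided load whose byte stride is the size of one [4 x i32] group (16 bytes, negative for the reverse loop). A minimal sketch with illustrative value names (%vec.ind, %offset.idx, %evl are placeholders, not taken from any one test):

  ; before: one address per lane, gathered
  %ptrs = getelementptr inbounds [4 x i32], ptr %a, <vscale x 4 x i64> %vec.ind, i32 0
  %v = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 %ptrs, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)

  ; after: one scalar base address, constant -16 byte stride for the reverse loop
  %p = getelementptr inbounds [4 x i32], ptr %a, i64 %offset.idx, i32 0
  %v = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 %p, i64 -16, <vscale x 4 x i1> splat (i1 true), i32 %evl)
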
diff --git a/llvm/test/Transforms/LoopVectorize/vplan-print-after-all.ll b/llvm/test/Transforms/LoopVectorize/vplan-print-after-all.ll
index 5b68887f0f7da..f9da97ad1200d 100644
--- a/llvm/test/Transforms/LoopVectorize/vplan-print-after-all.ll
+++ b/llvm/test/Transforms/LoopVectorize/vplan-print-after-all.ll
@@ -12,6 +12,7 @@
; CHECK: VPlan after VPlanTransforms::handleFindLastReductions
; CHECK: VPlan after VPlanTransforms::createPartialReductions
; CHECK: VPlan after VPlanTransforms::convertToAbstractRecipes
+; CHECK: VPlan after VPlanTransforms::convertToStridedAccesses
; CHECK: VPlan after VPlanTransforms::createInterleaveGroups
; CHECK: VPlan after VPlanTransforms::replaceSymbolicStrides
; CHECK: VPlan after VPlanTransforms::dropPoisonGeneratingRecipes
>From cb290de8508081d4a90a0127039d8c265dcaf3e6 Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Fri, 22 Aug 2025 01:28:33 -0700
Subject: [PATCH 04/30] Support EVL
---
.../Transforms/Vectorize/VPlanTransforms.cpp | 8 +++
.../RISCV/blocks-with-dead-instructions.ll | 9 +---
.../RISCV/masked_gather_scatter.ll | 26 ++-------
.../LoopVectorize/RISCV/strided-accesses.ll | 53 ++++---------------
.../RISCV/tail-folding-gather-scatter.ll | 9 +---
.../RISCV/tail-folding-interleave.ll | 48 ++++-------------
6 files changed, 33 insertions(+), 120 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 613c65c8b7d4f..e99916767b9bc 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -2980,6 +2980,14 @@ static VPRecipeBase *optimizeMaskToEVL(VPValue *HeaderMask,
TypeInfo.inferScalarType(LoadR), {}, {}, DL);
}
+ if (auto *StridedL = dyn_cast<VPWidenStridedLoadRecipe>(&CurRecipe))
+ if (StridedL->isMasked() &&
+ match(StridedL->getMask(), m_RemoveMask(HeaderMask, Mask)))
+ return new VPWidenStridedLoadRecipe(
+ *cast<LoadInst>(&StridedL->getIngredient()), StridedL->getAddr(),
+ StridedL->getStride(), &EVL, Mask, *StridedL,
+ StridedL->getDebugLoc());
+
VPValue *StoredVal;
if (match(&CurRecipe, m_MaskedStore(m_VPValue(Addr), m_VPValue(StoredVal),
m_RemoveMask(HeaderMask, Mask))) &&
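
At the IR level, the new optimizeMaskToEVL case has the effect visible in the test diffs below: once the header mask folds into the EVL operand, the explicit stepvector/icmp mask computation disappears. A hedged sketch, mirroring the nxv8i16 case in blocks-with-dead-instructions.ll (%evl, %evl.splat, %step, %p, %m, %vf32 are illustrative names):

  ; before: header mask passed explicitly, VL is the full VF
  %m = icmp ult <vscale x 8 x i32> %step, %evl.splat
  %v0 = call <vscale x 8 x i16> @llvm.experimental.vp.strided.load.nxv8i16.p0.i64(ptr align 2 %p, i64 6, <vscale x 8 x i1> %m, i32 %vf32)

  ; after: all-true mask, EVL limits the active lanes instead
  %v1 = call <vscale x 8 x i16> @llvm.experimental.vp.strided.load.nxv8i16.p0.i64(ptr align 2 %p, i64 6, <vscale x 8 x i1> splat (i1 true), i32 %evl)
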
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll b/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll
index fe233e661dafc..f023324baf9f1 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll
@@ -310,8 +310,6 @@ define void @multiple_blocks_with_dead_inst_multiple_successors_6(ptr %src, i1 %
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
; CHECK-NEXT: br label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP4:%.*]] = shl nuw i64 [[TMP3]], 3
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i1> poison, i1 [[IC]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i1> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP8:%.*]] = xor <vscale x 8 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
@@ -323,19 +321,14 @@ define void @multiple_blocks_with_dead_inst_multiple_successors_6(ptr %src, i1 %
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[TMP5]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP2]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP27:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 8 x i32> poison, i32 [[TMP27]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 8 x i32> [[BROADCAST_SPLATINSERT3]], <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP27]] to i64
; CHECK-NEXT: [[TMP16:%.*]] = mul nsw i64 3, [[TMP12]]
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP16]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP18:%.*]] = call <vscale x 8 x i32> @llvm.stepvector.nxv8i32()
-; CHECK-NEXT: [[TMP19:%.*]] = icmp ult <vscale x 8 x i32> [[TMP18]], [[BROADCAST_SPLAT4]]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[EVL_BASED_IV]], 3
; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 8 x i64> [[VEC_IND]]
-; CHECK-NEXT: [[TMP15:%.*]] = trunc i64 [[TMP4]] to i32
-; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vp.strided.load.nxv8i16.p0.i64(ptr align 2 [[TMP21]], i64 6, <vscale x 8 x i1> [[TMP19]], i32 [[TMP15]])
+; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vp.strided.load.nxv8i16.p0.i64(ptr align 2 [[TMP21]], i64 6, <vscale x 8 x i1> splat (i1 true), i32 [[TMP27]])
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq <vscale x 8 x i16> [[WIDE_MASKED_GATHER]], zeroinitializer
; CHECK-NEXT: [[TMP14:%.*]] = select <vscale x 8 x i1> [[TMP17]], <vscale x 8 x i1> [[TMP8]], <vscale x 8 x i1> zeroinitializer
; CHECK-NEXT: [[TMP28:%.*]] = xor <vscale x 8 x i1> [[TMP17]], splat (i1 true)
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll b/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll
index d159e072fc12b..fb3d1bd277e3e 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll
@@ -45,8 +45,6 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea
; RV32-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT8]]
; RV32-NEXT: br i1 [[CONFLICT_RDX]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; RV32: vector.ph:
-; RV32-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
-; RV32-NEXT: [[TMP13:%.*]] = shl nuw i64 [[TMP12]], 1
; RV32-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; RV32-NEXT: [[TMP9:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP7]], splat (i64 16)
; RV32-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -55,24 +53,17 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea
; RV32-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[TMP9]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; RV32-NEXT: [[AVL:%.*]] = phi i64 [ 625, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; RV32-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
-; RV32-NEXT: [[BROADCAST_SPLATINSERT9:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP10]], i64 0
-; RV32-NEXT: [[BROADCAST_SPLAT10:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT9]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
; RV32-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64
; RV32-NEXT: [[TMP11:%.*]] = shl nuw nsw i64 [[TMP8]], 4
; RV32-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP11]], i64 0
; RV32-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-; RV32-NEXT: [[TMP23:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
-; RV32-NEXT: [[TMP15:%.*]] = icmp ult <vscale x 2 x i32> [[TMP23]], [[BROADCAST_SPLAT10]]
; RV32-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[EVL_BASED_IV]], 16
; RV32-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], i64 [[OFFSET_IDX]]
-; RV32-NEXT: [[TMP25:%.*]] = trunc i64 [[TMP13]] to i32
-; RV32-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0.i64(ptr align 4 [[TMP16]], i64 64, <vscale x 2 x i1> [[TMP15]], i32 [[TMP25]]), !alias.scope [[META0:![0-9]+]], !noalias [[META3:![0-9]+]]
+; RV32-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0.i64(ptr align 4 [[TMP16]], i64 64, <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]]), !alias.scope [[META0:![0-9]+]], !noalias [[META3:![0-9]+]]
; RV32-NEXT: [[TMP14:%.*]] = icmp slt <vscale x 2 x i32> [[WIDE_MASKED_GATHER]], splat (i32 100)
-; RV32-NEXT: [[TMP26:%.*]] = select <vscale x 2 x i1> [[TMP15]], <vscale x 2 x i1> [[TMP14]], <vscale x 2 x i1> zeroinitializer
; RV32-NEXT: [[TMP20:%.*]] = shl nuw nsw i64 [[OFFSET_IDX]], 1
; RV32-NEXT: [[TMP21:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[TMP20]]
-; RV32-NEXT: [[TMP22:%.*]] = trunc i64 [[TMP13]] to i32
-; RV32-NEXT: [[WIDE_MASKED_GATHER6:%.*]] = call <vscale x 2 x double> @llvm.experimental.vp.strided.load.nxv2f64.p0.i64(ptr align 8 [[TMP21]], i64 256, <vscale x 2 x i1> [[TMP26]], i32 [[TMP22]]), !alias.scope [[META5:![0-9]+]]
+; RV32-NEXT: [[WIDE_MASKED_GATHER6:%.*]] = call <vscale x 2 x double> @llvm.experimental.vp.strided.load.nxv2f64.p0.i64(ptr align 8 [[TMP21]], i64 256, <vscale x 2 x i1> [[TMP14]], i32 [[TMP10]]), !alias.scope [[META5:![0-9]+]]
; RV32-NEXT: [[TMP17:%.*]] = sitofp <vscale x 2 x i32> [[WIDE_MASKED_GATHER]] to <vscale x 2 x double>
; RV32-NEXT: [[TMP18:%.*]] = fadd <vscale x 2 x double> [[WIDE_MASKED_GATHER6]], [[TMP17]]
; RV32-NEXT: [[TMP19:%.*]] = getelementptr inbounds double, ptr [[A]], <vscale x 2 x i64> [[VEC_IND]]
@@ -125,8 +116,6 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea
; RV64-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT5]]
; RV64-NEXT: br i1 [[CONFLICT_RDX]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; RV64: vector.ph:
-; RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; RV64-NEXT: [[TMP2:%.*]] = shl nuw i64 [[TMP0]], 1
; RV64-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; RV64-NEXT: [[TMP1:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP7]], splat (i64 16)
; RV64-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -135,24 +124,17 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea
; RV64-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[TMP1]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; RV64-NEXT: [[AVL:%.*]] = phi i64 [ 625, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; RV64-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
-; RV64-NEXT: [[BROADCAST_SPLATINSERT6:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP10]], i64 0
-; RV64-NEXT: [[BROADCAST_SPLAT7:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT6]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
; RV64-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64
; RV64-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP8]], 4
; RV64-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP4]], i64 0
; RV64-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-; RV64-NEXT: [[TMP11:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
-; RV64-NEXT: [[TMP25:%.*]] = icmp ult <vscale x 2 x i32> [[TMP11]], [[BROADCAST_SPLAT7]]
; RV64-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[EVL_BASED_IV]], 16
; RV64-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], i64 [[OFFSET_IDX]]
-; RV64-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP2]] to i32
-; RV64-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0.i64(ptr align 4 [[TMP9]], i64 64, <vscale x 2 x i1> [[TMP25]], i32 [[TMP16]]), !alias.scope [[META0:![0-9]+]]
+; RV64-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0.i64(ptr align 4 [[TMP9]], i64 64, <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]]), !alias.scope [[META0:![0-9]+]]
; RV64-NEXT: [[TMP14:%.*]] = icmp slt <vscale x 2 x i32> [[WIDE_MASKED_GATHER]], splat (i32 100)
-; RV64-NEXT: [[TMP12:%.*]] = select <vscale x 2 x i1> [[TMP25]], <vscale x 2 x i1> [[TMP14]], <vscale x 2 x i1> zeroinitializer
; RV64-NEXT: [[TMP13:%.*]] = shl nuw nsw i64 [[OFFSET_IDX]], 1
; RV64-NEXT: [[TMP28:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[TMP13]]
-; RV64-NEXT: [[TMP15:%.*]] = trunc i64 [[TMP2]] to i32
-; RV64-NEXT: [[WIDE_MASKED_GATHER6:%.*]] = call <vscale x 2 x double> @llvm.experimental.vp.strided.load.nxv2f64.p0.i64(ptr align 8 [[TMP28]], i64 256, <vscale x 2 x i1> [[TMP12]], i32 [[TMP15]]), !alias.scope [[META3:![0-9]+]]
+; RV64-NEXT: [[WIDE_MASKED_GATHER6:%.*]] = call <vscale x 2 x double> @llvm.experimental.vp.strided.load.nxv2f64.p0.i64(ptr align 8 [[TMP28]], i64 256, <vscale x 2 x i1> [[TMP14]], i32 [[TMP10]]), !alias.scope [[META3:![0-9]+]]
; RV64-NEXT: [[TMP17:%.*]] = sitofp <vscale x 2 x i32> [[WIDE_MASKED_GATHER]] to <vscale x 2 x double>
; RV64-NEXT: [[TMP18:%.*]] = fadd <vscale x 2 x double> [[WIDE_MASKED_GATHER6]], [[TMP17]]
; RV64-NEXT: [[TMP19:%.*]] = getelementptr inbounds double, ptr [[A]], <vscale x 2 x i64> [[VEC_IND]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
index 3f9c44ebdbf99..a1d094b83f0ca 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
@@ -10,8 +10,6 @@ define void @single_constant_stride_int_scaled(ptr %p) {
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 2
; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
@@ -19,19 +17,14 @@ define void @single_constant_stride_int_scaled(ptr %p) {
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP8]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP11]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP12]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
-; CHECK-NEXT: [[TMP6:%.*]] = icmp ult <vscale x 4 x i32> [[TMP5]], [[BROADCAST_SPLAT2]]
; CHECK-NEXT: [[TMP7:%.*]] = shl nuw nsw i64 [[EVL_BASED_IV]], 3
; CHECK-NEXT: [[TMP14:%.*]] = shl nuw nsw <vscale x 4 x i64> [[VEC_IND]], splat (i64 3)
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP7]]
; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[TMP14]]
-; CHECK-NEXT: [[TMP13:%.*]] = trunc i64 [[TMP1]] to i32
-; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP9]], i64 32, <vscale x 4 x i1> [[TMP6]], i32 [[TMP13]])
+; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP9]], i64 32, <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; CHECK-NEXT: [[TMP16:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP16]], <vscale x 4 x ptr> align 4 [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP12]], [[EVL_BASED_IV]]
@@ -127,8 +120,6 @@ define void @single_constant_stride_int_iv(ptr %p) {
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP2:%.*]] = shl nuw i64 [[TMP0]], 2
; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul nuw nsw <vscale x 4 x i64> [[TMP6]], splat (i64 64)
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -137,19 +128,14 @@ define void @single_constant_stride_int_iv(ptr %p) {
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP1]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP7]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP7]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP11]], 6
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP4]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP14:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
-; CHECK-NEXT: [[TMP8:%.*]] = icmp ult <vscale x 4 x i32> [[TMP14]], [[BROADCAST_SPLAT2]]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[EVL_BASED_IV]], 64
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[VEC_IND]]
-; CHECK-NEXT: [[TMP15:%.*]] = trunc i64 [[TMP2]] to i32
-; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP9]], i64 256, <vscale x 4 x i1> [[TMP8]], i32 [[TMP15]])
+; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP9]], i64 256, <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP13]], <vscale x 4 x ptr> align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[EVL_BASED_IV]]
@@ -798,8 +784,6 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) {
; STRIDED-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; STRIDED-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; STRIDED: vector.ph:
-; STRIDED-NEXT: [[TMP42:%.*]] = call i64 @llvm.vscale.i64()
-; STRIDED-NEXT: [[TMP45:%.*]] = shl nuw i64 [[TMP42]], 2
; STRIDED-NEXT: [[TMP47:%.*]] = shl i64 [[STRIDE]], 2
; STRIDED-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[STRIDE]], i64 0
; STRIDED-NEXT: [[BROADCAST_SPLAT1:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
@@ -810,21 +794,16 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) {
; STRIDED-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP12]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[TMP43:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
-; STRIDED-NEXT: [[BROADCAST_SPLATINSERT11:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP43]], i64 0
-; STRIDED-NEXT: [[BROADCAST_SPLAT12:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT11]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; STRIDED-NEXT: [[TMP44:%.*]] = zext i32 [[TMP43]] to i64
; STRIDED-NEXT: [[BROADCAST_SPLATINSERT9:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP44]], i64 0
; STRIDED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT9]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-; STRIDED-NEXT: [[TMP48:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
-; STRIDED-NEXT: [[TMP49:%.*]] = icmp ult <vscale x 4 x i32> [[TMP48]], [[BROADCAST_SPLAT12]]
; STRIDED-NEXT: [[TMP50:%.*]] = mul nuw nsw i64 [[EVL_BASED_IV]], [[STRIDE]]
; STRIDED-NEXT: [[TMP18:%.*]] = mul nuw nsw <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT1]]
-; STRIDED-NEXT: [[TMP46:%.*]] = getelementptr i32, ptr [[P]], i64 [[TMP50]]
-; STRIDED-NEXT: [[TMP51:%.*]] = trunc i64 [[TMP45]] to i32
-; STRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP46]], i64 [[TMP47]], <vscale x 4 x i1> [[TMP49]], i32 [[TMP51]]), !alias.scope [[META5:![0-9]+]]
-; STRIDED-NEXT: [[TMP20:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
-; STRIDED-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[P2]], <vscale x 4 x i64> [[TMP18]]
-; STRIDED-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP20]], <vscale x 4 x ptr> align 4 [[TMP21]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP43]]), !alias.scope [[META8:![0-9]+]], !noalias [[META5]]
+; STRIDED-NEXT: [[TMP42:%.*]] = getelementptr i32, ptr [[P]], i64 [[TMP50]]
+; STRIDED-NEXT: [[WIDE_STRIDED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP42]], i64 [[TMP47]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP43]]), !alias.scope [[META5:![0-9]+]]
+; STRIDED-NEXT: [[TMP45:%.*]] = add <vscale x 4 x i32> [[WIDE_STRIDED_LOAD]], splat (i32 1)
+; STRIDED-NEXT: [[TMP46:%.*]] = getelementptr i32, ptr [[P2]], <vscale x 4 x i64> [[TMP18]]
+; STRIDED-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP45]], <vscale x 4 x ptr> align 4 [[TMP46]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP43]]), !alias.scope [[META8:![0-9]+]], !noalias [[META5]]
; STRIDED-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP44]], [[EVL_BASED_IV]]
; STRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP44]]
; STRIDED-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
@@ -1332,20 +1311,13 @@ define void @constant_stride_reinterpret(ptr noalias %in, ptr noalias %out) {
; NOSTRIDED-NEXT: entry:
; NOSTRIDED-NEXT: br label [[VECTOR_PH:%.*]]
; NOSTRIDED: vector.ph:
-; NOSTRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; NOSTRIDED-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 1
; NOSTRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; NOSTRIDED: vector.body:
; NOSTRIDED-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; NOSTRIDED-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; NOSTRIDED-NEXT: [[TMP2:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
-; NOSTRIDED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP2]], i64 0
-; NOSTRIDED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-; NOSTRIDED-NEXT: [[TMP3:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
-; NOSTRIDED-NEXT: [[TMP4:%.*]] = icmp ult <vscale x 2 x i32> [[TMP3]], [[BROADCAST_SPLAT]]
; NOSTRIDED-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[IN:%.*]], i64 [[EVL_BASED_IV]]
-; NOSTRIDED-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP1]] to i32
-; NOSTRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[TMP9]], i64 4, <vscale x 2 x i1> [[TMP4]], i32 [[TMP10]])
+; NOSTRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[TMP9]], i64 4, <vscale x 2 x i1> splat (i1 true), i32 [[TMP2]])
; NOSTRIDED-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i64, ptr [[OUT:%.*]], i64 [[EVL_BASED_IV]]
; NOSTRIDED-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[WIDE_MASKED_GATHER]], ptr align 8 [[TMP5]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP2]])
; NOSTRIDED-NEXT: [[TMP11:%.*]] = zext i32 [[TMP2]] to i64
@@ -1408,20 +1380,13 @@ define void @constant_stride_reinterpret(ptr noalias %in, ptr noalias %out) {
; STRIDED-NEXT: entry:
; STRIDED-NEXT: br label [[VECTOR_PH:%.*]]
; STRIDED: vector.ph:
-; STRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; STRIDED-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 1
; STRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; STRIDED: vector.body:
; STRIDED-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[TMP2:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
-; STRIDED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP2]], i64 0
-; STRIDED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-; STRIDED-NEXT: [[TMP3:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
-; STRIDED-NEXT: [[TMP4:%.*]] = icmp ult <vscale x 2 x i32> [[TMP3]], [[BROADCAST_SPLAT]]
; STRIDED-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[IN:%.*]], i64 [[EVL_BASED_IV]]
-; STRIDED-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP1]] to i32
-; STRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[TMP9]], i64 4, <vscale x 2 x i1> [[TMP4]], i32 [[TMP10]])
+; STRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[TMP9]], i64 4, <vscale x 2 x i1> splat (i1 true), i32 [[TMP2]])
; STRIDED-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i64, ptr [[OUT:%.*]], i64 [[EVL_BASED_IV]]
; STRIDED-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[WIDE_MASKED_GATHER]], ptr align 8 [[TMP5]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP2]])
; STRIDED-NEXT: [[TMP11:%.*]] = zext i32 [[TMP2]] to i64
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
index 7482ddb62bfc5..a17b92f930a94 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
@@ -12,20 +12,13 @@ define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %inde
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP12:%.*]] = shl nuw i64 [[TMP11]], 1
; IF-EVL-NEXT: br label [[FOR_BODY1:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[FOR_BODY1]] ]
; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[FOR_BODY1]] ]
; IF-EVL-NEXT: [[TMP2:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
-; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP2]], i64 0
-; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-; IF-EVL-NEXT: [[TMP3:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
-; IF-EVL-NEXT: [[TMP4:%.*]] = icmp ult <vscale x 2 x i32> [[TMP3]], [[BROADCAST_SPLAT]]
; IF-EVL-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[INDEX:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP12]] to i32
-; IF-EVL-NEXT: [[WIDE_STRIDED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[TMP5]], i64 4, <vscale x 2 x i1> [[TMP4]], i32 [[TMP6]])
+; IF-EVL-NEXT: [[WIDE_STRIDED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[TMP5]], i64 4, <vscale x 2 x i1> splat (i1 true), i32 [[TMP2]])
; IF-EVL-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[IN:%.*]], <vscale x 2 x i64> [[WIDE_STRIDED_LOAD]]
; IF-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x float> @llvm.vp.gather.nxv2f32.nxv2p0(<vscale x 2 x ptr> align 4 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP2]])
; IF-EVL-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[OUT:%.*]], <vscale x 2 x i64> [[WIDE_STRIDED_LOAD]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
index 63c6e44768950..9228418900470 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
@@ -118,28 +118,19 @@ define i32 @load_factor_4_with_gap(i64 %n, ptr noalias %a) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 2
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP4:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
-; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP4]], i64 0
-; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-; IF-EVL-NEXT: [[TMP3:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
-; IF-EVL-NEXT: [[TMP7:%.*]] = icmp ult <vscale x 4 x i32> [[TMP3]], [[BROADCAST_SPLAT]]
; IF-EVL-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i32], ptr [[A:%.*]], i64 [[EVL_BASED_IV]], i32 0
-; IF-EVL-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP1]] to i32
-; IF-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP5]], i64 16, <vscale x 4 x i1> [[TMP7]], i32 [[TMP6]])
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP5]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP4]])
; IF-EVL-NEXT: [[TMP8:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[WIDE_MASKED_GATHER]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP1]] to i32
-; IF-EVL-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP5]], i64 16, <vscale x 4 x i1> [[TMP7]], i32 [[TMP19]])
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP5]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP4]])
; IF-EVL-NEXT: [[TMP9:%.*]] = add <vscale x 4 x i32> [[TMP8]], [[WIDE_MASKED_GATHER1]]
; IF-EVL-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[EVL_BASED_IV]], i32 3
-; IF-EVL-NEXT: [[TMP20:%.*]] = trunc i64 [[TMP1]] to i32
-; IF-EVL-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP10]], i64 16, <vscale x 4 x i1> [[TMP7]], i32 [[TMP20]])
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP10]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP4]])
; IF-EVL-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[TMP9]], [[WIDE_MASKED_GATHER2]]
; IF-EVL-NEXT: [[TMP12]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP11]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP4]])
; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP4]] to i64
@@ -354,28 +345,19 @@ define i32 @load_factor_4_with_tail_gap(i64 %n, ptr noalias %a) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 2
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP4:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
-; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP4]], i64 0
-; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-; IF-EVL-NEXT: [[TMP3:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
-; IF-EVL-NEXT: [[TMP7:%.*]] = icmp ult <vscale x 4 x i32> [[TMP3]], [[BROADCAST_SPLAT]]
; IF-EVL-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i32], ptr [[A:%.*]], i64 [[EVL_BASED_IV]], i32 0
-; IF-EVL-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP1]] to i32
-; IF-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP5]], i64 16, <vscale x 4 x i1> [[TMP7]], i32 [[TMP6]])
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP5]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP4]])
; IF-EVL-NEXT: [[TMP8:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[WIDE_MASKED_GATHER]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP1]] to i32
-; IF-EVL-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP5]], i64 16, <vscale x 4 x i1> [[TMP7]], i32 [[TMP19]])
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP5]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP4]])
; IF-EVL-NEXT: [[TMP9:%.*]] = add <vscale x 4 x i32> [[TMP8]], [[WIDE_MASKED_GATHER1]]
; IF-EVL-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[EVL_BASED_IV]], i32 2
-; IF-EVL-NEXT: [[TMP20:%.*]] = trunc i64 [[TMP1]] to i32
-; IF-EVL-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP10]], i64 16, <vscale x 4 x i1> [[TMP7]], i32 [[TMP20]])
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP10]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP4]])
; IF-EVL-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[TMP9]], [[WIDE_MASKED_GATHER2]]
; IF-EVL-NEXT: [[TMP12]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP11]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP4]])
; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP4]] to i64
@@ -583,33 +565,23 @@ define i32 @load_factor_4_reverse(i64 %n, ptr noalias %a) {
; IF-EVL-NEXT: [[TMP1:%.*]] = sub i64 [[N]], [[SMIN]]
; IF-EVL-NEXT: br label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 2
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP1]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
-; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP6]], i64 0
-; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-; IF-EVL-NEXT: [[TMP5:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
-; IF-EVL-NEXT: [[TMP9:%.*]] = icmp ult <vscale x 4 x i32> [[TMP5]], [[BROADCAST_SPLAT]]
; IF-EVL-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i32], ptr [[A:%.*]], i64 [[OFFSET_IDX]], i32 0
-; IF-EVL-NEXT: [[TMP8:%.*]] = trunc i64 [[TMP3]] to i32
-; IF-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP7]], i64 -16, <vscale x 4 x i1> [[TMP9]], i32 [[TMP8]])
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP7]], i64 -16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
; IF-EVL-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[WIDE_MASKED_GATHER]]
-; IF-EVL-NEXT: [[TMP14:%.*]] = trunc i64 [[TMP3]] to i32
-; IF-EVL-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP7]], i64 -16, <vscale x 4 x i1> [[TMP9]], i32 [[TMP14]])
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP7]], i64 -16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
; IF-EVL-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[TMP10]], [[WIDE_MASKED_GATHER3]]
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[OFFSET_IDX]], i32 2
-; IF-EVL-NEXT: [[TMP24:%.*]] = trunc i64 [[TMP3]] to i32
-; IF-EVL-NEXT: [[WIDE_MASKED_GATHER4:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP12]], i64 -16, <vscale x 4 x i1> [[TMP9]], i32 [[TMP24]])
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER4:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP12]], i64 -16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
; IF-EVL-NEXT: [[TMP13:%.*]] = add <vscale x 4 x i32> [[TMP11]], [[WIDE_MASKED_GATHER4]]
; IF-EVL-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[OFFSET_IDX]], i32 3
-; IF-EVL-NEXT: [[TMP26:%.*]] = trunc i64 [[TMP3]] to i32
-; IF-EVL-NEXT: [[WIDE_MASKED_GATHER5:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP25]], i64 -16, <vscale x 4 x i1> [[TMP9]], i32 [[TMP26]])
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER5:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP25]], i64 -16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
; IF-EVL-NEXT: [[TMP15:%.*]] = add <vscale x 4 x i32> [[TMP13]], [[WIDE_MASKED_GATHER5]]
; IF-EVL-NEXT: [[TMP16]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP15]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP6]])
; IF-EVL-NEXT: [[TMP27:%.*]] = zext i32 [[TMP6]] to i64
>From 3a3fc7991d5f438cf55a98a5db6a7d687355cffc Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Thu, 4 Sep 2025 05:42:28 -0700
Subject: [PATCH 05/30] patch planContainsAdditionalSimplifications
---
llvm/lib/Transforms/Vectorize/LoopVectorize.cpp | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index a12aec11df29b..dbae39ef7b271 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -7165,6 +7165,12 @@ static bool planContainsAdditionalSimplifications(VPlan &Plan,
RepR->getUnderlyingInstr(), VF))
return true;
}
+
+  // The strided load is created from a gather by
+  // VPlanTransforms::convertToStridedAccesses, and its cost is lower than
+  // that of the original gather.
+ if (isa<VPWidenStridedLoadRecipe>(&R))
+ return true;
+
if (Instruction *UI = GetInstructionForCost(&R)) {
// If we adjusted the predicate of the recipe, the cost in the legacy
// cost model may be different.
>From a906415fdbdb9f5010ebe8b0ad165ee0dba4b0b6 Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Tue, 21 Oct 2025 23:23:05 -0700
Subject: [PATCH 06/30] Update comment. nfc
---
llvm/lib/Transforms/Vectorize/VPlan.h | 8 +++++---
llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp | 8 ++++++--
2 files changed, 11 insertions(+), 5 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index f6b916ce3fd32..bd080242530e0 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -2054,6 +2054,7 @@ class LLVM_ABI_FOR_TEST VPWidenGEPRecipe : public VPRecipeWithIRFlags {
return VarIdx;
}
+  /// Returns the type indexed by the first \p I indices of this recipe's GEP.
Type *getIndexedType(unsigned I) const {
auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr());
SmallVector<Value *, 4> Ops(GEP->idx_begin(), GEP->idx_begin() + I);
@@ -3625,9 +3626,9 @@ struct VPWidenLoadEVLRecipe final : public VPWidenMemoryRecipe,
#endif
};
-/// A recipe for strided load operations, using the base address, stride, and an
-/// optional mask. This recipe will generate an vp.strided.load intrinsic call
-/// to represent memory accesses with a fixed stride.
+/// A recipe for strided load operations, using the base address, stride, VF,
+/// and an optional mask. This recipe will generate a vp.strided.load intrinsic
+/// call to represent memory accesses with a fixed stride.
struct VPWidenStridedLoadRecipe final : public VPWidenMemoryRecipe,
public VPRecipeValue {
VPWidenStridedLoadRecipe(LoadInst &Load, VPValue *Addr, VPValue *Stride,
@@ -3667,6 +3668,7 @@ struct VPWidenStridedLoadRecipe final : public VPWidenMemoryRecipe,
bool usesFirstLaneOnly(const VPValue *Op) const override {
assert(is_contained(operands(), Op) &&
"Op must be an operand of the recipe");
+ // All operands except the mask are only used for the first lane.
return Op == getAddr() || Op == getStride() || Op == getVF();
}
};
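
The usesFirstLaneOnly override mirrors the operand layout of the generated intrinsic, whose signature (as used throughout the tests in this series) makes every operand except the mask scalar:

  declare <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(
      ptr,               ; base address - scalar, first lane only
      i64,               ; byte stride  - scalar, first lane only
      <vscale x 4 x i1>, ; mask         - the only per-lane operand
      i32)               ; explicit VL  - scalar
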
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index e99916767b9bc..edf72c9d39e0e 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -5914,8 +5914,8 @@ static std::pair<VPValue *, VPValue *> matchStridedStart(VPValue *CurIndex) {
if (Opcode != Instruction::Shl && Opcode != Instruction::Mul)
return {nullptr, nullptr};
- // Match the pattern binop(variant, invariant), or binop(invariant, variant)
- // if the binary operator is commutative.
+ // Match the pattern binop(variant, uniform), or binop(uniform, variant) if
+ // the binary operator is commutative.
bool IsLHSUniform = vputils::isSingleScalar(WidenR->getOperand(0));
if (IsLHSUniform == vputils::isSingleScalar(WidenR->getOperand(1)) ||
(IsLHSUniform && !Instruction::isCommutative(Opcode)))
@@ -5942,6 +5942,10 @@ static std::pair<VPValue *, VPValue *> matchStridedStart(VPValue *CurIndex) {
return {StartR, StrideR};
}
+/// Checks if the given VPWidenGEPRecipe \p WidenGEP represents a strided
+/// access. If so, it creates recipes representing the base pointer and stride
+/// in element type, and returns a tuple of {base pointer, stride, element
+/// type}. Otherwise, returns a tuple where all elements are nullptr.
static std::tuple<VPValue *, VPValue *, Type *>
determineBaseAndStride(VPWidenGEPRecipe *WidenGEP) {
// TODO: Check if the base pointer is strided.
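
A standalone sketch of the operand-shape check in matchStridedStart above; the
enum and function here are illustrative stand-ins, not the VPlan API.

enum class BinOp { Shl, Mul };

// Accept binop(variant, uniform) always, but binop(uniform, variant) only
// when the operator is commutative: stride * i equals i * stride, whereas
// stride << i is not the same access pattern as i << stride.
bool hasStridedOperandShape(bool LHSUniform, bool RHSUniform, BinOp Op) {
  if (LHSUniform == RHSUniform)
    return false; // need exactly one uniform (single-scalar) operand
  if (LHSUniform && Op != BinOp::Mul)
    return false; // uniform on the LHS requires a commutative operator
  return true;
}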
>From 7d1213452ab7a201dff28b78009f52e69c412e03 Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Wed, 22 Oct 2025 02:15:56 -0700
Subject: [PATCH 07/30] Find base pointer and stride after profitable check
---
.../Transforms/Vectorize/VPlanTransforms.cpp | 50 +++++++++----------
1 file changed, 23 insertions(+), 27 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index edf72c9d39e0e..18a84420a19c7 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -5987,8 +5987,7 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
VPTypeAnalysis TypeInfo(Plan);
DenseMap<VPWidenGEPRecipe *, std::tuple<VPValue *, VPValue *, Type *>>
StrideCache;
- SmallVector<VPRecipeBase *> ToErase;
- SmallPtrSet<VPValue *, 4> PossiblyDead;
+ SmallVector<VPWidenMemoryRecipe *> ToErase;
for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
vp_depth_first_shallow(Plan.getVectorLoopRegion()->getEntry()))) {
for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
@@ -6011,23 +6010,6 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
if (!PtrUV)
continue;
- // Try to get base and stride here.
- VPValue *BasePtr, *StrideInElement;
- Type *ElementTy;
- auto It = StrideCache.find(Ptr);
- if (It != StrideCache.end())
- std::tie(BasePtr, StrideInElement, ElementTy) = It->second;
- else
- std::tie(BasePtr, StrideInElement, ElementTy) = StrideCache[Ptr] =
- determineBaseAndStride(Ptr);
-
- // Skip if the memory access is not a strided access.
- if (!BasePtr) {
- assert(!StrideInElement && !ElementTy);
- continue;
- }
- assert(StrideInElement && ElementTy);
-
Instruction &Ingredient = MemR->getIngredient();
auto IsProfitable = [&](ElementCount VF) -> bool {
Type *DataTy = toVectorTy(getLoadStoreType(&Ingredient), VF);
@@ -6045,12 +6027,25 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
};
if (!LoopVectorizationPlanner::getDecisionAndClampRange(IsProfitable,
- Range)) {
- PossiblyDead.insert(BasePtr);
- PossiblyDead.insert(StrideInElement);
+ Range))
+ continue;
+
+ // Try to get base and stride here.
+ VPValue *BasePtr, *StrideInElement;
+ Type *ElementTy;
+ auto It = StrideCache.find(Ptr);
+ if (It != StrideCache.end())
+ std::tie(BasePtr, StrideInElement, ElementTy) = It->second;
+ else
+ std::tie(BasePtr, StrideInElement, ElementTy) = StrideCache[Ptr] =
+ determineBaseAndStride(Ptr);
+
+ // Skip if the memory access is not a strided access.
+ if (!BasePtr) {
+ assert(!StrideInElement && !ElementTy);
continue;
}
- PossiblyDead.insert(Ptr);
+ assert(StrideInElement && ElementTy);
// Create a new vector pointer for strided access.
auto *GEP = dyn_cast<GetElementPtrInst>(PtrUV->stripPointerCasts());
@@ -6086,9 +6081,10 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
}
}
- // Clean up dead memory access recipes, and unused base address and stride.
- for (auto *R : ToErase)
+ // Clean up dead recipes.
+ for (auto *R : ToErase) {
+ VPValue *Addr = R->getAddr();
R->eraseFromParent();
- for (auto *V : PossiblyDead)
- recursivelyDeleteDeadRecipes(V);
+ recursivelyDeleteDeadRecipes(Addr);
+ }
}
>From d265f7367b0f871cc9baa0d60b55302bc50d734d Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Wed, 22 Oct 2025 02:43:30 -0700
Subject: [PATCH 08/30] Replace getUnderlyingValue() with getUnderlyingInstr()
---
llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 18a84420a19c7..483b3428fb1e0 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -5906,7 +5906,7 @@ static std::pair<VPValue *, VPValue *> matchStridedStart(VPValue *CurIndex) {
return {WidenIV, WidenIV->getStepValue()};
auto *WidenR = dyn_cast<VPWidenRecipe>(CurIndex);
- if (!WidenR || !CurIndex->getUnderlyingValue())
+ if (!WidenR || !WidenR->getUnderlyingInstr())
return {nullptr, nullptr};
unsigned Opcode = WidenR->getOpcode();
>From fc1ee2944c6997c4e951b6af65e0628a9ec1fe0a Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Wed, 22 Oct 2025 03:11:24 -0700
Subject: [PATCH 09/30] Directly use flags from WidenGEP
---
llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 483b3428fb1e0..7755268cb29ba 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -6048,10 +6048,8 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
assert(StrideInElement && ElementTy);
// Create a new vector pointer for strided access.
- auto *GEP = dyn_cast<GetElementPtrInst>(PtrUV->stripPointerCasts());
auto *NewPtr = new VPVectorPointerRecipe(
- BasePtr, ElementTy, StrideInElement,
- GEP ? GEP->getNoWrapFlags() : GEPNoWrapFlags::none(),
+ BasePtr, ElementTy, StrideInElement, Ptr->getGEPNoWrapFlags(),
Ptr->getDebugLoc());
NewPtr->insertBefore(MemR);
>From 92049c95e6073809f6bfc0b42aefbe0c7e38f6d7 Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Wed, 22 Oct 2025 03:29:02 -0700
Subject: [PATCH 10/30] Pass Ptr->getUnderlyingValue() in memory cost model
---
llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp | 11 +++--------
1 file changed, 3 insertions(+), 8 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 7755268cb29ba..42b722a0c89f7 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -6004,12 +6004,6 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
if (!Ptr)
continue;
- // Memory cost model requires the pointer operand of memory access
- // instruction.
- Value *PtrUV = Ptr->getUnderlyingValue();
- if (!PtrUV)
- continue;
-
Instruction &Ingredient = MemR->getIngredient();
auto IsProfitable = [&](ElementCount VF) -> bool {
Type *DataTy = toVectorTy(getLoadStoreType(&Ingredient), VF);
@@ -6020,8 +6014,9 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
const InstructionCost StridedLoadStoreCost =
Ctx.TTI.getMemIntrinsicInstrCost(
MemIntrinsicCostAttributes(
- Intrinsic::experimental_vp_strided_load, DataTy, PtrUV,
- MemR->isMasked(), Alignment, &Ingredient),
+ Intrinsic::experimental_vp_strided_load, DataTy,
+ Ptr->getUnderlyingValue(), MemR->isMasked(), Alignment,
+ &Ingredient),
Ctx.CostKind);
return StridedLoadStoreCost < CurrentCost;
};
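
A minimal sketch of the per-VF decision the IsProfitable lambda above makes,
with stand-in cost values; in the patch both costs come from the TTI-backed
VPlan cost model.

#include <cstdint>

// Convert a gather into a strided load only when the strided intrinsic is
// legal for the data type and strictly cheaper than the gather it replaces.
bool preferStridedLoad(bool stridedLegal, uint64_t gatherCost,
                       uint64_t stridedLoadCost) {
  return stridedLegal && stridedLoadCost < gatherCost;
}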
>From dbc1252217950e19ff0db496b280a5773813d2b0 Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Wed, 22 Oct 2025 06:30:47 -0700
Subject: [PATCH 11/30] Implement VPWidenStridedLoadRecipe::computeCost
---
llvm/lib/Transforms/Vectorize/VPlan.h | 4 +++
.../lib/Transforms/Vectorize/VPlanRecipes.cpp | 26 +++++++++++--------
2 files changed, 19 insertions(+), 11 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index bd080242530e0..be583acf1b76c 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -3658,6 +3658,10 @@ struct VPWidenStridedLoadRecipe final : public VPWidenMemoryRecipe,
/// Generate a strided load.
void execute(VPTransformState &State) override;
+ /// Return the cost of this VPWidenStridedLoadRecipe.
+ InstructionCost computeCost(ElementCount VF,
+ VPCostContext &Ctx) const override;
+
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void printRecipe(raw_ostream &O, const Twine &Indent,
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index 3d2f4380e5e05..bd275650e4922 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -3715,11 +3715,9 @@ InstructionCost VPWidenMemoryRecipe::computeCost(ElementCount VF,
Type *Ty = toVectorTy(getLoadStoreType(&Ingredient), VF);
unsigned AS = cast<PointerType>(Ctx.Types.inferScalarType(getAddr()))
->getAddressSpace();
- unsigned Opcode =
- isa<VPWidenLoadRecipe, VPWidenLoadEVLRecipe, VPWidenStridedLoadRecipe>(
- this)
- ? Instruction::Load
- : Instruction::Store;
+ unsigned Opcode = isa<VPWidenLoadRecipe, VPWidenLoadEVLRecipe>(this)
+ ? Instruction::Load
+ : Instruction::Store;
if (!Consecutive) {
// TODO: Using the original IR may not be accurate.
@@ -3729,12 +3727,6 @@ InstructionCost VPWidenMemoryRecipe::computeCost(ElementCount VF,
"Inconsecutive memory access should not have the order.");
const Value *Ptr = getLoadStorePointerOperand(&Ingredient);
- if (isa<VPWidenStridedLoadRecipe>(this))
- return Ctx.TTI.getMemIntrinsicInstrCost(
- MemIntrinsicCostAttributes(Intrinsic::experimental_vp_strided_load,
- Ty, Ptr, IsMasked, Alignment, &Ingredient),
- Ctx.CostKind);
-
Type *PtrTy = Ptr->getType();
// If the address value is uniform across all lanes, then the address can be
// calculated with scalar type and broadcast.
@@ -3909,6 +3901,18 @@ void VPWidenStridedLoadRecipe::execute(VPTransformState &State) {
State.set(this, NewLI);
}
+InstructionCost
+VPWidenStridedLoadRecipe::computeCost(ElementCount VF,
+ VPCostContext &Ctx) const {
+ Type *Ty = toVectorTy(getLoadStoreType(&Ingredient), VF);
+ const Value *Ptr = getLoadStorePointerOperand(&Ingredient);
+ const Align Alignment = getLoadStoreAlignment(&Ingredient);
+ return Ctx.TTI.getMemIntrinsicInstrCost(
+ MemIntrinsicCostAttributes(Intrinsic::experimental_vp_strided_load, Ty,
+ Ptr, IsMasked, Alignment, &Ingredient),
+ Ctx.CostKind);
+}
+
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPWidenStridedLoadRecipe::printRecipe(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const {
>From 4869fd5d61d31119ac3c18ebebd89647e26aa7a5 Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Fri, 7 Nov 2025 00:48:22 -0800
Subject: [PATCH 12/30] update comments. nfc
---
llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 42b722a0c89f7..3e58d25f51ecd 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -5944,7 +5944,7 @@ static std::pair<VPValue *, VPValue *> matchStridedStart(VPValue *CurIndex) {
/// Checks if the given VPWidenGEPRecipe \p WidenGEP represents a strided
/// access. If so, it creates recipes representing the base pointer and stride
-/// in element type, and returns a tuple of {base pointer, stride, element
+/// in element units, and returns a tuple of {base pointer, stride, element
/// type}. Otherwise, returns a tuple where all elements are nullptr.
static std::tuple<VPValue *, VPValue *, Type *>
determineBaseAndStride(VPWidenGEPRecipe *WidenGEP) {
>From 59916a6d155bbfc4e6b85e82653c048156868ffa Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Fri, 7 Nov 2025 01:11:31 -0800
Subject: [PATCH 13/30] cast to VPWidenLoadRecipe directly for now. nfc
---
.../Transforms/Vectorize/VPlanTransforms.cpp | 17 ++++++++---------
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 3e58d25f51ecd..b8dc4fc2631c5 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -5991,31 +5991,31 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
vp_depth_first_shallow(Plan.getVectorLoopRegion()->getEntry()))) {
for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
- auto *MemR = dyn_cast<VPWidenMemoryRecipe>(&R);
+ auto *LoadR = dyn_cast<VPWidenLoadRecipe>(&R);
// TODO: Support strided store.
// TODO: Transform reverse access into strided access with -1 stride.
// TODO: Transform gather/scatter with uniform address into strided access
// with 0 stride.
// TODO: Transform interleave access into multiple strided accesses.
- if (!MemR || !isa<VPWidenLoadRecipe>(MemR) || MemR->isConsecutive())
+ if (!LoadR || LoadR->isConsecutive())
continue;
- auto *Ptr = dyn_cast<VPWidenGEPRecipe>(MemR->getAddr());
+ auto *Ptr = dyn_cast<VPWidenGEPRecipe>(LoadR->getAddr());
if (!Ptr)
continue;
- Instruction &Ingredient = MemR->getIngredient();
+ Instruction &Ingredient = LoadR->getIngredient();
auto IsProfitable = [&](ElementCount VF) -> bool {
Type *DataTy = toVectorTy(getLoadStoreType(&Ingredient), VF);
const Align Alignment = getLoadStoreAlignment(&Ingredient);
if (!Ctx.TTI.isLegalStridedLoadStore(DataTy, Alignment))
return false;
- const InstructionCost CurrentCost = MemR->computeCost(VF, Ctx);
+ const InstructionCost CurrentCost = LoadR->computeCost(VF, Ctx);
const InstructionCost StridedLoadStoreCost =
Ctx.TTI.getMemIntrinsicInstrCost(
MemIntrinsicCostAttributes(
Intrinsic::experimental_vp_strided_load, DataTy,
- Ptr->getUnderlyingValue(), MemR->isMasked(), Alignment,
+ Ptr->getUnderlyingValue(), LoadR->isMasked(), Alignment,
&Ingredient),
Ctx.CostKind);
return StridedLoadStoreCost < CurrentCost;
@@ -6046,7 +6046,7 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
auto *NewPtr = new VPVectorPointerRecipe(
BasePtr, ElementTy, StrideInElement, Ptr->getGEPNoWrapFlags(),
Ptr->getDebugLoc());
- NewPtr->insertBefore(MemR);
+ NewPtr->insertBefore(LoadR);
const DataLayout &DL = Ingredient.getDataLayout();
TypeSize TS = DL.getTypeAllocSize(ElementTy);
@@ -6059,11 +6059,10 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
auto *ScaledStride =
new VPInstruction(Instruction::Mul, {StrideInElement, ScaleVPV},
VPRecipeWithIRFlags::WrapFlagsTy{false, false});
- ScaledStride->insertBefore(MemR);
+ ScaledStride->insertBefore(LoadR);
StrideInBytes = ScaledStride;
}
- auto *LoadR = cast<VPWidenLoadRecipe>(MemR);
auto *StridedLoad = new VPWidenStridedLoadRecipe(
*cast<LoadInst>(&Ingredient), NewPtr, StrideInBytes, &Plan.getVF(),
LoadR->getMask(), *LoadR, LoadR->getDebugLoc());
>From 30093252db62e056d6c6cf2d5f6156fe4fcc921b Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Fri, 7 Nov 2025 01:14:10 -0800
Subject: [PATCH 14/30] replace getOrAddLiveIn(ConstantInt::get()) with
getConstantInt. nfc
---
llvm/lib/Transforms/Vectorize/LoopVectorize.cpp | 2 +-
llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index dbae39ef7b271..2fa4c42734e70 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -7754,7 +7754,7 @@ VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(VPInstruction *VPI,
} else {
const DataLayout &DL = I->getDataLayout();
auto *StrideTy = DL.getIndexType(Ptr->getUnderlyingValue()->getType());
- VPValue *StrideOne = Plan.getOrAddLiveIn(ConstantInt::get(StrideTy, 1));
+ VPValue *StrideOne = Plan.getConstantInt(StrideTy, 1);
VectorPtr = new VPVectorPointerRecipe(Ptr, getLoadStoreType(I), StrideOne,
GEP ? GEP->getNoWrapFlags()
: GEPNoWrapFlags::none(),
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index b8dc4fc2631c5..760b6dfc21a79 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -6054,8 +6054,8 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
VPValue *StrideInBytes = StrideInElement;
// Scale the stride by the size of the indexed type.
if (TypeScale != 1) {
- VPValue *ScaleVPV = Plan.getOrAddLiveIn(ConstantInt::get(
- TypeInfo.inferScalarType(StrideInElement), TypeScale));
+ VPValue *ScaleVPV = Plan.getConstantInt(
+ TypeInfo.inferScalarType(StrideInElement), TypeScale);
auto *ScaledStride =
new VPInstruction(Instruction::Mul, {StrideInElement, ScaleVPV},
VPRecipeWithIRFlags::WrapFlagsTy{false, false});
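
A hedged sketch of the element-to-byte stride scaling the Mul above performs;
elemAllocSize stands in for DL.getTypeAllocSize(ElementTy).

#include <cstdint>

// vp.strided.load takes its stride in bytes, so a stride recovered in units
// of GEP elements must be scaled by the allocation size of the element type.
// The multiply is skipped entirely when the scale is 1 (i8-sized elements).
int64_t strideInBytes(int64_t strideInElements, uint64_t elemAllocSize) {
  return strideInElements * static_cast<int64_t>(elemAllocSize);
}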
>From 6ee6880ae09146dd14f30fbbad8ccba63bcbc11b Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Fri, 7 Nov 2025 01:28:12 -0800
Subject: [PATCH 15/30] updated assertion message. nfc
---
llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 760b6dfc21a79..27bd878c2dd8c 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -6036,11 +6036,10 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
determineBaseAndStride(Ptr);
// Skip if the memory access is not a strided access.
- if (!BasePtr) {
- assert(!StrideInElement && !ElementTy);
+ if (!BasePtr)
continue;
- }
- assert(StrideInElement && ElementTy);
+ assert(StrideInElement && ElementTy &&
+ "Can not get stride information for a strided access");
// Create a new vector pointer for strided access.
auto *NewPtr = new VPVectorPointerRecipe(
>From af6b588f15e2bcf8c4bf6007beb760476062f26d Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Mon, 10 Nov 2025 23:23:08 -0800
Subject: [PATCH 16/30] rebase and remove getLoadStoreAlignment call
---
llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp | 2 --
1 file changed, 2 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index bd275650e4922..e3bb7956ad35b 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -3877,7 +3877,6 @@ void VPWidenLoadEVLRecipe::printRecipe(raw_ostream &O, const Twine &Indent,
void VPWidenStridedLoadRecipe::execute(VPTransformState &State) {
Type *ScalarDataTy = getLoadStoreType(&Ingredient);
auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
- const Align Alignment = getLoadStoreAlignment(&Ingredient);
auto &Builder = State.Builder;
Value *Addr = State.get(getAddr(), /*IsScalar*/ true);
@@ -3906,7 +3905,6 @@ VPWidenStridedLoadRecipe::computeCost(ElementCount VF,
VPCostContext &Ctx) const {
Type *Ty = toVectorTy(getLoadStoreType(&Ingredient), VF);
const Value *Ptr = getLoadStorePointerOperand(&Ingredient);
- const Align Alignment = getLoadStoreAlignment(&Ingredient);
return Ctx.TTI.getMemIntrinsicInstrCost(
MemIntrinsicCostAttributes(Intrinsic::experimental_vp_strided_load, Ty,
Ptr, IsMasked, Alignment, &Ingredient),
>From 2aa02894d1ec03b9c9ca39b81076b55d425c423e Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Tue, 2 Dec 2025 07:38:14 -0800
Subject: [PATCH 17/30] Intro VPWidenMemIntrinsicRecipe
---
llvm/lib/Analysis/VectorUtils.cpp | 4 +
.../Transforms/Vectorize/LoopVectorize.cpp | 1 +
llvm/lib/Transforms/Vectorize/VPlan.h | 96 ++++++++++++++
.../Transforms/Vectorize/VPlanAnalysis.cpp | 2 +-
.../lib/Transforms/Vectorize/VPlanRecipes.cpp | 125 ++++++++++++++++++
.../Transforms/Vectorize/VPlanTransforms.cpp | 43 +++++-
llvm/lib/Transforms/Vectorize/VPlanValue.h | 1 +
.../Transforms/Vectorize/VPlanVerifier.cpp | 3 +
.../LoopVectorize/RISCV/strided-accesses.ll | 31 ++---
.../RISCV/tail-folding-gather-scatter.ll | 2 +-
.../RISCV/tail-folding-interleave.ll | 27 ++--
11 files changed, 292 insertions(+), 43 deletions(-)
diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp
index d4083c49626fe..2e83682b70a7d 100644
--- a/llvm/lib/Analysis/VectorUtils.cpp
+++ b/llvm/lib/Analysis/VectorUtils.cpp
@@ -170,6 +170,8 @@ bool llvm::isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
return (ScalarOpdIdx == 2);
case Intrinsic::experimental_vp_splice:
return ScalarOpdIdx == 2 || ScalarOpdIdx == 4;
+ case Intrinsic::experimental_vp_strided_load:
+ return ScalarOpdIdx == 0 || ScalarOpdIdx == 1;
default:
return false;
}
@@ -207,6 +209,8 @@ bool llvm::isVectorIntrinsicWithOverloadTypeAtArg(
case Intrinsic::powi:
case Intrinsic::ldexp:
return OpdIdx == -1 || OpdIdx == 1;
+ case Intrinsic::experimental_vp_strided_load:
+ return OpdIdx == -1 || OpdIdx == 0 || OpdIdx == 1;
default:
return OpdIdx == -1;
}
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 2fa4c42734e70..f57877ba6025c 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -4145,6 +4145,7 @@ static bool willGenerateVectors(VPlan &Plan, ElementCount VF,
case VPRecipeBase::VPWidenCastSC:
case VPRecipeBase::VPWidenGEPSC:
case VPRecipeBase::VPWidenIntrinsicSC:
+ case VPRecipeBase::VPWidenMemIntrinsicSC:
case VPRecipeBase::VPWidenSC:
case VPRecipeBase::VPBlendSC:
case VPRecipeBase::VPFirstOrderRecurrencePHISC:
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index be583acf1b76c..c02f527625777 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -422,6 +422,7 @@ class LLVM_ABI_FOR_TEST VPRecipeBase
VPWidenCastSC,
VPWidenGEPSC,
VPWidenIntrinsicSC,
+ VPWidenMemIntrinsicSC,
VPWidenStridedLoadSC,
VPWidenLoadEVLSC,
VPWidenLoadSC,
@@ -629,6 +630,7 @@ class VPSingleDefRecipe : public VPRecipeBase, public VPRecipeValue {
case VPRecipeBase::VPInterleaveEVLSC:
case VPRecipeBase::VPInterleaveSC:
case VPRecipeBase::VPIRInstructionSC:
+ case VPRecipeBase::VPWidenMemIntrinsicSC:
case VPRecipeBase::VPWidenStridedLoadSC:
case VPRecipeBase::VPWidenLoadEVLSC:
case VPRecipeBase::VPWidenLoadSC:
@@ -1905,6 +1907,98 @@ class VPWidenIntrinsicRecipe : public VPRecipeWithIRFlags, public VPIRMetadata {
#endif
};
+/// A recipe for widening vector memory intrinsics.
+class VPWidenMemIntrinsicRecipe : public VPRecipeBase,
+ public VPIRMetadata {
+ Instruction &Ingredient;
+
+ /// Alignment information for this memory access.
+ Align Alignment;
+
+ /// ID of the vector intrinsic to widen.
+ Intrinsic::ID VectorIntrinsicID;
+
+ /// Scalar return type of the intrinsic.
+ Type *ResultTy;
+
+ /// True if the intrinsic may read from memory.
+ bool MayReadFromMemory;
+
+ /// True if the intrinsic may write to memory.
+ bool MayWriteToMemory;
+
+ /// True if the intrinsic may have side-effects.
+ bool MayHaveSideEffects;
+
+public:
+ // TODO: support StoreInst for strided store
+ VPWidenMemIntrinsicRecipe(LoadInst &LI, Intrinsic::ID VectorIntrinsicID,
+ ArrayRef<VPValue *> CallArguments,
+ const VPIRMetadata &MD = {},
+ DebugLoc DL = DebugLoc::getUnknown())
+ : VPRecipeBase(VPRecipeBase::VPWidenMemIntrinsicSC, CallArguments, DL),
+ VPIRMetadata(MD), Ingredient(LI), Alignment(LI.getAlign()),
+ VectorIntrinsicID(VectorIntrinsicID), ResultTy(LI.getType()),
+ MayReadFromMemory(LI.mayReadFromMemory()),
+ MayWriteToMemory(LI.mayWriteToMemory()),
+ MayHaveSideEffects(LI.mayHaveSideEffects()) {
+ new VPRecipeValue(this, &LI);
+ }
+
+ ~VPWidenMemIntrinsicRecipe() override = default;
+
+ VPWidenMemIntrinsicRecipe *clone() override {
+ return new VPWidenMemIntrinsicRecipe(*cast<LoadInst>(&Ingredient),
+ VectorIntrinsicID, operands(), *this,
+ getDebugLoc());
+ }
+
+ VP_CLASSOF_IMPL(VPRecipeBase::VPWidenMemIntrinsicSC)
+
+ /// Produce a widened version of the vector memory intrinsic.
+ void execute(VPTransformState &State) override;
+
+ /// Return the cost of this vector memory intrinsic.
+ InstructionCost computeCost(ElementCount VF,
+ VPCostContext &Ctx) const override;
+
+ /// Return the ID of the intrinsic.
+ Intrinsic::ID getVectorIntrinsicID() const { return VectorIntrinsicID; }
+
+ /// Return the scalar return type of the intrinsic.
+ Type *getResultType() const { return ResultTy; }
+
+ /// Returns the name of the intrinsic as a string.
+ StringRef getIntrinsicName() const {
+ return Intrinsic::getBaseName(VectorIntrinsicID);
+ }
+
+ /// Returns true if the intrinsic may read from memory.
+ bool mayReadFromMemory() const { return MayReadFromMemory; }
+
+ /// Returns true if the intrinsic may write to memory.
+ bool mayWriteToMemory() const { return MayWriteToMemory; }
+
+ /// Returns true if the intrinsic may have side-effects.
+ bool mayHaveSideEffects() const { return MayHaveSideEffects; }
+
+ unsigned getMemoryPointerParamPos() const;
+
+ unsigned getMaskParamPos() const;
+
+ void setMask(VPValue *Mask) { setOperand(getMaskParamPos(), Mask); }
+
+ VPValue *getMask() const { return getOperand(getMaskParamPos()); }
+
+ bool usesFirstLaneOnly(const VPValue *Op) const override;
+
+protected:
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ /// Print the recipe.
+ void printRecipe(raw_ostream &O, const Twine &Indent,
+ VPSlotTracker &SlotTracker) const override;
+#endif
+};
+
/// A recipe for widening Call instructions using library calls.
class LLVM_ABI_FOR_TEST VPWidenCallRecipe : public VPRecipeWithIRFlags,
public VPIRMetadata {
@@ -4178,6 +4272,8 @@ static inline auto castToVPIRMetadata(RecipeBasePtrTy R) -> DstTy {
return cast<VPWidenCastRecipe>(R);
case VPRecipeBase::VPWidenIntrinsicSC:
return cast<VPWidenIntrinsicRecipe>(R);
+ case VPRecipeBase::VPWidenMemIntrinsicSC:
+ return cast<VPWidenMemIntrinsicRecipe>(R);
case VPRecipeBase::VPWidenCallSC:
return cast<VPWidenCallRecipe>(R);
case VPRecipeBase::VPReplicateSC:
diff --git a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp
index 9b62532cecda7..a5aaa071afbde 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp
@@ -291,7 +291,7 @@ Type *VPTypeAnalysis::inferScalarType(const VPValue *V) {
})
// VPInstructionWithType must be handled before VPInstruction.
.Case<VPInstructionWithType, VPWidenIntrinsicRecipe,
- VPWidenCastRecipe>(
+ VPWidenMemIntrinsicRecipe, VPWidenCastRecipe>(
[](const auto *R) { return R->getResultType(); })
.Case<VPBlendRecipe, VPInstruction, VPWidenRecipe, VPReplicateRecipe,
VPWidenCallRecipe, VPWidenMemoryRecipe>(
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index e3bb7956ad35b..3c9bde9bbb3f9 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -73,6 +73,8 @@ bool VPRecipeBase::mayWriteToMemory() const {
->onlyReadsMemory();
case VPWidenIntrinsicSC:
return cast<VPWidenIntrinsicRecipe>(this)->mayWriteToMemory();
+ case VPWidenMemIntrinsicSC:
+ return cast<VPWidenMemIntrinsicRecipe>(this)->mayWriteToMemory();
case VPActiveLaneMaskPHISC:
case VPCanonicalIVPHISC:
case VPBranchOnMaskSC:
@@ -127,6 +129,8 @@ bool VPRecipeBase::mayReadFromMemory() const {
->onlyWritesMemory();
case VPWidenIntrinsicSC:
return cast<VPWidenIntrinsicRecipe>(this)->mayReadFromMemory();
+ case VPWidenMemIntrinsicSC:
+ return cast<VPWidenMemIntrinsicRecipe>(this)->mayReadFromMemory();
case VPBranchOnMaskSC:
case VPDerivedIVSC:
case VPFirstOrderRecurrencePHISC:
@@ -184,6 +188,8 @@ bool VPRecipeBase::mayHaveSideEffects() const {
}
case VPWidenIntrinsicSC:
return cast<VPWidenIntrinsicRecipe>(this)->mayHaveSideEffects();
+ case VPWidenMemIntrinsicSC:
+ return cast<VPWidenMemIntrinsicRecipe>(this)->mayHaveSideEffects();
case VPBlendSC:
case VPReductionEVLSC:
case VPReductionSC:
@@ -1981,6 +1987,125 @@ void VPWidenIntrinsicRecipe::printRecipe(raw_ostream &O, const Twine &Indent,
}
#endif
+unsigned VPWidenMemIntrinsicRecipe::getMemoryPointerParamPos() const {
+ if (auto Pos = VPIntrinsic::getMemoryPointerParamPos(VectorIntrinsicID))
+ return *Pos;
+
+ switch (VectorIntrinsicID) {
+ case Intrinsic::masked_load:
+ case Intrinsic::masked_gather:
+ case Intrinsic::masked_expandload:
+ return 0;
+ case Intrinsic::masked_store:
+ case Intrinsic::masked_scatter:
+ case Intrinsic::masked_compressstore:
+ return 1;
+ default:
+ llvm_unreachable("unknown vector memory intrinsic");
+ }
+}
+
+unsigned VPWidenMemIntrinsicRecipe::getMaskParamPos() const {
+ if (auto Pos = VPIntrinsic::getMaskParamPos(VectorIntrinsicID))
+ return *Pos;
+
+ switch (VectorIntrinsicID) {
+ case Intrinsic::masked_load:
+ case Intrinsic::masked_gather:
+ case Intrinsic::masked_expandload:
+ return 1;
+ case Intrinsic::masked_store:
+ case Intrinsic::masked_scatter:
+ case Intrinsic::masked_compressstore:
+ return 2;
+ default:
+ llvm_unreachable("unknown vector memory intrinsic");
+ }
+}
+
+void VPWidenMemIntrinsicRecipe::execute(VPTransformState &State) {
+ assert(State.VF.isVector() && "not widening");
+
+ SmallVector<Type *, 2> TysForDecl;
+ // Add return type if intrinsic is overloaded on it.
+ if (isVectorIntrinsicWithOverloadTypeAtArg(VectorIntrinsicID, -1, State.TTI))
+ TysForDecl.push_back(VectorType::get(getResultType(), State.VF));
+ SmallVector<Value *, 4> Args;
+ for (const auto &I : enumerate(operands())) {
+ // Some intrinsics have a scalar argument - don't replace it with a
+ // vector.
+ Value *Arg;
+ if (isVectorIntrinsicWithScalarOpAtArg(VectorIntrinsicID, I.index(),
+ State.TTI))
+ Arg = State.get(I.value(), VPLane(0));
+ else
+ Arg = State.get(I.value(), usesFirstLaneOnly(I.value()));
+ if (isVectorIntrinsicWithOverloadTypeAtArg(VectorIntrinsicID, I.index(),
+ State.TTI))
+ TysForDecl.push_back(Arg->getType());
+ Args.push_back(Arg);
+ }
+
+ // Use vector version of the intrinsic.
+ Module *M = State.Builder.GetInsertBlock()->getModule();
+ Function *VectorF =
+ Intrinsic::getOrInsertDeclaration(M, VectorIntrinsicID, TysForDecl);
+ assert(VectorF &&
+ "Can't retrieve vector intrinsic or vector-predication intrinsics.");
+
+ CallInst *MemI = State.Builder.CreateCall(VectorF, Args);
+ MemI->addParamAttr(
+ getMemoryPointerParamPos(),
+ Attribute::getWithAlignment(MemI->getContext(), Alignment));
+ applyMetadata(*MemI);
+
+ if (!MemI->getType()->isVoidTy())
+ State.set(getVPSingleValue(), MemI);
+}
+
+InstructionCost
+VPWidenMemIntrinsicRecipe::computeCost(ElementCount VF,
+ VPCostContext &Ctx) const {
+ Type *Ty = toVectorTy(getLoadStoreType(&Ingredient), VF);
+ const Value *Ptr = getLoadStorePointerOperand(&Ingredient);
+ return Ctx.TTI.getMemIntrinsicInstrCost(
+ MemIntrinsicCostAttributes(VectorIntrinsicID, Ty, Ptr,
+ match(getMask(), m_True()), Alignment,
+ &Ingredient),
+ Ctx.CostKind);
+}
+
+// Copied from VPWidenIntrinsicRecipe::usesFirstLaneOnly.
+bool VPWidenMemIntrinsicRecipe::usesFirstLaneOnly(const VPValue *Op) const {
+ assert(is_contained(operands(), Op) && "Op must be an operand of the recipe");
+ return all_of(enumerate(operands()), [this, &Op](const auto &X) {
+ auto [Idx, V] = X;
+ return V != Op || isVectorIntrinsicWithScalarOpAtArg(getVectorIntrinsicID(),
+ Idx, nullptr);
+ });
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+void VPWidenMemIntrinsicRecipe::printRecipe(raw_ostream &O, const Twine &Indent,
+ VPSlotTracker &SlotTracker) const {
+ O << Indent << "WIDEN-MEM-INTRINSIC ";
+ if (ResultTy->isVoidTy()) {
+ O << "void ";
+ } else {
+ getVPSingleValue()->printAsOperand(O, SlotTracker);
+ O << " = ";
+ }
+
+ O << "call ";
+ O << getIntrinsicName() << "(";
+
+ interleaveComma(operands(), O, [&O, &SlotTracker](VPValue *Op) {
+ Op->printAsOperand(O, SlotTracker);
+ });
+ O << ")";
+}
+#endif
+
void VPHistogramRecipe::execute(VPTransformState &State) {
IRBuilderBase &Builder = State.Builder;
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 27bd878c2dd8c..131f06c014866 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -2980,6 +2980,18 @@ static VPRecipeBase *optimizeMaskToEVL(VPValue *HeaderMask,
TypeInfo.inferScalarType(LoadR), {}, {}, DL);
}
+ if (auto *MI = dyn_cast<VPWidenMemIntrinsicRecipe>(&CurRecipe))
+ if (MI->getVectorIntrinsicID() == Intrinsic::experimental_vp_strided_load &&
+ match(MI->getMask(), m_RemoveMask(HeaderMask, Mask))) {
+ VPWidenMemIntrinsicRecipe *NewMI = MI->clone();
+ if (Mask)
+ NewMI->setMask(Mask);
+ else
+ NewMI->setMask(Plan->getTrue());
+ NewMI->setOperand(3, &EVL);
+ return NewMI;
+ }
+
if (auto *StridedL = dyn_cast<VPWidenStridedLoadRecipe>(&CurRecipe))
if (StridedL->isMasked() &&
match(StridedL->getMask(), m_RemoveMask(HeaderMask, Mask)))
@@ -3093,8 +3105,13 @@ static void fixupVFUsersForEVL(VPlan &Plan, VPValue &EVL) {
assert(
all_of(
Plan.getVF().users(),
- IsaPred<VPVectorEndPointerRecipe, VPScalarIVStepsRecipe,
- VPWidenIntOrFpInductionRecipe, VPWidenStridedLoadRecipe>) &&
+ [&LoopRegion](VPUser *U) {
+ auto *R = cast<VPRecipeBase>(U);
+ return (R->getParent()->getParent() != LoopRegion) ||
+ isa<VPVectorEndPointerRecipe, VPScalarIVStepsRecipe,
+ VPWidenIntOrFpInductionRecipe, VPWidenStridedLoadRecipe>(
+ R);
+ }) &&
"User of VF that we can't transform to EVL.");
Plan.getVF().replaceUsesWithIf(&EVL, [](VPUser &U, unsigned Idx) {
return isa<VPWidenIntOrFpInductionRecipe, VPScalarIVStepsRecipe>(U);
@@ -5988,6 +6005,7 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
DenseMap<VPWidenGEPRecipe *, std::tuple<VPValue *, VPValue *, Type *>>
StrideCache;
SmallVector<VPWidenMemoryRecipe *> ToErase;
+ VPValue *I32VF = nullptr;
for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
vp_depth_first_shallow(Plan.getVectorLoopRegion()->getEntry()))) {
for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
@@ -6041,6 +6059,14 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
assert(StrideInElement && ElementTy &&
"Can not get stride information for a strided access");
+ // Create an i32 version of VF for the EVL operand.
+ if (!I32VF) {
+ VPBuilder Builder(Plan.getVectorPreheader());
+ I32VF = Builder.createScalarZExtOrTrunc(
+ &Plan.getVF(), Type::getInt32Ty(Plan.getContext()),
+ TypeInfo.inferScalarType(&Plan.getVF()), DebugLoc::getUnknown());
+ }
+
// Create a new vector pointer for strided access.
auto *NewPtr = new VPVectorPointerRecipe(
BasePtr, ElementTy, StrideInElement, Ptr->getGEPNoWrapFlags(),
@@ -6062,11 +6088,16 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
StrideInBytes = ScaledStride;
}
- auto *StridedLoad = new VPWidenStridedLoadRecipe(
- *cast<LoadInst>(&Ingredient), NewPtr, StrideInBytes, &Plan.getVF(),
- LoadR->getMask(), *LoadR, LoadR->getDebugLoc());
+ VPValue *Mask;
+ if (VPValue *LoadMask = LoadR->getMask())
+ Mask = LoadMask;
+ else
+ Mask = Plan.getTrue();
+ auto *StridedLoad = new VPWidenMemIntrinsicRecipe(
+ *cast<LoadInst>(&Ingredient), Intrinsic::experimental_vp_strided_load,
+ {NewPtr, StrideInBytes, Mask, I32VF}, *LoadR, LoadR->getDebugLoc());
StridedLoad->insertBefore(LoadR);
- LoadR->replaceAllUsesWith(StridedLoad);
+ LoadR->replaceAllUsesWith(StridedLoad->getVPSingleValue());
ToErase.push_back(LoadR);
}
diff --git a/llvm/lib/Transforms/Vectorize/VPlanValue.h b/llvm/lib/Transforms/Vectorize/VPlanValue.h
index 4ef78341e0654..25d56a2b4c3a8 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanValue.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanValue.h
@@ -48,6 +48,7 @@ class LLVM_ABI_FOR_TEST VPValue {
friend struct VPIRValue;
friend struct VPSymbolicValue;
friend class VPRecipeValue;
+ friend class VPWidenMemIntrinsicRecipe;
const unsigned char SubclassID; ///< Subclass identifier (for isa/dyn_cast).
diff --git a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp
index 8943652f3b281..6542ca8e13db7 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp
@@ -167,6 +167,9 @@ bool VPlanVerifier::verifyEVLRecipe(const VPInstruction &EVL) const {
.Case([&](const VPWidenIntrinsicRecipe *S) {
return VerifyEVLUse(*S, S->getNumOperands() - 1);
})
+ .Case([&](const VPWidenMemIntrinsicRecipe *S) {
+ return VerifyEVLUse(*S, S->getNumOperands() - 1);
+ })
.Case<VPWidenStoreEVLRecipe, VPReductionEVLRecipe,
VPWidenIntOrFpInductionRecipe, VPWidenPointerInductionRecipe,
VPWidenStridedLoadRecipe>(
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
index a1d094b83f0ca..eb6da12e84bfa 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
@@ -53,6 +53,7 @@ define void @single_constant_stride_int_scaled(ptr %p) {
; CHECK-UF2-NEXT: [[TMP5:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-UF2-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i64 [[TMP4]], i64 [[N_MOD_VF]]
; CHECK-UF2-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[TMP6]]
+; CHECK-UF2-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP3]] to i32
; CHECK-UF2-NEXT: [[TMP7:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-UF2-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-UF2: vector.body:
@@ -67,10 +68,8 @@ define void @single_constant_stride_int_scaled(ptr %p) {
; CHECK-UF2-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[TMP10]]
; CHECK-UF2-NEXT: [[TMP17:%.*]] = shl i64 [[TMP3]], 3
; CHECK-UF2-NEXT: [[TMP18:%.*]] = getelementptr i32, ptr [[TMP22]], i64 [[TMP17]]
-; CHECK-UF2-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP3]] to i32
; CHECK-UF2-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP22]], i64 32, <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
-; CHECK-UF2-NEXT: [[TMP20:%.*]] = trunc i64 [[TMP3]] to i32
-; CHECK-UF2-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP18]], i64 32, <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]])
+; CHECK-UF2-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP18]], i64 32, <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
; CHECK-UF2-NEXT: [[TMP13:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
; CHECK-UF2-NEXT: [[TMP14:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER1]], splat (i32 1)
; CHECK-UF2-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP13]], <vscale x 4 x ptr> align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true))
@@ -163,6 +162,7 @@ define void @single_constant_stride_int_iv(ptr %p) {
; CHECK-UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
; CHECK-UF2-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-UF2-NEXT: [[TMP5:%.*]] = mul i64 [[N_VEC]], 64
+; CHECK-UF2-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP3]] to i32
; CHECK-UF2-NEXT: [[TMP6:%.*]] = shl <vscale x 4 x i64> [[BROADCAST_SPLAT]], splat (i64 6)
; CHECK-UF2-NEXT: [[TMP7:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-UF2-NEXT: [[TMP8:%.*]] = mul nuw nsw <vscale x 4 x i64> [[TMP7]], splat (i64 64)
@@ -177,10 +177,8 @@ define void @single_constant_stride_int_iv(ptr %p) {
; CHECK-UF2-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[STEP_ADD]]
; CHECK-UF2-NEXT: [[TMP14:%.*]] = shl i64 [[TMP3]], 6
; CHECK-UF2-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[TMP18]], i64 [[TMP14]]
-; CHECK-UF2-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP3]] to i32
; CHECK-UF2-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP18]], i64 256, <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
-; CHECK-UF2-NEXT: [[TMP17:%.*]] = trunc i64 [[TMP3]] to i32
-; CHECK-UF2-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP15]], i64 256, <vscale x 4 x i1> splat (i1 true), i32 [[TMP17]])
+; CHECK-UF2-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP15]], i64 256, <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
; CHECK-UF2-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
; CHECK-UF2-NEXT: [[TMP12:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER1]], splat (i32 1)
; CHECK-UF2-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP11]], <vscale x 4 x ptr> align 4 [[TMP9]], <vscale x 4 x i1> splat (i1 true))
@@ -890,6 +888,7 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) {
; STRIDED-UF2-NEXT: [[TMP30:%.*]] = shl nuw i64 [[TMP29]], 1
; STRIDED-UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP30]]
; STRIDED-UF2-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; STRIDED-UF2-NEXT: [[TMP35:%.*]] = trunc i64 [[TMP29]] to i32
; STRIDED-UF2-NEXT: [[TMP32:%.*]] = shl i64 [[STRIDE]], 2
; STRIDED-UF2-NEXT: [[BROADCAST_SPLATINSERT10:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[STRIDE]], i64 0
; STRIDED-UF2-NEXT: [[BROADCAST_SPLAT11:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT10]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
@@ -905,10 +904,8 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) {
; STRIDED-UF2-NEXT: [[TMP44:%.*]] = getelementptr i32, ptr [[P]], i64 [[TMP36]]
; STRIDED-UF2-NEXT: [[TMP47:%.*]] = mul i64 [[TMP29]], [[STRIDE]]
; STRIDED-UF2-NEXT: [[TMP48:%.*]] = getelementptr i32, ptr [[TMP44]], i64 [[TMP47]]
-; STRIDED-UF2-NEXT: [[TMP42:%.*]] = trunc i64 [[TMP29]] to i32
-; STRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP44]], i64 [[TMP32]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP42]]), !alias.scope [[META8:![0-9]+]]
-; STRIDED-UF2-NEXT: [[TMP43:%.*]] = trunc i64 [[TMP29]] to i32
-; STRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER12:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP48]], i64 [[TMP32]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP43]]), !alias.scope [[META8]]
+; STRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP44]], i64 [[TMP32]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP35]]), !alias.scope [[META8:![0-9]+]]
+; STRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER12:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP48]], i64 [[TMP32]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP35]]), !alias.scope [[META8]]
; STRIDED-UF2-NEXT: [[TMP37:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
; STRIDED-UF2-NEXT: [[TMP38:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER12]], splat (i32 1)
; STRIDED-UF2-NEXT: [[TMP39:%.*]] = getelementptr i32, ptr [[P2]], <vscale x 4 x i64> [[TMP33]]
@@ -1342,15 +1339,14 @@ define void @constant_stride_reinterpret(ptr noalias %in, ptr noalias %out) {
; NOSTRIDED-UF2-NEXT: [[TMP4:%.*]] = shl nuw i64 [[TMP3]], 1
; NOSTRIDED-UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
; NOSTRIDED-UF2-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; NOSTRIDED-UF2-NEXT: [[TMP15:%.*]] = trunc i64 [[TMP3]] to i32
; NOSTRIDED-UF2-NEXT: br label [[VECTOR_BODY:%.*]]
; NOSTRIDED-UF2: vector.body:
; NOSTRIDED-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; NOSTRIDED-UF2-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i32, ptr [[IN:%.*]], i64 [[INDEX]]
; NOSTRIDED-UF2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP5]], i64 [[TMP3]]
-; NOSTRIDED-UF2-NEXT: [[TMP7:%.*]] = trunc i64 [[TMP3]] to i32
-; NOSTRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[TMP5]], i64 4, <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]])
-; NOSTRIDED-UF2-NEXT: [[TMP8:%.*]] = trunc i64 [[TMP3]] to i32
-; NOSTRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[TMP6]], i64 4, <vscale x 2 x i1> splat (i1 true), i32 [[TMP8]])
+; NOSTRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[TMP5]], i64 4, <vscale x 2 x i1> splat (i1 true), i32 [[TMP15]])
+; NOSTRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[TMP6]], i64 4, <vscale x 2 x i1> splat (i1 true), i32 [[TMP15]])
; NOSTRIDED-UF2-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i64, ptr [[OUT:%.*]], i64 [[INDEX]]
; NOSTRIDED-UF2-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i64, ptr [[TMP9]], i64 [[TMP3]]
; NOSTRIDED-UF2-NEXT: store <vscale x 2 x i64> [[WIDE_MASKED_GATHER]], ptr [[TMP9]], align 8
@@ -1411,15 +1407,14 @@ define void @constant_stride_reinterpret(ptr noalias %in, ptr noalias %out) {
; STRIDED-UF2-NEXT: [[TMP4:%.*]] = shl nuw i64 [[TMP3]], 1
; STRIDED-UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
; STRIDED-UF2-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; STRIDED-UF2-NEXT: [[TMP15:%.*]] = trunc i64 [[TMP3]] to i32
; STRIDED-UF2-NEXT: br label [[VECTOR_BODY:%.*]]
; STRIDED-UF2: vector.body:
; STRIDED-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-UF2-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i32, ptr [[IN:%.*]], i64 [[INDEX]]
; STRIDED-UF2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP5]], i64 [[TMP3]]
-; STRIDED-UF2-NEXT: [[TMP7:%.*]] = trunc i64 [[TMP3]] to i32
-; STRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[TMP5]], i64 4, <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]])
-; STRIDED-UF2-NEXT: [[TMP8:%.*]] = trunc i64 [[TMP3]] to i32
-; STRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[TMP6]], i64 4, <vscale x 2 x i1> splat (i1 true), i32 [[TMP8]])
+; STRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[TMP5]], i64 4, <vscale x 2 x i1> splat (i1 true), i32 [[TMP15]])
+; STRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[TMP6]], i64 4, <vscale x 2 x i1> splat (i1 true), i32 [[TMP15]])
; STRIDED-UF2-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i64, ptr [[OUT:%.*]], i64 [[INDEX]]
; STRIDED-UF2-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i64, ptr [[TMP9]], i64 [[TMP3]]
; STRIDED-UF2-NEXT: store <vscale x 2 x i64> [[WIDE_MASKED_GATHER]], ptr [[TMP9]], align 8
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
index a17b92f930a94..787a9ad770885 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
@@ -44,11 +44,11 @@ define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %inde
; NO-VP-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 1
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP3]] to i32
; NO-VP-NEXT: br label [[FOR_BODY1:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDVARS_IV1:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDVARS_IV_NEXT1:%.*]], [[FOR_BODY1]] ]
; NO-VP-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[INDEX:%.*]], i64 [[INDVARS_IV1]]
-; NO-VP-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP3]] to i32
; NO-VP-NEXT: [[WIDE_STRIDED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[ARRAYIDX3]], i64 4, <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[IN:%.*]], <vscale x 2 x i64> [[WIDE_STRIDED_LOAD]]
; NO-VP-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32.nxv2p0(<vscale x 2 x ptr> align 4 [[TMP6]], <vscale x 2 x i1> splat (i1 true), <vscale x 2 x float> poison)
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
index 9228418900470..541e7c7a100b3 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
@@ -157,20 +157,18 @@ define i32 @load_factor_4_with_gap(i64 %n, ptr noalias %a) {
; NO-VP-NEXT: [[TMP4:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; NO-VP-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 [[TMP3]], i64 [[N_MOD_VF]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[TMP5]]
+; NO-VP-NEXT: [[TMP7:%.*]] = trunc i64 [[TMP3]] to i32
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; NO-VP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ]
; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x i32], ptr [[A:%.*]], i64 [[INDEX]], i32 0
-; NO-VP-NEXT: [[TMP7:%.*]] = trunc i64 [[TMP3]] to i32
; NO-VP-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP6]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
; NO-VP-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[WIDE_MASKED_GATHER]]
-; NO-VP-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP3]] to i32
-; NO-VP-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP6]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP6]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
; NO-VP-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[TMP10]], [[WIDE_MASKED_GATHER1]]
; NO-VP-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[INDEX]], i32 3
-; NO-VP-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP3]] to i32
-; NO-VP-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP19]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP19]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
; NO-VP-NEXT: [[TMP13]] = add <vscale x 4 x i32> [[TMP11]], [[WIDE_MASKED_GATHER2]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -384,20 +382,18 @@ define i32 @load_factor_4_with_tail_gap(i64 %n, ptr noalias %a) {
; NO-VP-NEXT: [[TMP4:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; NO-VP-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 [[TMP3]], i64 [[N_MOD_VF]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[TMP5]]
+; NO-VP-NEXT: [[TMP7:%.*]] = trunc i64 [[TMP3]] to i32
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; NO-VP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ]
; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x i32], ptr [[A:%.*]], i64 [[INDEX]], i32 0
-; NO-VP-NEXT: [[TMP7:%.*]] = trunc i64 [[TMP3]] to i32
; NO-VP-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP6]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
; NO-VP-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[WIDE_MASKED_GATHER]]
-; NO-VP-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP3]] to i32
-; NO-VP-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP6]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP6]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
; NO-VP-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[TMP10]], [[WIDE_MASKED_GATHER1]]
; NO-VP-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[INDEX]], i32 2
-; NO-VP-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP3]] to i32
-; NO-VP-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP19]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP19]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
; NO-VP-NEXT: [[TMP13]] = add <vscale x 4 x i32> [[TMP11]], [[WIDE_MASKED_GATHER2]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -610,25 +606,22 @@ define i32 @load_factor_4_reverse(i64 %n, ptr noalias %a) {
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], [[TMP5]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]]
; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT: [[TMP8:%.*]] = trunc i64 [[TMP5]] to i32
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; NO-VP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
; NO-VP-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[N]], [[INDEX]]
; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i32], ptr [[A:%.*]], i64 [[OFFSET_IDX]], i32 0
-; NO-VP-NEXT: [[TMP8:%.*]] = trunc i64 [[TMP5]] to i32
; NO-VP-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP7]], i64 -16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP8]])
; NO-VP-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[WIDE_MASKED_GATHER]]
-; NO-VP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP5]] to i32
-; NO-VP-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP7]], i64 -16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP7]], i64 -16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP8]])
; NO-VP-NEXT: [[TMP12:%.*]] = add <vscale x 4 x i32> [[TMP11]], [[WIDE_MASKED_GATHER3]]
; NO-VP-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[OFFSET_IDX]], i32 2
-; NO-VP-NEXT: [[TMP13:%.*]] = trunc i64 [[TMP5]] to i32
-; NO-VP-NEXT: [[WIDE_MASKED_GATHER4:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP23]], i64 -16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP13]])
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER4:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP23]], i64 -16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP8]])
; NO-VP-NEXT: [[TMP14:%.*]] = add <vscale x 4 x i32> [[TMP12]], [[WIDE_MASKED_GATHER4]]
; NO-VP-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[OFFSET_IDX]], i32 3
-; NO-VP-NEXT: [[TMP24:%.*]] = trunc i64 [[TMP5]] to i32
-; NO-VP-NEXT: [[WIDE_MASKED_GATHER5:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP15]], i64 -16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP24]])
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER5:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP15]], i64 -16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP8]])
; NO-VP-NEXT: [[TMP16]] = add <vscale x 4 x i32> [[TMP14]], [[WIDE_MASKED_GATHER5]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
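The CHECK changes above rely on two properties of @llvm.experimental.vp.strided.load: the stride operand is given in bytes (16 for field 0 of a [4 x i32] row, -16 for the reversed loop), and the trailing i32 operand is an explicit vector length, so a single trunc of the VF can be hoisted into the preheader and shared by all of the loads. As a minimal scalar model of the per-lane semantics -- a hypothetical standalone helper, not the intrinsic's actual lowering:

  #include <cassert>
  #include <cstdint>
  #include <cstring>
  #include <vector>

  // Reads EVL lanes starting at Base, advancing StrideBytes per lane, under
  // a per-lane mask. Masked-off lanes are left as 0 in this model.
  std::vector<int32_t> vpStridedLoadI32(const uint8_t *Base,
                                        int64_t StrideBytes,
                                        const std::vector<bool> &Mask,
                                        uint32_t EVL) {
    std::vector<int32_t> Result(EVL, 0);
    for (uint32_t Lane = 0; Lane < EVL; ++Lane) {
      if (!Mask[Lane])
        continue;
      int32_t V;
      std::memcpy(&V, Base + int64_t(Lane) * StrideBytes, sizeof(V));
      Result[Lane] = V;
    }
    return Result;
  }

  int main() {
    int32_t Rows[8][4] = {}; // field 0 of each row sits 16 bytes apart
    for (int I = 0; I < 8; ++I)
      Rows[I][0] = I * 10;
    auto R = vpStridedLoadI32(reinterpret_cast<const uint8_t *>(Rows), 16,
                              std::vector<bool>(4, true), 4);
    assert(R[3] == 30); // lane 3 read Rows[3][0]
  }

A negative stride works the same way, which is why the reversed-loop test simply passes -16.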
>From 61c4d4aae8ba1c66481a74881ffdd28e095d0ee4 Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Mon, 8 Dec 2025 17:52:55 -0800
Subject: [PATCH 18/30] Inherit from VPWidenIntrinsicRecipe
---
llvm/lib/Transforms/Vectorize/VPlan.h | 129 ++++++++----------
.../Transforms/Vectorize/VPlanAnalysis.cpp | 2 +-
.../lib/Transforms/Vectorize/VPlanRecipes.cpp | 104 ++++----------
llvm/lib/Transforms/Vectorize/VPlanValue.h | 1 -
.../Transforms/Vectorize/VPlanVerifier.cpp | 3 -
5 files changed, 82 insertions(+), 157 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index c02f527625777..e538c7693f2b6 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -615,6 +615,7 @@ class VPSingleDefRecipe : public VPRecipeBase, public VPRecipeValue {
case VPRecipeBase::VPWidenCastSC:
case VPRecipeBase::VPWidenGEPSC:
case VPRecipeBase::VPWidenIntrinsicSC:
+ case VPRecipeBase::VPWidenMemIntrinsicSC:
case VPRecipeBase::VPWidenSC:
case VPRecipeBase::VPBlendSC:
case VPRecipeBase::VPPredInstPHISC:
@@ -630,7 +631,6 @@ class VPSingleDefRecipe : public VPRecipeBase, public VPRecipeValue {
case VPRecipeBase::VPInterleaveEVLSC:
case VPRecipeBase::VPInterleaveSC:
case VPRecipeBase::VPIRInstructionSC:
- case VPRecipeBase::VPWidenMemIntrinsicSC:
case VPRecipeBase::VPWidenStridedLoadSC:
case VPRecipeBase::VPWidenLoadEVLSC:
case VPRecipeBase::VPWidenLoadSC:
@@ -1057,6 +1057,7 @@ struct VPRecipeWithIRFlags : public VPSingleDefRecipe, public VPIRFlags {
R->getVPRecipeID() == VPRecipeBase::VPWidenCallSC ||
R->getVPRecipeID() == VPRecipeBase::VPWidenCastSC ||
R->getVPRecipeID() == VPRecipeBase::VPWidenIntrinsicSC ||
+ R->getVPRecipeID() == VPRecipeBase::VPWidenMemIntrinsicSC ||
R->getVPRecipeID() == VPRecipeBase::VPReductionSC ||
R->getVPRecipeID() == VPRecipeBase::VPReductionEVLSC ||
R->getVPRecipeID() == VPRecipeBase::VPReplicateSC ||
@@ -1825,6 +1826,29 @@ class VPWidenIntrinsicRecipe : public VPRecipeWithIRFlags, public VPIRMetadata {
/// True if the intrinsic may have side-effects.
bool MayHaveSideEffects;
+protected:
+ VPWidenIntrinsicRecipe(const unsigned char SC,
+ Intrinsic::ID VectorIntrinsicID,
+ ArrayRef<VPValue *> CallArguments, Type *Ty,
+ const VPIRFlags &Flags = {},
+ const VPIRMetadata &Metadata = {},
+ DebugLoc DL = DebugLoc::getUnknown())
+ : VPRecipeWithIRFlags(SC, CallArguments, Flags, DL),
+ VPIRMetadata(Metadata), VectorIntrinsicID(VectorIntrinsicID),
+ ResultTy(Ty) {
+ LLVMContext &Ctx = Ty->getContext();
+ AttributeSet Attrs = Intrinsic::getFnAttributes(Ctx, VectorIntrinsicID);
+ MemoryEffects ME = Attrs.getMemoryEffects();
+ MayReadFromMemory = !ME.onlyWritesMemory();
+ MayWriteToMemory = !ME.onlyReadsMemory();
+ MayHaveSideEffects = MayWriteToMemory ||
+ !Attrs.hasAttribute(Attribute::NoUnwind) ||
+ !Attrs.hasAttribute(Attribute::WillReturn);
+ }
+
+ /// Helper function to produce the widened intrinsic call.
+ CallInst *createVectorCall(VPTransformState &State);
+
public:
VPWidenIntrinsicRecipe(CallInst &CI, Intrinsic::ID VectorIntrinsicID,
ArrayRef<VPValue *> CallArguments, Type *Ty,
@@ -1845,19 +1869,9 @@ class VPWidenIntrinsicRecipe : public VPRecipeWithIRFlags, public VPIRMetadata {
const VPIRFlags &Flags = {},
const VPIRMetadata &Metadata = {},
DebugLoc DL = DebugLoc::getUnknown())
- : VPRecipeWithIRFlags(VPRecipeBase::VPWidenIntrinsicSC, CallArguments,
- Flags, DL),
- VPIRMetadata(Metadata), VectorIntrinsicID(VectorIntrinsicID),
- ResultTy(Ty) {
- LLVMContext &Ctx = Ty->getContext();
- AttributeSet Attrs = Intrinsic::getFnAttributes(Ctx, VectorIntrinsicID);
- MemoryEffects ME = Attrs.getMemoryEffects();
- MayReadFromMemory = !ME.onlyWritesMemory();
- MayWriteToMemory = !ME.onlyReadsMemory();
- MayHaveSideEffects = MayWriteToMemory ||
- !Attrs.hasAttribute(Attribute::NoUnwind) ||
- !Attrs.hasAttribute(Attribute::WillReturn);
- }
+ : VPWidenIntrinsicRecipe(VPRecipeBase::VPWidenIntrinsicSC,
+ VectorIntrinsicID, CallArguments, Ty, Flags,
+ Metadata, DL) {}
~VPWidenIntrinsicRecipe() override = default;
@@ -1870,7 +1884,24 @@ class VPWidenIntrinsicRecipe : public VPRecipeWithIRFlags, public VPIRMetadata {
*this, *this, getDebugLoc());
}
- VP_CLASSOF_IMPL(VPRecipeBase::VPWidenIntrinsicSC)
+ static inline bool classof(const VPRecipeBase *R) {
+ return R->getVPRecipeID() == VPRecipeBase::VPWidenIntrinsicSC ||
+ R->getVPRecipeID() == VPRecipeBase::VPWidenMemIntrinsicSC;
+ }
+
+ static inline bool classof(const VPUser *U) {
+ auto *R = dyn_cast<VPRecipeBase>(U);
+ return R && classof(R);
+ }
+
+ static inline bool classof(const VPValue *V) {
+ auto *R = V->getDefiningRecipe();
+ return R && classof(R);
+ }
+
+ static inline bool classof(const VPSingleDefRecipe *R) {
+ return classof(static_cast<const VPRecipeBase *>(R));
+ }
/// Produce a widened version of the vector intrinsic.
LLVM_ABI_FOR_TEST void execute(VPTransformState &State) override;
@@ -1908,48 +1939,29 @@ class VPWidenIntrinsicRecipe : public VPRecipeWithIRFlags, public VPIRMetadata {
};
/// A recipe for widening vector memory intrinsics.
-class VPWidenMemIntrinsicRecipe : public VPRecipeBase,
- public VPIRMetadata{
- Instruction &Ingredient;
-
+class VPWidenMemIntrinsicRecipe final : public VPWidenIntrinsicRecipe {
/// Alignment information for this memory access.
Align Alignment;
- /// ID of the vector intrinsic to widen.
- Intrinsic::ID VectorIntrinsicID;
-
- /// Scalar return type of the intrinsic.
- Type *ResultTy;
-
- /// True if the intrinsic may read from memory.
- bool MayReadFromMemory;
-
- /// True if the intrinsic may read write to memory.
- bool MayWriteToMemory;
-
- /// True if the intrinsic may have side-effects.
- bool MayHaveSideEffects;
-
public:
// TODO: support StoreInst for strided store
VPWidenMemIntrinsicRecipe(LoadInst &LI, Intrinsic::ID VectorIntrinsicID,
ArrayRef<VPValue *> CallArguments,
const VPIRMetadata &MD = {},
DebugLoc DL = DebugLoc::getUnknown())
- : VPRecipeBase(VPRecipeBase::VPWidenMemIntrinsicSC, CallArguments, DL),
- VPIRMetadata(MD), Ingredient(LI), Alignment(LI.getAlign()),
- VectorIntrinsicID(VectorIntrinsicID), ResultTy(LI.getType()),
- MayReadFromMemory(LI.mayReadFromMemory()),
- MayHaveSideEffects(LI.mayHaveSideEffects()) {
- new VPRecipeValue(this, &LI);
+ : VPWidenIntrinsicRecipe(VPRecipeBase::VPWidenMemIntrinsicSC,
+ VectorIntrinsicID, CallArguments, LI.getType(),
+ {}, MD, DL),
+ Alignment(LI.getAlign()) {
+ setUnderlyingValue(&LI);
}
~VPWidenMemIntrinsicRecipe() override = default;
VPWidenMemIntrinsicRecipe *clone() override {
- return new VPWidenMemIntrinsicRecipe(*cast<LoadInst>(&Ingredient),
- VectorIntrinsicID, operands(), *this,
- getDebugLoc());
+ return new VPWidenMemIntrinsicRecipe(*cast<LoadInst>(getUnderlyingInstr()),
+ getVectorIntrinsicID(), operands(),
+ *this, getDebugLoc());
}
VP_CLASSOF_IMPL(VPRecipeBase::VPWidenMemIntrinsicSC)
@@ -1961,42 +1973,15 @@ class VPWidenMemIntrinsicRecipe : public VPRecipeBase,
InstructionCost computeCost(ElementCount VF,
VPCostContext &Ctx) const override;
- /// Return the ID of the intrinsic.
- Intrinsic::ID getVectorIntrinsicID() const { return VectorIntrinsicID; }
-
- /// Return the scalar return type of the intrinsic.
- Type *getResultType() const { return ResultTy; }
-
- /// Return to name of the intrinsic as string.
- StringRef getIntrinsicName() const {
- return Intrinsic::getBaseName(VectorIntrinsicID);
- }
-
- /// Returns true if the intrinsic may read from memory.
- bool mayReadFromMemory() const { return MayReadFromMemory; }
-
- /// Returns true if the intrinsic may write to memory.
- bool mayWriteToMemory() const { return MayWriteToMemory; }
-
- /// Returns true if the intrinsic may have side-effects.
- bool mayHaveSideEffects() const { return MayHaveSideEffects; }
-
+  /// Return the index of the pointer parameter.
unsigned getMemoryPointerParamPos() const;
+  /// Return the index of the mask parameter.
unsigned getMaskParamPos() const;
void setMask(VPValue *Mask) { setOperand(getMaskParamPos(), Mask); }
VPValue *getMask() const { return getOperand(getMaskParamPos()); }
-
- bool usesFirstLaneOnly(const VPValue *Op) const override;
-
-protected:
-#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
- /// Print the recipe.
- void printRecipe(raw_ostream &O, const Twine &Indent,
- VPSlotTracker &SlotTracker) const override;
-#endif
};
/// A recipe for widening Call instructions using library calls.
diff --git a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp
index a5aaa071afbde..9b62532cecda7 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp
@@ -291,7 +291,7 @@ Type *VPTypeAnalysis::inferScalarType(const VPValue *V) {
})
// VPInstructionWithType must be handled before VPInstruction.
.Case<VPInstructionWithType, VPWidenIntrinsicRecipe,
- VPWidenMemIntrinsicRecipe, VPWidenCastRecipe>(
+ VPWidenCastRecipe>(
[](const auto *R) { return R->getResultType(); })
.Case<VPBlendRecipe, VPInstruction, VPWidenRecipe, VPReplicateRecipe,
VPWidenCallRecipe, VPWidenMemoryRecipe>(
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index 3c9bde9bbb3f9..431c0ede0607d 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -71,10 +71,9 @@ bool VPRecipeBase::mayWriteToMemory() const {
return !cast<VPWidenCallRecipe>(this)
->getCalledScalarFunction()
->onlyReadsMemory();
+ case VPWidenMemIntrinsicSC:
case VPWidenIntrinsicSC:
return cast<VPWidenIntrinsicRecipe>(this)->mayWriteToMemory();
- case VPWidenMemIntrinsicSC:
- return cast<VPWidenMemIntrinsicRecipe>(this)->mayWriteToMemory();
case VPActiveLaneMaskPHISC:
case VPCanonicalIVPHISC:
case VPBranchOnMaskSC:
@@ -127,10 +126,9 @@ bool VPRecipeBase::mayReadFromMemory() const {
return !cast<VPWidenCallRecipe>(this)
->getCalledScalarFunction()
->onlyWritesMemory();
+ case VPWidenMemIntrinsicSC:
case VPWidenIntrinsicSC:
return cast<VPWidenIntrinsicRecipe>(this)->mayReadFromMemory();
- case VPWidenMemIntrinsicSC:
- return cast<VPWidenMemIntrinsicRecipe>(this)->mayReadFromMemory();
case VPBranchOnMaskSC:
case VPDerivedIVSC:
case VPFirstOrderRecurrencePHISC:
@@ -186,10 +184,9 @@ bool VPRecipeBase::mayHaveSideEffects() const {
Function *Fn = cast<VPWidenCallRecipe>(this)->getCalledScalarFunction();
return mayWriteToMemory() || !Fn->doesNotThrow() || !Fn->willReturn();
}
+ case VPWidenMemIntrinsicSC:
case VPWidenIntrinsicSC:
return cast<VPWidenIntrinsicRecipe>(this)->mayHaveSideEffects();
- case VPWidenMemIntrinsicSC:
- return cast<VPWidenMemIntrinsicRecipe>(this)->mayHaveSideEffects();
case VPBlendSC:
case VPReductionEVLSC:
case VPReductionSC:
@@ -1850,9 +1847,7 @@ void VPWidenCallRecipe::printRecipe(raw_ostream &O, const Twine &Indent,
}
#endif
-void VPWidenIntrinsicRecipe::execute(VPTransformState &State) {
- assert(State.VF.isVector() && "not widening");
-
+CallInst *VPWidenIntrinsicRecipe::createVectorCall(VPTransformState &State) {
SmallVector<Type *, 2> TysForDecl;
// Add return type if intrinsic is overloaded on it.
if (isVectorIntrinsicWithOverloadTypeAtArg(VectorIntrinsicID, -1,
@@ -1888,7 +1883,7 @@ void VPWidenIntrinsicRecipe::execute(VPTransformState &State) {
assert(VectorF &&
"Can't retrieve vector intrinsic or vector-predication intrinsics.");
- auto *CI = cast_or_null<CallInst>(getUnderlyingValue());
+ auto *CI = dyn_cast_or_null<CallInst>(getUnderlyingValue());
SmallVector<OperandBundleDef, 1> OpBundles;
if (CI)
CI->getOperandBundlesAsDefs(OpBundles);
@@ -1898,6 +1893,12 @@ void VPWidenIntrinsicRecipe::execute(VPTransformState &State) {
applyFlags(*V);
applyMetadata(*V);
+ return V;
+}
+
+void VPWidenIntrinsicRecipe::execute(VPTransformState &State) {
+ assert(State.VF.isVector() && "not widening");
+ CallInst *V = createVectorCall(State);
if (!V->getType()->isVoidTy())
State.set(this, V);
}
@@ -1988,10 +1989,11 @@ void VPWidenIntrinsicRecipe::printRecipe(raw_ostream &O, const Twine &Indent,
#endif
unsigned VPWidenMemIntrinsicRecipe::getMemoryPointerParamPos() const {
- if (auto Pos = VPIntrinsic::getMemoryPointerParamPos(VectorIntrinsicID))
+ Intrinsic::ID IID = getVectorIntrinsicID();
+ if (auto Pos = VPIntrinsic::getMemoryPointerParamPos(IID))
return *Pos;
- switch (VectorIntrinsicID) {
+ switch (IID) {
case Intrinsic::masked_load:
case Intrinsic::masked_gather:
case Intrinsic::masked_expandload:
@@ -2006,10 +2008,11 @@ unsigned VPWidenMemIntrinsicRecipe::getMemoryPointerParamPos() const {
}
unsigned VPWidenMemIntrinsicRecipe::getMaskParamPos() const {
- if (auto Pos = VPIntrinsic::getMaskParamPos(VectorIntrinsicID))
+ Intrinsic::ID IID = getVectorIntrinsicID();
+ if (auto Pos = VPIntrinsic::getMaskParamPos(IID))
return *Pos;
- switch (VectorIntrinsicID) {
+ switch (IID) {
case Intrinsic::masked_load:
case Intrinsic::masked_gather:
case Intrinsic::masked_expandload:
@@ -2025,87 +2028,28 @@ unsigned VPWidenMemIntrinsicRecipe::getMaskParamPos() const {
void VPWidenMemIntrinsicRecipe::execute(VPTransformState &State) {
assert(State.VF.isVector() && "not widening");
-
- SmallVector<Type *, 2> TysForDecl;
- // Add return type if intrinsic is overloaded on it.
- if (isVectorIntrinsicWithOverloadTypeAtArg(VectorIntrinsicID, -1, State.TTI))
- TysForDecl.push_back(VectorType::get(getResultType(), State.VF));
- SmallVector<Value *, 4> Args;
- for (const auto &I : enumerate(operands())) {
- // Some intrinsics have a scalar argument - don't replace it with a
- // vector.
- Value *Arg;
- if (isVectorIntrinsicWithScalarOpAtArg(VectorIntrinsicID, I.index(),
- State.TTI))
- Arg = State.get(I.value(), VPLane(0));
- else
- Arg = State.get(I.value(), usesFirstLaneOnly(I.value()));
- if (isVectorIntrinsicWithOverloadTypeAtArg(VectorIntrinsicID, I.index(),
- State.TTI))
- TysForDecl.push_back(Arg->getType());
- Args.push_back(Arg);
- }
-
- // Use vector version of the intrinsic.
- Module *M = State.Builder.GetInsertBlock()->getModule();
- Function *VectorF =
- Intrinsic::getOrInsertDeclaration(M, VectorIntrinsicID, TysForDecl);
- assert(VectorF &&
- "Can't retrieve vector intrinsic or vector-predication intrinsics.");
-
- CallInst *MemI = State.Builder.CreateCall(VectorF, Args);
+ CallInst *MemI = createVectorCall(State);
MemI->addParamAttr(
getMemoryPointerParamPos(),
Attribute::getWithAlignment(MemI->getContext(), Alignment));
- applyMetadata(*MemI);
if (!MemI->getType()->isVoidTy())
- State.set(getVPSingleValue(), MemI);
+ State.set(this, MemI);
}
InstructionCost
VPWidenMemIntrinsicRecipe::computeCost(ElementCount VF,
VPCostContext &Ctx) const {
- Type *Ty = toVectorTy(getLoadStoreType(&Ingredient), VF);
- const Value *Ptr = getLoadStorePointerOperand(&Ingredient);
+ const Instruction *Ingredient = getUnderlyingInstr();
+ Type *Ty = toVectorTy(getLoadStoreType(Ingredient), VF);
+ const Value *Ptr = getLoadStorePointerOperand(Ingredient);
return Ctx.TTI.getMemIntrinsicInstrCost(
- MemIntrinsicCostAttributes(VectorIntrinsicID, Ty, Ptr,
+ MemIntrinsicCostAttributes(getVectorIntrinsicID(), Ty, Ptr,
match(getMask(), m_True()), Alignment,
- &Ingredient),
+ Ingredient),
Ctx.CostKind);
}
-// Copy from VPWidenIntrinsicRecipe::usesFirstLaneOnly.
-bool VPWidenMemIntrinsicRecipe::usesFirstLaneOnly(const VPValue *Op) const {
- assert(is_contained(operands(), Op) && "Op must be an operand of the recipe");
- return all_of(enumerate(operands()), [this, &Op](const auto &X) {
- auto [Idx, V] = X;
- return V != Op || isVectorIntrinsicWithScalarOpAtArg(getVectorIntrinsicID(),
- Idx, nullptr);
- });
-}
-
-#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
-void VPWidenMemIntrinsicRecipe::printRecipe(raw_ostream &O, const Twine &Indent,
- VPSlotTracker &SlotTracker) const {
- O << Indent << "WIDEN-MEM-INTRINSIC ";
- if (ResultTy->isVoidTy()) {
- O << "void ";
- } else {
- getVPSingleValue()->printAsOperand(O, SlotTracker);
- O << " = ";
- }
-
- O << "call ";
- O << getIntrinsicName() << "(";
-
- interleaveComma(operands(), O, [&O, &SlotTracker](VPValue *Op) {
- Op->printAsOperand(O, SlotTracker);
- });
- O << ")";
-}
-#endif
-
void VPHistogramRecipe::execute(VPTransformState &State) {
IRBuilderBase &Builder = State.Builder;
diff --git a/llvm/lib/Transforms/Vectorize/VPlanValue.h b/llvm/lib/Transforms/Vectorize/VPlanValue.h
index 25d56a2b4c3a8..4ef78341e0654 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanValue.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanValue.h
@@ -48,7 +48,6 @@ class LLVM_ABI_FOR_TEST VPValue {
friend struct VPIRValue;
friend struct VPSymbolicValue;
friend class VPRecipeValue;
- friend class VPWidenMemIntrinsicRecipe;
const unsigned char SubclassID; ///< Subclass identifier (for isa/dyn_cast).
diff --git a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp
index 6542ca8e13db7..8943652f3b281 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp
@@ -167,9 +167,6 @@ bool VPlanVerifier::verifyEVLRecipe(const VPInstruction &EVL) const {
.Case([&](const VPWidenIntrinsicRecipe *S) {
return VerifyEVLUse(*S, S->getNumOperands() - 1);
})
- .Case([&](const VPWidenMemIntrinsicRecipe *S) {
- return VerifyEVLUse(*S, S->getNumOperands() - 1);
- })
.Case<VPWidenStoreEVLRecipe, VPReductionEVLRecipe,
VPWidenIntOrFpInductionRecipe, VPWidenPointerInductionRecipe,
VPWidenStridedLoadRecipe>(
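The structural point of this patch is the relaxed classof: once VPWidenMemIntrinsicRecipe derives from VPWidenIntrinsicRecipe, the base recipe must accept both its own ID and the derived VPWidenMemIntrinsicSC ID so existing isa<>/dyn_cast<> users keep seeing memory intrinsics as intrinsics. A self-contained sketch of that LLVM-style RTTI pattern, with illustrative names rather than the real VPlan types:

  #include <cassert>

  struct Recipe {
    enum RecipeID { WidenIntrinsicSC, WidenMemIntrinsicSC, OtherSC };
    RecipeID ID;
    explicit Recipe(RecipeID ID) : ID(ID) {}
  };

  struct WidenIntrinsic : Recipe {
    explicit WidenIntrinsic(RecipeID ID = WidenIntrinsicSC) : Recipe(ID) {}
    // The base classof also accepts the derived subclass ID, mirroring the
    // widened classof introduced by the patch.
    static bool classof(const Recipe *R) {
      return R->ID == WidenIntrinsicSC || R->ID == WidenMemIntrinsicSC;
    }
  };

  struct WidenMemIntrinsic : WidenIntrinsic {
    WidenMemIntrinsic() : WidenIntrinsic(WidenMemIntrinsicSC) {}
    static bool classof(const Recipe *R) {
      return R->ID == WidenMemIntrinsicSC;
    }
  };

  int main() {
    WidenMemIntrinsic M;
    Recipe *R = &M;
    assert(WidenIntrinsic::classof(R));    // isa<WidenIntrinsic> holds
    assert(WidenMemIntrinsic::classof(R)); // and so does the derived check
  }

The protected constructor plays the same role as passing the subclass ID down in this sketch: the derived recipe reuses the base initialization while stamping its own ID.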
>From 057e0991b1ff962f858be64082fbc442cc7ece2e Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Mon, 8 Dec 2025 18:20:05 -0800
Subject: [PATCH 19/30] narrow to Intrinsic::experimental_vp_strided_load
---
llvm/lib/Transforms/Vectorize/VPlan.h | 20 +++-----
.../lib/Transforms/Vectorize/VPlanRecipes.cpp | 51 ++-----------------
.../Transforms/Vectorize/VPlanTransforms.cpp | 6 +--
3 files changed, 14 insertions(+), 63 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index e538c7693f2b6..dc5ecabdf741d 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -1945,13 +1945,12 @@ class VPWidenMemIntrinsicRecipe final : public VPWidenIntrinsicRecipe {
public:
// TODO: support StoreInst for strided store
- VPWidenMemIntrinsicRecipe(LoadInst &LI, Intrinsic::ID VectorIntrinsicID,
- ArrayRef<VPValue *> CallArguments,
+ VPWidenMemIntrinsicRecipe(LoadInst &LI, ArrayRef<VPValue *> CallArguments,
const VPIRMetadata &MD = {},
DebugLoc DL = DebugLoc::getUnknown())
: VPWidenIntrinsicRecipe(VPRecipeBase::VPWidenMemIntrinsicSC,
- VectorIntrinsicID, CallArguments, LI.getType(),
- {}, MD, DL),
+ Intrinsic::experimental_vp_strided_load,
+ CallArguments, LI.getType(), {}, MD, DL),
Alignment(LI.getAlign()) {
setUnderlyingValue(&LI);
}
@@ -1960,8 +1959,7 @@ class VPWidenMemIntrinsicRecipe final : public VPWidenIntrinsicRecipe {
VPWidenMemIntrinsicRecipe *clone() override {
return new VPWidenMemIntrinsicRecipe(*cast<LoadInst>(getUnderlyingInstr()),
- getVectorIntrinsicID(), operands(),
- *this, getDebugLoc());
+ operands(), *this, getDebugLoc());
}
VP_CLASSOF_IMPL(VPRecipeBase::VPWidenMemIntrinsicSC)
@@ -1973,15 +1971,9 @@ class VPWidenMemIntrinsicRecipe final : public VPWidenIntrinsicRecipe {
InstructionCost computeCost(ElementCount VF,
VPCostContext &Ctx) const override;
-  /// Return the index of the pointer parameter.
- unsigned getMemoryPointerParamPos() const;
+ void setMask(VPValue *Mask) { setOperand(2, Mask); }
-  /// Return the index of the mask parameter.
- unsigned getMaskParamPos() const;
-
- void setMask(VPValue *Mask) { setOperand(getMaskParamPos(), Mask); }
-
- VPValue *getMask() const { return getOperand(getMaskParamPos()); }
+ VPValue *getMask() const { return getOperand(2); }
};
/// A recipe for widening Call instructions using library calls.
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index 431c0ede0607d..3f35694dc0170 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -1848,6 +1848,8 @@ void VPWidenCallRecipe::printRecipe(raw_ostream &O, const Twine &Indent,
#endif
CallInst *VPWidenIntrinsicRecipe::createVectorCall(VPTransformState &State) {
+ assert(State.VF.isVector() && "not widening");
+
SmallVector<Type *, 2> TysForDecl;
// Add return type if intrinsic is overloaded on it.
if (isVectorIntrinsicWithOverloadTypeAtArg(VectorIntrinsicID, -1,
@@ -1897,7 +1899,6 @@ CallInst *VPWidenIntrinsicRecipe::createVectorCall(VPTransformState &State) {
}
void VPWidenIntrinsicRecipe::execute(VPTransformState &State) {
- assert(State.VF.isVector() && "not widening");
CallInst *V = createVectorCall(State);
if (!V->getType()->isVoidTy())
State.set(this, V);
@@ -1988,60 +1989,18 @@ void VPWidenIntrinsicRecipe::printRecipe(raw_ostream &O, const Twine &Indent,
}
#endif
-unsigned VPWidenMemIntrinsicRecipe::getMemoryPointerParamPos() const {
- Intrinsic::ID IID = getVectorIntrinsicID();
- if (auto Pos = VPIntrinsic::getMemoryPointerParamPos(IID))
- return *Pos;
-
- switch (IID) {
- case Intrinsic::masked_load:
- case Intrinsic::masked_gather:
- case Intrinsic::masked_expandload:
- return 0;
- case Intrinsic::masked_store:
- case Intrinsic::masked_scatter:
- case Intrinsic::masked_compressstore:
- return 1;
- default:
- llvm_unreachable("unknown vector memory intrinsic");
- }
-}
-
-unsigned VPWidenMemIntrinsicRecipe::getMaskParamPos() const {
- Intrinsic::ID IID = getVectorIntrinsicID();
- if (auto Pos = VPIntrinsic::getMaskParamPos(IID))
- return *Pos;
-
- switch (IID) {
- case Intrinsic::masked_load:
- case Intrinsic::masked_gather:
- case Intrinsic::masked_expandload:
- return 1;
- case Intrinsic::masked_store:
- case Intrinsic::masked_scatter:
- case Intrinsic::masked_compressstore:
- return 2;
- default:
- llvm_unreachable("unknown vector memory intrinsic");
- }
-}
-
void VPWidenMemIntrinsicRecipe::execute(VPTransformState &State) {
- assert(State.VF.isVector() && "not widening");
CallInst *MemI = createVectorCall(State);
MemI->addParamAttr(
- getMemoryPointerParamPos(),
- Attribute::getWithAlignment(MemI->getContext(), Alignment));
-
- if (!MemI->getType()->isVoidTy())
- State.set(this, MemI);
+ 0, Attribute::getWithAlignment(MemI->getContext(), Alignment));
+ State.set(this, MemI);
}
InstructionCost
VPWidenMemIntrinsicRecipe::computeCost(ElementCount VF,
VPCostContext &Ctx) const {
+ Type *Ty = toVectorTy(getResultType(), VF);
const Instruction *Ingredient = getUnderlyingInstr();
- Type *Ty = toVectorTy(getLoadStoreType(Ingredient), VF);
const Value *Ptr = getLoadStorePointerOperand(Ingredient);
return Ctx.TTI.getMemIntrinsicInstrCost(
MemIntrinsicCostAttributes(getVectorIntrinsicID(), Ty, Ptr,
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 131f06c014866..eb648c1f56a57 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -6094,10 +6094,10 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
else
Mask = Plan.getTrue();
auto *StridedLoad = new VPWidenMemIntrinsicRecipe(
- *cast<LoadInst>(&Ingredient), Intrinsic::experimental_vp_strided_load,
- {NewPtr, StrideInBytes, Mask, I32VF}, *LoadR, LoadR->getDebugLoc());
+ *cast<LoadInst>(&Ingredient), {NewPtr, StrideInBytes, Mask, I32VF},
+ *LoadR, LoadR->getDebugLoc());
StridedLoad->insertBefore(LoadR);
- LoadR->replaceAllUsesWith(StridedLoad->getVPSingleValue());
+ LoadR->replaceAllUsesWith(StridedLoad);
ToErase.push_back(LoadR);
}
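With the intrinsic ID pinned to experimental_vp_strided_load, the recipe can assume a fixed operand layout of (pointer, stride, mask, EVL); that is why setMask/getMask collapse to operand index 2, the alignment attribute goes on parameter 0, and the later EVL rewrite targets operand 3. A toy sketch of coding against such a fixed layout -- symbolic operands and hypothetical type names, not the VPlan API:

  #include <array>
  #include <cassert>
  #include <string>
  #include <utility>

  // Operand positions of the vp.strided.load call, as compile-time
  // constants instead of per-intrinsic lookups.
  enum StridedLoadOperand { PtrPos = 0, StridePos = 1, MaskPos = 2, EVLPos = 3 };

  struct StridedLoadCall {
    std::array<std::string, 4> Operands; // symbolic operand names
    void setMask(std::string M) { Operands[MaskPos] = std::move(M); }
    const std::string &getMask() const { return Operands[MaskPos]; }
  };

  int main() {
    StridedLoadCall C{{"%ptr", "%stride", "%m", "%evl"}};
    C.setMask("splat(true)");
    assert(C.getMask() == "splat(true)");
  }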
>From 4c1be7ab2b6fb6ece413f5fbfeadc9621dfb95ff Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Mon, 8 Dec 2025 19:35:12 -0800
Subject: [PATCH 20/30] pattern matching
Intrinsic::experimental_vp_strided_load
---
llvm/lib/Transforms/Vectorize/VPlan.h | 4 ----
.../lib/Transforms/Vectorize/VPlanRecipes.cpp | 2 +-
.../Transforms/Vectorize/VPlanTransforms.cpp | 24 ++++++++++---------
3 files changed, 14 insertions(+), 16 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index dc5ecabdf741d..7148c71101e35 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -1970,10 +1970,6 @@ class VPWidenMemIntrinsicRecipe final : public VPWidenIntrinsicRecipe {
/// Return the cost of this vector memory intrinsic.
InstructionCost computeCost(ElementCount VF,
VPCostContext &Ctx) const override;
-
- void setMask(VPValue *Mask) { setOperand(2, Mask); }
-
- VPValue *getMask() const { return getOperand(2); }
};
/// A recipe for widening Call instructions using library calls.
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index 3f35694dc0170..e65e7271b5669 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -2004,7 +2004,7 @@ VPWidenMemIntrinsicRecipe::computeCost(ElementCount VF,
const Value *Ptr = getLoadStorePointerOperand(Ingredient);
return Ctx.TTI.getMemIntrinsicInstrCost(
MemIntrinsicCostAttributes(getVectorIntrinsicID(), Ty, Ptr,
- match(getMask(), m_True()), Alignment,
+ match(getOperand(2), m_True()), Alignment,
Ingredient),
Ctx.CostKind);
}
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index eb648c1f56a57..103b170d1721c 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -2980,17 +2980,19 @@ static VPRecipeBase *optimizeMaskToEVL(VPValue *HeaderMask,
TypeInfo.inferScalarType(LoadR), {}, {}, DL);
}
- if (auto *MI = dyn_cast<VPWidenMemIntrinsicRecipe>(&CurRecipe))
- if (MI->getVectorIntrinsicID() == Intrinsic::experimental_vp_strided_load &&
- match(MI->getMask(), m_RemoveMask(HeaderMask, Mask))) {
- VPWidenMemIntrinsicRecipe *NewMI = MI->clone();
- if (Mask)
- NewMI->setMask(Mask);
- else
- NewMI->setMask(Plan->getTrue());
- NewMI->setOperand(3, &EVL);
- return NewMI;
- }
+ VPValue *Stride;
+ if (match(&CurRecipe, m_Intrinsic<Intrinsic::experimental_vp_strided_load>(
+ m_VPValue(Addr), m_VPValue(Stride),
+ m_RemoveMask(HeaderMask, Mask),
+ m_VPInstruction<Instruction::Trunc>(
+ m_Specific(&Plan->getVF()))))) {
+ auto *I = cast<VPWidenIntrinsicRecipe>(&CurRecipe);
+ if (!Mask)
+ Mask = Plan->getTrue();
+ return new VPWidenMemIntrinsicRecipe(
+ *cast<LoadInst>(I->getUnderlyingInstr()), {Addr, Stride, Mask, &EVL},
+ *I, I->getDebugLoc());
+ }
if (auto *StridedL = dyn_cast<VPWidenStridedLoadRecipe>(&CurRecipe))
if (StridedL->isMasked() &&
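The rewrite from a header-masked strided load to an EVL-based one is sound because, under tail folding, header-mask lane L is active exactly when L is below the count of remaining elements -- the same predicate an explicit vector length encodes. A standalone check of that equivalence over all small trip counts:

  #include <algorithm>
  #include <cassert>
  #include <cstdint>

  int main() {
    const uint32_t VF = 4;
    for (uint64_t Trip = 0; Trip < 32; ++Trip) {
      for (uint64_t I = 0; I < Trip; I += VF) {
        uint32_t EVL = std::min<uint64_t>(VF, Trip - I); // per-iteration EVL
        for (uint32_t Lane = 0; Lane < VF; ++Lane) {
          bool HeaderMask = I + Lane < Trip; // active-lane (header) mask
          bool EVLActive = Lane < EVL;       // lanes a VP intrinsic touches
          assert(HeaderMask == EVLActive);
        }
      }
    }
  }

Any mask component beyond the header mask is kept as the explicit mask operand, so the combined predicate is unchanged.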
>From e4cf7320d912ae19d39eaa5ba13911e439397cd3 Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Mon, 8 Dec 2025 22:57:25 -0800
Subject: [PATCH 21/30] assert scalable and vector indexed type
---
llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 103b170d1721c..b4dd6f8c76250 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -5977,8 +5977,9 @@ determineBaseAndStride(VPWidenGEPRecipe *WidenGEP) {
return {nullptr, nullptr, nullptr};
Type *ElementTy = WidenGEP->getIndexedType(*VarIndex);
- if (ElementTy->isScalableTy() || ElementTy->isStructTy() ||
- ElementTy->isVectorTy())
+ assert(!ElementTy->isScalableTy() && !ElementTy->isVectorTy() &&
+ "Unexpected indexed type");
+ if (ElementTy->isStructTy())
return {nullptr, nullptr, nullptr};
unsigned VarOp = *VarIndex + 1;
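Strengthening the check into an assert is fine because the transform only sees indexed types that widened GEPs can produce; the struct case still bails out, since struct indices select fields at irregular offsets rather than scaling one fixed stride. A small standalone illustration of why an array index admits a stride and a struct index does not:

  #include <cstddef>
  #include <cstdio>

  struct S { char C; double D; }; // fields at irregular offsets

  int main() {
    int A[4][4] = {};
    // Array-typed step: every outer index advances a constant 16 bytes.
    std::ptrdiff_t ArrayStride =
        reinterpret_cast<char *>(&A[1]) - reinterpret_cast<char *>(&A[0]);
    std::printf("array stride = %td bytes\n", ArrayStride);
    // Struct "steps" jump to per-field offsets, so no single stride exists.
    std::printf("offsetof(S,C)=%zu offsetof(S,D)=%zu\n",
                offsetof(S, C), offsetof(S, D));
  }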
>From 018b5f138c3fe97e99c1f81db0d645109fdc0695 Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Mon, 8 Dec 2025 23:09:23 -0800
Subject: [PATCH 22/30] remove unused VPWidenStridedLoadRecipe
---
.../Transforms/Vectorize/LoopVectorize.cpp | 6 +-
llvm/lib/Transforms/Vectorize/VPlan.h | 56 +------------------
.../Transforms/Vectorize/VPlanAnalysis.cpp | 6 +-
.../lib/Transforms/Vectorize/VPlanRecipes.cpp | 54 ------------------
.../Transforms/Vectorize/VPlanTransforms.cpp | 27 +++------
.../Transforms/Vectorize/VPlanVerifier.cpp | 3 +-
6 files changed, 14 insertions(+), 138 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index f57877ba6025c..a00b1a7cea0ce 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -4053,8 +4053,7 @@ void LoopVectorizationPlanner::emitInvalidCostRemarks(
.Case([](const VPHeaderPHIRecipe *R) { return Instruction::PHI; })
.Case(
[](const VPWidenStoreRecipe *R) { return Instruction::Store; })
- .Case<VPWidenLoadRecipe, VPWidenStridedLoadRecipe>(
- [](const auto *R) { return Instruction::Load; })
+ .Case([](const VPWidenLoadRecipe *R) { return Instruction::Load; })
.Case<VPWidenCallRecipe, VPWidenIntrinsicRecipe>(
[](const auto *R) { return Instruction::Call; })
.Case<VPInstruction, VPWidenRecipe, VPReplicateRecipe,
@@ -4156,7 +4155,6 @@ static bool willGenerateVectors(VPlan &Plan, ElementCount VF,
case VPRecipeBase::VPReductionPHISC:
case VPRecipeBase::VPInterleaveEVLSC:
case VPRecipeBase::VPInterleaveSC:
- case VPRecipeBase::VPWidenStridedLoadSC:
case VPRecipeBase::VPWidenLoadEVLSC:
case VPRecipeBase::VPWidenLoadSC:
case VPRecipeBase::VPWidenStoreEVLSC:
@@ -7169,7 +7167,7 @@ static bool planContainsAdditionalSimplifications(VPlan &Plan,
// The strided load is transformed from a gather through VPlanTransform,
// and its cost will be lower than the original gather.
- if (isa<VPWidenStridedLoadRecipe>(&R))
+ if (isa<VPWidenMemIntrinsicRecipe>(&R))
return true;
if (Instruction *UI = GetInstructionForCost(&R)) {
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index 7148c71101e35..11ed9f840824e 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -423,7 +423,6 @@ class LLVM_ABI_FOR_TEST VPRecipeBase
VPWidenGEPSC,
VPWidenIntrinsicSC,
VPWidenMemIntrinsicSC,
- VPWidenStridedLoadSC,
VPWidenLoadEVLSC,
VPWidenLoadSC,
VPWidenStoreEVLSC,
@@ -631,7 +630,6 @@ class VPSingleDefRecipe : public VPRecipeBase, public VPRecipeValue {
case VPRecipeBase::VPInterleaveEVLSC:
case VPRecipeBase::VPInterleaveSC:
case VPRecipeBase::VPIRInstructionSC:
- case VPRecipeBase::VPWidenStridedLoadSC:
case VPRecipeBase::VPWidenLoadEVLSC:
case VPRecipeBase::VPWidenLoadSC:
case VPRecipeBase::VPWidenStoreEVLSC:
@@ -3565,8 +3563,7 @@ class LLVM_ABI_FOR_TEST VPWidenMemoryRecipe : public VPRecipeBase,
return R->getVPRecipeID() == VPRecipeBase::VPWidenLoadSC ||
R->getVPRecipeID() == VPRecipeBase::VPWidenStoreSC ||
R->getVPRecipeID() == VPRecipeBase::VPWidenLoadEVLSC ||
- R->getVPRecipeID() == VPRecipeBase::VPWidenStoreEVLSC ||
- R->getVPRecipeID() == VPRecipeBase::VPWidenStridedLoadSC;
+ R->getVPRecipeID() == VPRecipeBase::VPWidenStoreEVLSC;
}
static inline bool classof(const VPUser *U) {
@@ -3693,57 +3690,6 @@ struct VPWidenLoadEVLRecipe final : public VPWidenMemoryRecipe,
#endif
};
-/// A recipe for strided load operations, using the base address, stride, VF,
-/// and an optional mask. This recipe will generate a vp.strided.load intrinsic
-/// call to represent memory accesses with a fixed stride.
-struct VPWidenStridedLoadRecipe final : public VPWidenMemoryRecipe,
- public VPRecipeValue {
- VPWidenStridedLoadRecipe(LoadInst &Load, VPValue *Addr, VPValue *Stride,
- VPValue *VF, VPValue *Mask,
- const VPIRMetadata &Metadata, DebugLoc DL)
- : VPWidenMemoryRecipe(
- VPRecipeBase::VPWidenStridedLoadSC, Load, {Addr, Stride, VF},
- /*Consecutive=*/false, /*Reverse=*/false, Metadata, DL),
- VPRecipeValue(this, &Load) {
- setMask(Mask);
- }
-
- VPWidenStridedLoadRecipe *clone() override {
- return new VPWidenStridedLoadRecipe(cast<LoadInst>(Ingredient), getAddr(),
- getStride(), getVF(), getMask(), *this,
- getDebugLoc());
- }
-
- VP_CLASSOF_IMPL(VPRecipeBase::VPWidenStridedLoadSC);
-
- /// Return the stride operand.
- VPValue *getStride() const { return getOperand(1); }
-
- /// Return the VF operand.
- VPValue *getVF() const { return getOperand(2); }
-
- /// Generate a strided load.
- void execute(VPTransformState &State) override;
-
- /// Return the cost of this VPWidenStridedLoadRecipe.
- InstructionCost computeCost(ElementCount VF,
- VPCostContext &Ctx) const override;
-
-#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
- /// Print the recipe.
- void printRecipe(raw_ostream &O, const Twine &Indent,
- VPSlotTracker &SlotTracker) const override;
-#endif
-
- /// Returns true if the recipe only uses the first lane of operand \p Op.
- bool usesFirstLaneOnly(const VPValue *Op) const override {
- assert(is_contained(operands(), Op) &&
- "Op must be an operand of the recipe");
- // All operands except the mask are only used for the first lane.
- return Op == getAddr() || Op == getStride() || Op == getVF();
- }
-};
-
/// A recipe for widening store operations, using the stored value, the address
/// to store to and an optional mask.
struct LLVM_ABI_FOR_TEST VPWidenStoreRecipe final : public VPWidenMemoryRecipe {
diff --git a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp
index 9b62532cecda7..4f97f8000c187 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp
@@ -193,10 +193,8 @@ Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPWidenCallRecipe *R) {
}
Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPWidenMemoryRecipe *R) {
- assert(
- (isa<VPWidenLoadRecipe, VPWidenLoadEVLRecipe, VPWidenStridedLoadRecipe>(
- R)) &&
- "Store recipes should not define any values");
+ assert((isa<VPWidenLoadRecipe, VPWidenLoadEVLRecipe>(R)) &&
+ "Store recipes should not define any values");
return cast<LoadInst>(&R->getIngredient())->getType();
}
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index e65e7271b5669..1a01cd5b23fa8 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -91,7 +91,6 @@ bool VPRecipeBase::mayWriteToMemory() const {
case VPWidenCastSC:
case VPWidenGEPSC:
case VPWidenIntOrFpInductionSC:
- case VPWidenStridedLoadSC:
case VPWidenLoadEVLSC:
case VPWidenLoadSC:
case VPWidenPHISC:
@@ -115,7 +114,6 @@ bool VPRecipeBase::mayReadFromMemory() const {
return cast<VPExpressionRecipe>(this)->mayReadOrWriteMemory();
case VPInstructionSC:
return cast<VPInstruction>(this)->opcodeMayReadOrWriteFromMemory();
- case VPWidenStridedLoadSC:
case VPWidenLoadEVLSC:
case VPWidenLoadSC:
return true;
@@ -209,7 +207,6 @@ bool VPRecipeBase::mayHaveSideEffects() const {
case VPInterleaveEVLSC:
case VPInterleaveSC:
return mayWriteToMemory();
- case VPWidenStridedLoadSC:
case VPWidenLoadEVLSC:
case VPWidenLoadSC:
case VPWidenStoreEVLSC:
@@ -3902,57 +3899,6 @@ void VPWidenLoadEVLRecipe::printRecipe(raw_ostream &O, const Twine &Indent,
}
#endif
-void VPWidenStridedLoadRecipe::execute(VPTransformState &State) {
- Type *ScalarDataTy = getLoadStoreType(&Ingredient);
- auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
-
- auto &Builder = State.Builder;
- Value *Addr = State.get(getAddr(), /*IsScalar*/ true);
- Value *StrideInBytes = State.get(getStride(), /*IsScalar*/ true);
- Value *Mask = nullptr;
- if (VPValue *VPMask = getMask())
- Mask = State.get(VPMask);
- else
- Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
- Value *RunTimeVF = Builder.CreateZExtOrTrunc(State.get(getVF(), VPLane(0)),
- Builder.getInt32Ty());
-
- auto *PtrTy = Addr->getType();
- auto *StrideTy = StrideInBytes->getType();
- CallInst *NewLI = Builder.CreateIntrinsic(
- Intrinsic::experimental_vp_strided_load, {DataTy, PtrTy, StrideTy},
- {Addr, StrideInBytes, Mask, RunTimeVF}, nullptr, "wide.strided.load");
- NewLI->addParamAttr(
- 0, Attribute::getWithAlignment(NewLI->getContext(), Alignment));
- applyMetadata(*NewLI);
- State.set(this, NewLI);
-}
-
-InstructionCost
-VPWidenStridedLoadRecipe::computeCost(ElementCount VF,
- VPCostContext &Ctx) const {
- Type *Ty = toVectorTy(getLoadStoreType(&Ingredient), VF);
- const Value *Ptr = getLoadStorePointerOperand(&Ingredient);
- return Ctx.TTI.getMemIntrinsicInstrCost(
- MemIntrinsicCostAttributes(Intrinsic::experimental_vp_strided_load, Ty,
- Ptr, IsMasked, Alignment, &Ingredient),
- Ctx.CostKind);
-}
-
-#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
-void VPWidenStridedLoadRecipe::printRecipe(raw_ostream &O, const Twine &Indent,
- VPSlotTracker &SlotTracker) const {
- O << Indent << "WIDEN ";
- printAsOperand(O, SlotTracker);
- O << " = load ";
- getAddr()->printAsOperand(O, SlotTracker);
- O << ", stride = ";
- getStride()->printAsOperand(O, SlotTracker);
- O << ", runtimeVF = ";
- getVF()->printAsOperand(O, SlotTracker);
-}
-#endif
-
void VPWidenStoreRecipe::execute(VPTransformState &State) {
VPValue *StoredVPValue = getStoredValue();
bool CreateScatter = !isConsecutive();
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index b4dd6f8c76250..b64cf52014f58 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -2994,14 +2994,6 @@ static VPRecipeBase *optimizeMaskToEVL(VPValue *HeaderMask,
*I, I->getDebugLoc());
}
- if (auto *StridedL = dyn_cast<VPWidenStridedLoadRecipe>(&CurRecipe))
- if (StridedL->isMasked() &&
- match(StridedL->getMask(), m_RemoveMask(HeaderMask, Mask)))
- return new VPWidenStridedLoadRecipe(
- *cast<LoadInst>(&StridedL->getIngredient()), StridedL->getAddr(),
- StridedL->getStride(), &EVL, Mask, *StridedL,
- StridedL->getDebugLoc());
-
VPValue *StoredVal;
if (match(&CurRecipe, m_MaskedStore(m_VPValue(Addr), m_VPValue(StoredVal),
m_RemoveMask(HeaderMask, Mask))) &&
@@ -3104,17 +3096,14 @@ static void fixupVFUsersForEVL(VPlan &Plan, VPValue &EVL) {
VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion();
VPBasicBlock *Header = LoopRegion->getEntryBasicBlock();
- assert(
- all_of(
- Plan.getVF().users(),
- [&LoopRegion](VPUser *U) {
- auto *R = cast<VPRecipeBase>(U);
- return (R->getParent()->getParent() != LoopRegion) ||
- isa<VPVectorEndPointerRecipe, VPScalarIVStepsRecipe,
- VPWidenIntOrFpInductionRecipe, VPWidenStridedLoadRecipe>(
- R);
- }) &&
- "User of VF that we can't transform to EVL.");
+ assert(all_of(Plan.getVF().users(),
+ [&LoopRegion](VPUser *U) {
+ auto *R = cast<VPRecipeBase>(U);
+ return (R->getParent()->getParent() != LoopRegion) ||
+ isa<VPVectorEndPointerRecipe, VPScalarIVStepsRecipe,
+ VPWidenIntOrFpInductionRecipe>(R);
+ }) &&
+ "User of VF that we can't transform to EVL.");
Plan.getVF().replaceUsesWithIf(&EVL, [](VPUser &U, unsigned Idx) {
return isa<VPWidenIntOrFpInductionRecipe, VPScalarIVStepsRecipe>(U);
});
diff --git a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp
index 8943652f3b281..9098b9ce8562d 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp
@@ -168,8 +168,7 @@ bool VPlanVerifier::verifyEVLRecipe(const VPInstruction &EVL) const {
return VerifyEVLUse(*S, S->getNumOperands() - 1);
})
.Case<VPWidenStoreEVLRecipe, VPReductionEVLRecipe,
- VPWidenIntOrFpInductionRecipe, VPWidenPointerInductionRecipe,
- VPWidenStridedLoadRecipe>(
+ VPWidenIntOrFpInductionRecipe, VPWidenPointerInductionRecipe>(
[&](const VPRecipeBase *S) { return VerifyEVLUse(*S, 2); })
.Case([&](const VPScalarIVStepsRecipe *R) {
if (R->getNumOperands() != 3) {
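One property of the deleted recipe carries over to the intrinsic-based form: unlike a gather, which consumes a vector of addresses, a strided load consumes a single scalar base and a single scalar stride, so those operands only ever need lane 0. The two access shapes in scalar form, as hypothetical helpers:

  #include <cassert>
  #include <cstdint>
  #include <vector>

  std::vector<int32_t> gather(const std::vector<const int32_t *> &Addrs) {
    std::vector<int32_t> R;
    for (const int32_t *P : Addrs) // one pointer per lane
      R.push_back(*P);
    return R;
  }

  std::vector<int32_t> stridedLoad(const int32_t *Base, int64_t StrideElems,
                                   unsigned VF) {
    std::vector<int32_t> R;
    for (unsigned L = 0; L < VF; ++L) // one base/stride for all lanes
      R.push_back(Base[int64_t(L) * StrideElems]);
    return R;
  }

  int main() {
    int32_t D[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
    assert(gather({&D[0], &D[3], &D[6], &D[9]}) == stridedLoad(D, 3, 4));
  }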
>From 397cdd8982c7ad1598d6e521485b941c2a82e559 Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Mon, 8 Dec 2025 23:59:31 -0800
Subject: [PATCH 23/30] inline getUniqueVariantIndex()
---
llvm/lib/Transforms/Vectorize/VPlan.h | 17 ++---------------
.../Transforms/Vectorize/VPlanTransforms.cpp | 16 ++++++++++++----
2 files changed, 14 insertions(+), 19 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index 11ed9f840824e..81df91245d30c 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -2073,10 +2073,6 @@ class VPHistogramRecipe : public VPRecipeBase {
class LLVM_ABI_FOR_TEST VPWidenGEPRecipe : public VPRecipeWithIRFlags {
Type *SourceElementTy;
- bool isIndexLoopInvariant(unsigned I) const {
- return getOperand(I + 1)->isDefinedOutsideLoopRegions();
- }
-
public:
VPWidenGEPRecipe(GetElementPtrInst *GEP, ArrayRef<VPValue *> Operands,
const VPIRFlags &Flags = {},
@@ -2106,17 +2102,8 @@ class LLVM_ABI_FOR_TEST VPWidenGEPRecipe : public VPRecipeWithIRFlags {
return getOperand(0)->isDefinedOutsideLoopRegions();
}
- std::optional<unsigned> getUniqueVariantIndex() const {
- std::optional<unsigned> VarIdx;
- for (unsigned I = 0, E = getNumOperands() - 1; I < E; ++I) {
- if (isIndexLoopInvariant(I))
- continue;
-
- if (VarIdx)
- return std::nullopt;
- VarIdx = I;
- }
- return VarIdx;
+ bool isIndexLoopInvariant(unsigned I) const {
+ return getOperand(I + 1)->isDefinedOutsideLoopRegions();
}
/// Returns the element type for the first \p I indices of this recipe.
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index b64cf52014f58..52f80fedada0b 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -5961,17 +5961,25 @@ determineBaseAndStride(VPWidenGEPRecipe *WidenGEP) {
return {nullptr, nullptr, nullptr};
// Find the only one variant index.
- std::optional<unsigned> VarIndex = WidenGEP->getUniqueVariantIndex();
- if (!VarIndex)
+ unsigned VarOp = 0;
+ for (unsigned I = 1, E = WidenGEP->getNumOperands(); I < E; ++I) {
+ if (WidenGEP->isIndexLoopInvariant(I - 1))
+ continue;
+
+ if (VarOp != 0)
+ return {nullptr, nullptr, nullptr};
+ VarOp = I;
+ }
+
+ if (!VarOp)
return {nullptr, nullptr, nullptr};
- Type *ElementTy = WidenGEP->getIndexedType(*VarIndex);
+ Type *ElementTy = WidenGEP->getIndexedType(VarOp - 1);
assert(!ElementTy->isScalableTy() && !ElementTy->isVectorTy() &&
"Unexpected indexed type");
if (ElementTy->isStructTy())
return {nullptr, nullptr, nullptr};
- unsigned VarOp = *VarIndex + 1;
VPValue *IndexVPV = WidenGEP->getOperand(VarOp);
auto [Start, Stride] = matchStridedStart(IndexVPV);
if (!Start)
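The inlined loop keeps the contract of the removed getUniqueVariantIndex(): exactly one GEP index may be loop-variant, with VarOp holding a 1-based operand position and 0 meaning the access cannot be converted. A standalone sketch of that scan over an invariance bitmap -- illustrative types, not the VPlan API:

  #include <cstdio>
  #include <vector>

  // Returns the 1-based position of the single variant index, or 0 when no
  // index varies or more than one does.
  unsigned uniqueVariantOperand(const std::vector<bool> &IsInvariant) {
    unsigned VarOp = 0;
    for (unsigned I = 1, E = unsigned(IsInvariant.size()) + 1; I < E; ++I) {
      if (IsInvariant[I - 1])
        continue;
      if (VarOp != 0)
        return 0; // a second variant index disqualifies the GEP
      VarOp = I;
    }
    return VarOp;
  }

  int main() {
    std::printf("%u\n", uniqueVariantOperand({true, false, true})); // 2
    std::printf("%u\n", uniqueVariantOperand({false, false}));      // 0
    std::printf("%u\n", uniqueVariantOperand({true, true}));        // 0
  }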
>From da9376ea9f42a713713a962736ec3b98a56490c7 Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Tue, 9 Dec 2025 01:00:32 -0800
Subject: [PATCH 24/30] use createOverflowingOp to generate VPInstruction
recipe
---
.../Vectorize/LoopVectorizationPlanner.h | 14 ++++++++++++++
.../Transforms/Vectorize/VPlanTransforms.cpp | 19 +++++++------------
2 files changed, 21 insertions(+), 12 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h b/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
index 0b8796f646ae3..0bc5bf11135c3 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
@@ -376,6 +376,20 @@ class VPBuilder {
return tryInsertInstruction(new VPExpandSCEVRecipe(Expr));
}
+ VPVectorPointerRecipe *
+ createVectorPointer(VPValue *Ptr, Type *SourceElementTy, VPValue *Stride,
+ GEPNoWrapFlags GEPFlags, DebugLoc DL) {
+ return tryInsertInstruction(
+ new VPVectorPointerRecipe(Ptr, SourceElementTy, Stride, GEPFlags, DL));
+ }
+
+ VPWidenMemIntrinsicRecipe *
+ createWidenMemIntrinsic(LoadInst &LI, ArrayRef<VPValue *> CallArguments,
+ const VPIRMetadata &MD, DebugLoc DL) {
+ return tryInsertInstruction(
+ new VPWidenMemIntrinsicRecipe(LI, CallArguments, MD, DL));
+ }
+
//===--------------------------------------------------------------------===//
// RAII helpers.
//===--------------------------------------------------------------------===//
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 52f80fedada0b..e6068e7eb9ccf 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -5942,11 +5942,10 @@ static std::pair<VPValue *, VPValue *> matchStridedStart(VPValue *CurIndex) {
WidenR->getDebugLoc());
StartR->insertBefore(WidenR);
+ VPBuilder Builder(WidenR);
unsigned InvIdx = VarIdx == 0 ? 1 : 0;
auto *StrideR =
- new VPInstruction(Opcode, {Stride, WidenR->getOperand(InvIdx)},
- VPRecipeWithIRFlags::WrapFlagsTy{false, false});
- StrideR->insertBefore(WidenR);
+ Builder.createOverflowingOp(Opcode, {Stride, WidenR->getOperand(InvIdx)});
return {StartR, StrideR};
}
@@ -6067,11 +6066,11 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
TypeInfo.inferScalarType(&Plan.getVF()), DebugLoc::getUnknown());
}
+ VPBuilder Builder(LoadR);
// Create a new vector pointer for strided access.
- auto *NewPtr = new VPVectorPointerRecipe(
+ auto *NewPtr = Builder.createVectorPointer(
BasePtr, ElementTy, StrideInElement, Ptr->getGEPNoWrapFlags(),
Ptr->getDebugLoc());
- NewPtr->insertBefore(LoadR);
const DataLayout &DL = Ingredient.getDataLayout();
TypeSize TS = DL.getTypeAllocSize(ElementTy);
@@ -6081,11 +6080,8 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
if (TypeScale != 1) {
VPValue *ScaleVPV = Plan.getConstantInt(
TypeInfo.inferScalarType(StrideInElement), TypeScale);
- auto *ScaledStride =
- new VPInstruction(Instruction::Mul, {StrideInElement, ScaleVPV},
- VPRecipeWithIRFlags::WrapFlagsTy{false, false});
- ScaledStride->insertBefore(LoadR);
- StrideInBytes = ScaledStride;
+ StrideInBytes = Builder.createOverflowingOp(
+ Instruction::Mul, {StrideInElement, ScaleVPV});
}
VPValue *Mask;
@@ -6093,10 +6089,9 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
Mask = LoadMask;
else
Mask = Plan.getTrue();
- auto *StridedLoad = new VPWidenMemIntrinsicRecipe(
+ auto *StridedLoad = Builder.createWidenMemIntrinsic(
*cast<LoadInst>(&Ingredient), {NewPtr, StrideInBytes, Mask, I32VF},
*LoadR, LoadR->getDebugLoc());
- StridedLoad->insertBefore(LoadR);
LoadR->replaceAllUsesWith(StridedLoad);
ToErase.push_back(LoadR);
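The TypeScale logic above converts the stride from elements to bytes, skipping the multiply when the element's alloc size is 1. The arithmetic, as a standalone sketch:

  #include <cstdint>
  #include <cstdio>

  int64_t strideInBytes(int64_t StrideInElements, uint64_t TypeAllocSize) {
    if (TypeAllocSize == 1) // TypeScale == 1: element stride is already bytes
      return StrideInElements;
    return StrideInElements * int64_t(TypeAllocSize);
  }

  int main() {
    std::printf("%lld\n", (long long)strideInBytes(4, 4));  // 16, as in the tests
    std::printf("%lld\n", (long long)strideInBytes(-4, 4)); // -16, reversed loop
    std::printf("%lld\n", (long long)strideInBytes(7, 1));  // no multiply emitted
  }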
>From a3a86b5543455e4375b70ce8fceebcbd6c825c9c Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Tue, 9 Dec 2025 01:41:47 -0800
Subject: [PATCH 25/30] Replace VPReplicateRecipe with VPInstruction
---
llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index e6068e7eb9ccf..d15ce61d37738 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -5934,15 +5934,13 @@ static std::pair<VPValue *, VPValue *> matchStridedStart(VPValue *CurIndex) {
if (!Start)
return {nullptr, nullptr};
+ VPBuilder Builder(WidenR);
SmallVector<VPValue *> StartOps(WidenR->operands());
StartOps[VarIdx] = Start;
- auto *StartR = new VPReplicateRecipe(WidenR->getUnderlyingInstr(), StartOps,
- /*IsUniform*/ true, /*Mask*/ nullptr,
- /*Flags*/ *WidenR, /*Metadata*/ *WidenR,
- WidenR->getDebugLoc());
- StartR->insertBefore(WidenR);
-
- VPBuilder Builder(WidenR);
+ auto *StartR = Builder.createOverflowingOp(
+ Opcode, StartOps,
+ {WidenR->hasNoUnsignedWrap(), WidenR->hasNoSignedWrap()},
+ WidenR->getDebugLoc());
unsigned InvIdx = VarIdx == 0 ? 1 : 0;
auto *StrideR =
Builder.createOverflowingOp(Opcode, {Stride, WidenR->getOperand(InvIdx)});
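The decomposition matchStridedStart performs rests on a simple affine identity: if an index takes the value start + i*stride at iteration i, multiplying it by a loop-invariant C keeps it affine with both components scaled by C. A plain integer check of the identity the rebuilt start and stride instructions compute:

  #include <cassert>
  #include <cstdint>

  struct Affine { int64_t Start, Stride; }; // value at iteration i: Start + i*Stride

  // binop(variant, uniform) with a Mul opcode, in scalar form.
  Affine mulByInvariant(Affine A, int64_t C) { return {A.Start * C, A.Stride * C}; }

  int main() {
    Affine IV{10, 3}; // e.g. a widened induction with start 10, step 3
    Affine Scaled = mulByInvariant(IV, 4);
    for (int64_t I = 0; I < 8; ++I)
      assert((10 + 3 * I) * 4 == Scaled.Start + Scaled.Stride * I);
  }

This is the computation the createOverflowingOp calls above rebuild in VPlan form, now as scalar VPInstructions rather than a replicated recipe.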
>From 1be71ab2ef181c08ff4705194beb016e63687770 Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Mon, 12 Jan 2026 01:10:31 -0800
Subject: [PATCH 26/30] Rename Offset to VFxPart
---
llvm/lib/Transforms/Vectorize/VPlan.h | 10 +++++-----
llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp | 12 +++++++++---
llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp | 2 +-
llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp | 7 +------
.../LoopVectorize/RISCV/strided-accesses.ll | 4 ++--
5 files changed, 18 insertions(+), 17 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index 81df91245d30c..275b6a04326d6 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -2201,8 +2201,8 @@ class VPVectorEndPointerRecipe : public VPRecipeWithIRFlags,
/// A recipe to compute the pointers for widened memory accesses of \p
/// SourceElementTy, with the \p Stride expressed in units of \p
-/// SourceElementTy. Unrolling adds an extra offset operand for unrolled parts >
-/// 0 and it produces `GEP Ptr, Offset`. The offset for unrolled part 0 is 0.
+/// SourceElementTy. Unrolling adds an extra \p VFxPart operand for unrolled
+/// parts > 0 and it produces `GEP Ptr, VFxPart * Stride`.
class VPVectorPointerRecipe : public VPRecipeWithIRFlags {
Type *SourceElementTy;
@@ -2217,7 +2217,7 @@ class VPVectorPointerRecipe : public VPRecipeWithIRFlags {
VPValue *getStride() const { return getOperand(1); }
- VPValue *getOffset() {
+ VPValue *getVFxPart() const {
return getNumOperands() > 2 ? getOperand(2) : nullptr;
}
@@ -2243,8 +2243,8 @@ class VPVectorPointerRecipe : public VPRecipeWithIRFlags {
auto *Clone =
new VPVectorPointerRecipe(getOperand(0), SourceElementTy, getStride(),
getGEPNoWrapFlags(), getDebugLoc());
- if (auto *Off = getOffset())
- Clone->addOperand(Off);
+ if (auto *VFxPart = getVFxPart())
+ Clone->addOperand(VFxPart);
return Clone;
}
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index 1a01cd5b23fa8..49a727704c13c 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -2735,11 +2735,17 @@ void VPVectorEndPointerRecipe::printRecipe(raw_ostream &O, const Twine &Indent,
#endif
void VPVectorPointerRecipe::execute(VPTransformState &State) {
+ assert(getVFxPart() &&
+ "Expected prior simplification of recipe without VFxPart");
+
auto &Builder = State.Builder;
- assert(getOffset() &&
- "Expected prior simplification of recipe without offset");
Value *Ptr = State.get(getOperand(0), VPLane(0));
- Value *Offset = State.get(getOffset(), true);
+ Value *Offset = State.get(getVFxPart(), true);
+ if (!match(getStride(), m_One())) {
+ Value *Stride = Builder.CreateZExtOrTrunc(State.get(getStride(), true),
+ Offset->getType());
+ Offset = Builder.CreateMul(Offset, Stride);
+ }
Value *ResultPtr = Builder.CreateGEP(getSourceElementType(), Ptr, Offset, "",
getGEPNoWrapFlags());
State.set(this, ResultPtr, /*IsScalar*/ true);
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index d15ce61d37738..70af3762eec14 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -1559,7 +1559,7 @@ static void simplifyRecipe(VPSingleDefRecipe *Def, VPTypeAnalysis &TypeInfo) {
// Simplify unrolled VectorPointer without offset, or with zero offset, to
// just the pointer operand.
if (auto *VPR = dyn_cast<VPVectorPointerRecipe>(Def))
- if (!VPR->getOffset() || match(VPR->getOffset(), m_ZeroInt()))
+ if (!VPR->getVFxPart() || match(VPR->getVFxPart(), m_ZeroInt()))
return VPR->replaceAllUsesWith(VPR->getOperand(0));
// VPScalarIVSteps after unrolling can be replaced by their start value, if
diff --git a/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp b/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp
index 03356f36e476b..2383d60561bdd 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp
@@ -344,14 +344,9 @@ void UnrollState::unrollRecipeByUF(VPRecipeBase &R) {
VPValue *VFxPart = Builder.createOverflowingOp(
Instruction::Mul, {VF, Plan.getConstantInt(IndexTy, Part)},
{true, true});
- VPValue *Stride = Builder.createScalarZExtOrTrunc(
- VPR->getStride(), IndexTy, TypeInfo.inferScalarType(VPR->getStride()),
- DebugLoc::getUnknown());
- VPValue *Offset =
- Builder.createOverflowingOp(Instruction::Mul, {VFxPart, Stride});
Copy->setOperand(0, VPR->getOperand(0));
Copy->setOperand(1, VPR->getOperand(1));
- Copy->addOperand(Offset);
+ Copy->addOperand(VFxPart);
continue;
}
if (auto *Red = dyn_cast<VPReductionRecipe>(&R)) {
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
index eb6da12e84bfa..7c5725f6c128b 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
@@ -66,7 +66,7 @@ define void @single_constant_stride_int_scaled(ptr %p) {
; CHECK-UF2-NEXT: [[TMP22:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP8]]
; CHECK-UF2-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[TMP9]]
; CHECK-UF2-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[TMP10]]
-; CHECK-UF2-NEXT: [[TMP17:%.*]] = shl i64 [[TMP3]], 3
+; CHECK-UF2-NEXT: [[TMP17:%.*]] = mul i64 [[TMP3]], 8
; CHECK-UF2-NEXT: [[TMP18:%.*]] = getelementptr i32, ptr [[TMP22]], i64 [[TMP17]]
; CHECK-UF2-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP22]], i64 32, <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
; CHECK-UF2-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP18]], i64 32, <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
@@ -175,7 +175,7 @@ define void @single_constant_stride_int_iv(ptr %p) {
; CHECK-UF2-NEXT: [[TMP18:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[OFFSET_IDX]]
; CHECK-UF2-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-UF2-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[STEP_ADD]]
-; CHECK-UF2-NEXT: [[TMP14:%.*]] = shl i64 [[TMP3]], 6
+; CHECK-UF2-NEXT: [[TMP14:%.*]] = mul i64 [[TMP3]], 64
; CHECK-UF2-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[TMP18]], i64 [[TMP14]]
; CHECK-UF2-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP18]], i64 256, <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
; CHECK-UF2-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP15]], i64 256, <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
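After the rename, the unrolled recipe carries VFxPart = Part * VF and leaves the stride multiply to execute(), which is what turns the old strength-reduced shl into the plain `mul i64 [[TMP3]], 8` in the updated CHECK lines. A standalone model of the part-pointer computation (hypothetical helper over int32 data):

  #include <cassert>
  #include <cstdint>

  const int32_t *partPointer(const int32_t *Base, uint64_t VF, unsigned Part,
                             int64_t StrideElems) {
    uint64_t VFxPart = VF * Part;                 // operand added by unrolling
    return Base + int64_t(VFxPart) * StrideElems; // stride applied in execute()
  }

  int main() {
    int32_t Data[256] = {};
    // UF=2 with a runtime VF of 4 and a stride of 8 elements: part 1 starts
    // 4 * 8 = 32 elements past part 0.
    assert(partPointer(Data, 4, 1, 8) == Data + 32);
  }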
>From ef78285ba410d25e87e9c1d549adc671b3f4d64e Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Mon, 9 Feb 2026 17:20:55 -0800
Subject: [PATCH 27/30] Use VPlan-based SCEV analysis
---
.../Transforms/Vectorize/LoopVectorize.cpp | 4 +-
llvm/lib/Transforms/Vectorize/VPlan.h | 23 +--
.../Transforms/Vectorize/VPlanTransforms.cpp | 150 ++++--------------
.../Transforms/Vectorize/VPlanTransforms.h | 5 +-
.../RISCV/blocks-with-dead-instructions.ll | 4 +-
.../LoopVectorize/RISCV/dead-ops-cost.ll | 9 +-
.../LoopVectorize/RISCV/induction-costs.ll | 4 +
.../RISCV/masked_gather_scatter.ll | 24 +--
.../LoopVectorize/RISCV/pointer-induction.ll | 8 +-
.../LoopVectorize/RISCV/reg-usage-prune-vf.ll | 104 ++++--------
.../LoopVectorize/RISCV/strided-accesses.ll | 70 ++++----
.../RISCV/tail-folding-gather-scatter.ll | 6 +-
.../RISCV/tail-folding-interleave.ll | 52 ++++--
.../truncate-to-minimal-bitwidth-cost.ll | 82 +++++++++-
14 files changed, 261 insertions(+), 284 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index a00b1a7cea0ce..ef6fff741100c 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -8378,8 +8378,8 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(
// Convert memory recipes to strided access recipes if the strided access is
// legal and profitable.
- RUN_VPLAN_PASS(VPlanTransforms::convertToStridedAccesses, *Plan, CostCtx,
- Range);
+ RUN_VPLAN_PASS(VPlanTransforms::convertToStridedAccesses, *Plan, PSE,
+ *OrigLoop, CostCtx, Range);
for (ElementCount VF : Range)
Plan->addVF(VF);
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index 275b6a04326d6..3565bfc8e83a2 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -2073,6 +2073,14 @@ class VPHistogramRecipe : public VPRecipeBase {
class LLVM_ABI_FOR_TEST VPWidenGEPRecipe : public VPRecipeWithIRFlags {
Type *SourceElementTy;
+ bool isPointerLoopInvariant() const {
+ return getOperand(0)->isDefinedOutsideLoopRegions();
+ }
+
+ bool isIndexLoopInvariant(unsigned I) const {
+ return getOperand(I + 1)->isDefinedOutsideLoopRegions();
+ }
+
public:
VPWidenGEPRecipe(GetElementPtrInst *GEP, ArrayRef<VPValue *> Operands,
const VPIRFlags &Flags = {},
@@ -2098,21 +2106,6 @@ class LLVM_ABI_FOR_TEST VPWidenGEPRecipe : public VPRecipeWithIRFlags {
/// This recipe generates a GEP instruction.
unsigned getOpcode() const { return Instruction::GetElementPtr; }
- bool isPointerLoopInvariant() const {
- return getOperand(0)->isDefinedOutsideLoopRegions();
- }
-
- bool isIndexLoopInvariant(unsigned I) const {
- return getOperand(I + 1)->isDefinedOutsideLoopRegions();
- }
-
- /// Returns the element type for the first \p I indices of this recipe.
- Type *getIndexedType(unsigned I) const {
- auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr());
- SmallVector<Value *, 4> Ops(GEP->idx_begin(), GEP->idx_begin() + I);
- return GetElementPtrInst::getIndexedType(SourceElementTy, Ops);
- }
-
/// Generate the gep nodes.
void execute(VPTransformState &State) override;
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 70af3762eec14..eb64475d24efd 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -2984,8 +2984,7 @@ static VPRecipeBase *optimizeMaskToEVL(VPValue *HeaderMask,
if (match(&CurRecipe, m_Intrinsic<Intrinsic::experimental_vp_strided_load>(
m_VPValue(Addr), m_VPValue(Stride),
m_RemoveMask(HeaderMask, Mask),
- m_VPInstruction<Instruction::Trunc>(
- m_Specific(&Plan->getVF()))))) {
+ m_TruncOrSelf(m_Specific(&Plan->getVF()))))) {
auto *I = cast<VPWidenIntrinsicRecipe>(&CurRecipe);
if (!Mask)
Mask = Plan->getTrue();
@@ -3101,7 +3100,8 @@ static void fixupVFUsersForEVL(VPlan &Plan, VPValue &EVL) {
auto *R = cast<VPRecipeBase>(U);
return (R->getParent()->getParent() != LoopRegion) ||
isa<VPVectorEndPointerRecipe, VPScalarIVStepsRecipe,
- VPWidenIntOrFpInductionRecipe>(R);
+ VPWidenIntOrFpInductionRecipe,
+ VPWidenMemIntrinsicRecipe>(R);
}) &&
"User of VF that we can't transform to EVL.");
Plan.getVF().replaceUsesWithIf(&EVL, [](VPUser &U, unsigned Idx) {
@@ -5908,103 +5908,19 @@ void VPlanTransforms::createPartialReductions(VPlan &Plan,
transformToPartialReduction(Chain, Range, CostCtx, Plan);
}
-static std::pair<VPValue *, VPValue *> matchStridedStart(VPValue *CurIndex) {
- // TODO: Support VPWidenPointerInductionRecipe.
- if (auto *WidenIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(CurIndex))
- return {WidenIV, WidenIV->getStepValue()};
-
- auto *WidenR = dyn_cast<VPWidenRecipe>(CurIndex);
- if (!WidenR || !WidenR->getUnderlyingInstr())
- return {nullptr, nullptr};
-
- unsigned Opcode = WidenR->getOpcode();
- // TODO: Support Instruction::Add and Instruction::Or.
- if (Opcode != Instruction::Shl && Opcode != Instruction::Mul)
- return {nullptr, nullptr};
-
- // Match the pattern binop(variant, uniform), or binop(uniform, variant) if
- // the binary operator is commutative.
- bool IsLHSUniform = vputils::isSingleScalar(WidenR->getOperand(0));
- if (IsLHSUniform == vputils::isSingleScalar(WidenR->getOperand(1)) ||
- (IsLHSUniform && !Instruction::isCommutative(Opcode)))
- return {nullptr, nullptr};
- unsigned VarIdx = IsLHSUniform ? 1 : 0;
-
- auto [Start, Stride] = matchStridedStart(WidenR->getOperand(VarIdx));
- if (!Start)
- return {nullptr, nullptr};
-
- VPBuilder Builder(WidenR);
- SmallVector<VPValue *> StartOps(WidenR->operands());
- StartOps[VarIdx] = Start;
- auto *StartR = Builder.createOverflowingOp(
- Opcode, StartOps,
- {WidenR->hasNoUnsignedWrap(), WidenR->hasNoSignedWrap()},
- WidenR->getDebugLoc());
- unsigned InvIdx = VarIdx == 0 ? 1 : 0;
- auto *StrideR =
- Builder.createOverflowingOp(Opcode, {Stride, WidenR->getOperand(InvIdx)});
- return {StartR, StrideR};
-}
-
-/// Checks if the given VPWidenGEPRecipe \p WidenGEP represents a strided
-/// access. If so, it creates recipes representing the base pointer and stride
-/// in element units, and returns a tuple of {base pointer, stride, element
-/// type}. Otherwise, returns a tuple where all elements are nullptr.
-static std::tuple<VPValue *, VPValue *, Type *>
-determineBaseAndStride(VPWidenGEPRecipe *WidenGEP) {
- // TODO: Check if the base pointer is strided.
- if (!WidenGEP->isPointerLoopInvariant())
- return {nullptr, nullptr, nullptr};
-
- // Find the only one variant index.
- unsigned VarOp = 0;
- for (unsigned I = 1, E = WidenGEP->getNumOperands(); I < E; ++I) {
- if (WidenGEP->isIndexLoopInvariant(I - 1))
- continue;
-
- if (VarOp != 0)
- return {nullptr, nullptr, nullptr};
- VarOp = I;
- }
-
- if (!VarOp)
- return {nullptr, nullptr, nullptr};
-
- Type *ElementTy = WidenGEP->getIndexedType(VarOp - 1);
- assert(!ElementTy->isScalableTy() && !ElementTy->isVectorTy() &&
- "Unexpected indexed type");
- if (ElementTy->isStructTy())
- return {nullptr, nullptr, nullptr};
-
- VPValue *IndexVPV = WidenGEP->getOperand(VarOp);
- auto [Start, Stride] = matchStridedStart(IndexVPV);
- if (!Start)
- return {nullptr, nullptr, nullptr};
-
- SmallVector<VPValue *> Ops(WidenGEP->operands());
- Ops[VarOp] = Start;
- auto *BasePtr = new VPReplicateRecipe(
- WidenGEP->getUnderlyingInstr(), Ops,
- /*IsUniform*/ true, /*Mask*/ nullptr, /*Flags*/ *WidenGEP,
- /*Metadata*/ {}, WidenGEP->getDebugLoc());
- BasePtr->insertBefore(WidenGEP);
-
- return {BasePtr, Stride, ElementTy};
-}
-
-void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
+void VPlanTransforms::convertToStridedAccesses(VPlan &Plan,
+ PredicatedScalarEvolution &PSE,
+ Loop &L, VPCostContext &Ctx,
VFRange &Range) {
if (Plan.hasScalarVFOnly())
return;
VPTypeAnalysis TypeInfo(Plan);
- DenseMap<VPWidenGEPRecipe *, std::tuple<VPValue *, VPValue *, Type *>>
- StrideCache;
+ VPRegionBlock *VectorLoop = Plan.getVectorLoopRegion();
SmallVector<VPWidenMemoryRecipe *> ToErase;
VPValue *I32VF = nullptr;
for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
- vp_depth_first_shallow(Plan.getVectorLoopRegion()->getEntry()))) {
+ vp_depth_first_shallow(VectorLoop->getEntry()))) {
for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
auto *LoadR = dyn_cast<VPWidenLoadRecipe>(&R);
// TODO: Support strided store.
@@ -6040,21 +5956,13 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
Range))
continue;
- // Try to get base and stride here.
- VPValue *BasePtr, *StrideInElement;
- Type *ElementTy;
- auto It = StrideCache.find(Ptr);
- if (It != StrideCache.end())
- std::tie(BasePtr, StrideInElement, ElementTy) = It->second;
- else
- std::tie(BasePtr, StrideInElement, ElementTy) = StrideCache[Ptr] =
- determineBaseAndStride(Ptr);
-
- // Skip if the memory access is not a strided access.
- if (!BasePtr)
+ const SCEV *PtrSCEV = vputils::getSCEVExprForVPValue(Ptr, PSE, &L);
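+      // The access is strided iff the pointer SCEV is an affine AddRec
+      // {Start,+,Step} with a constant Step; pointer SCEVs count bytes, so
+      // Step is the stride in bytes.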
+ const SCEV *Start;
+ const APInt *Step;
+ // TODO: Support loop invariant stride.
+ if (!match(PtrSCEV,
+ m_scev_AffineAddRec(m_SCEV(Start), m_scev_APInt(Step))))
continue;
- assert(StrideInElement && ElementTy &&
- "Can not get stride information for a strided access");
// Add VF of i32 version for EVL.
if (!I32VF) {
@@ -6065,22 +5973,24 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
}
VPBuilder Builder(LoadR);
+      // Create the base pointer of the strided access.
+ VPValue *StartVPV = vputils::getOrCreateVPValueForSCEVExpr(Plan, Start);
+ VPValue *StrideInBytes =
+ Plan.getConstantInt(VectorLoop->getCanonicalIVType(),
+ Step->getSExtValue(), /*IsSigned=*/true);
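+      // Rebuild the address as Start + CanonicalIV * Step, carrying the
+      // AddRec's wrap flags over to the multiply and the pointer add.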
+ auto *AddRecPtr = cast<SCEVAddRecExpr>(PtrSCEV);
+ auto *Offset = Builder.createOverflowingOp(
+ Instruction::Mul, {VectorLoop->getCanonicalIV(), StrideInBytes},
+ {AddRecPtr->hasNoUnsignedWrap(), AddRecPtr->hasNoSignedWrap()});
+ auto *BasePtr = Builder.createNoWrapPtrAdd(
+ StartVPV, Offset,
+ AddRecPtr->hasNoUnsignedWrap() ? GEPNoWrapFlags::noUnsignedWrap()
+ : GEPNoWrapFlags::none());
+
// Create a new vector pointer for strided access.
auto *NewPtr = Builder.createVectorPointer(
- BasePtr, ElementTy, StrideInElement, Ptr->getGEPNoWrapFlags(),
- Ptr->getDebugLoc());
-
- const DataLayout &DL = Ingredient.getDataLayout();
- TypeSize TS = DL.getTypeAllocSize(ElementTy);
- unsigned TypeScale = TS.getFixedValue();
- VPValue *StrideInBytes = StrideInElement;
- // Scale the stride by the size of the indexed type.
- if (TypeScale != 1) {
- VPValue *ScaleVPV = Plan.getConstantInt(
- TypeInfo.inferScalarType(StrideInElement), TypeScale);
- StrideInBytes = Builder.createOverflowingOp(
- Instruction::Mul, {StrideInElement, ScaleVPV});
- }
+ BasePtr, Type::getInt8Ty(Plan.getContext()), StrideInBytes,
+ Ptr->getGEPNoWrapFlags(), Ptr->getDebugLoc());
VPValue *Mask;
if (VPValue *LoadMask = LoadR->getMask())
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
index 1bccc4317ff95..7e1b369c49e95 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
@@ -302,8 +302,9 @@ struct VPlanTransforms {
/// Transform widen memory recipes into strided access recipes when legal
/// and profitable. Clamps \p Range to maintain consistency with widen
/// decisions of \p Plan, and uses \p Ctx to evaluate the cost.
- static void convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
- VFRange &Range);
+ static void convertToStridedAccesses(VPlan &Plan,
+ PredicatedScalarEvolution &PSE, Loop &L,
+ VPCostContext &Ctx, VFRange &Range);
/// Remove dead recipes from \p Plan.
static void removeDeadRecipes(VPlan &Plan);
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll b/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll
index f023324baf9f1..b08af505d4134 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll
@@ -325,9 +325,9 @@ define void @multiple_blocks_with_dead_inst_multiple_successors_6(ptr %src, i1 %
; CHECK-NEXT: [[TMP16:%.*]] = mul nsw i64 3, [[TMP12]]
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP16]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[EVL_BASED_IV]], 3
-; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 8 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP10:%.*]] = mul i64 [[EVL_BASED_IV]], 6
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP10]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vp.strided.load.nxv8i16.p0.i64(ptr align 2 [[TMP21]], i64 6, <vscale x 8 x i1> splat (i1 true), i32 [[TMP27]])
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq <vscale x 8 x i16> [[WIDE_MASKED_GATHER]], zeroinitializer
; CHECK-NEXT: [[TMP14:%.*]] = select <vscale x 8 x i1> [[TMP17]], <vscale x 8 x i1> [[TMP8]], <vscale x 8 x i1> zeroinitializer
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll
index bb926ab181cb7..91cc28cd0b806 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll
@@ -18,6 +18,8 @@ define void @dead_load(ptr %p, i16 %start) {
; CHECK-NEXT: [[TMP3:%.*]] = udiv i64 [[TMP2]], 3
; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[UMIN]], [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP4]], 1
+; CHECK-NEXT: [[TMP6:%.*]] = shl nsw i64 [[START_EXT]], 1
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP6]]
; CHECK-NEXT: br label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP15:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
@@ -83,6 +85,7 @@ define i8 @dead_live_out_due_to_scalar_epilogue_required(ptr %src, ptr %dst) {
; CHECK-NEXT: [[TMP1:%.*]] = mul <vscale x 16 x i32> [[TMP0]], splat (i32 4)
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP1]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[AVL:%.*]] = phi i32 [ 252, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 16, i1 true)
@@ -90,10 +93,12 @@ define i8 @dead_live_out_due_to_scalar_epilogue_required(ptr %src, ptr %dst) {
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP3]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP9:%.*]] = sext <vscale x 16 x i32> [[VEC_IND]] to <vscale x 16 x i64>
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[SRC]], <vscale x 16 x i64> [[TMP9]]
-; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 16 x i8> @llvm.vp.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> align 1 [[TMP6]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP2]]), !alias.scope [[META3:![0-9]+]]
+; CHECK-NEXT: [[TMP12:%.*]] = shl i32 [[EVL_BASED_IV]], 2
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[SRC]], i32 [[TMP12]]
+; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vp.strided.load.nxv16i8.p0.i32(ptr align 1 [[TMP13]], i32 4, <vscale x 16 x i1> splat (i1 true), i32 [[TMP2]]), !alias.scope [[META3:![0-9]+]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[DST]], <vscale x 16 x i64> [[TMP9]]
; CHECK-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x ptr> align 1 [[TMP7]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP2]]), !alias.scope [[META6:![0-9]+]], !noalias [[META3]]
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP2]], [[EVL_BASED_IV]]
; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP2]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[AVL_NEXT]], 0
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll b/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll
index 66a7493b067c8..12f9db141024d 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll
@@ -20,6 +20,10 @@ define void @skip_free_iv_truncate(i16 %x, ptr %A) #0 {
; CHECK-NEXT: [[TMP3:%.*]] = udiv i64 [[TMP2]], 3
; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[UMIN21]], [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP4]], 1
+; CHECK-NEXT: [[TMP6:%.*]] = shl nsw i64 [[X_I64]], 3
+; CHECK-NEXT: [[SCEVGEP11:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP6]]
+; CHECK-NEXT: [[TMP7:%.*]] = add nsw i64 [[TMP6]], -8
+; CHECK-NEXT: [[SCEVGEP17:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP7]]
; CHECK-NEXT: br label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP31:%.*]] = shl nsw i64 [[X_I64]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll b/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll
index fb3d1bd277e3e..08efc9e306764 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll
@@ -57,13 +57,13 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea
; RV32-NEXT: [[TMP11:%.*]] = shl nuw nsw i64 [[TMP8]], 4
; RV32-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP11]], i64 0
; RV32-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-; RV32-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[EVL_BASED_IV]], 16
-; RV32-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], i64 [[OFFSET_IDX]]
-; RV32-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0.i64(ptr align 4 [[TMP16]], i64 64, <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]]), !alias.scope [[META0:![0-9]+]], !noalias [[META3:![0-9]+]]
+; RV32-NEXT: [[TMP12:%.*]] = shl i64 [[EVL_BASED_IV]], 6
+; RV32-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[TRIGGER]], i64 [[TMP12]]
+; RV32-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0.i64(ptr align 4 [[TMP13]], i64 64, <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]]), !alias.scope [[META0:![0-9]+]], !noalias [[META3:![0-9]+]]
; RV32-NEXT: [[TMP14:%.*]] = icmp slt <vscale x 2 x i32> [[WIDE_MASKED_GATHER]], splat (i32 100)
-; RV32-NEXT: [[TMP20:%.*]] = shl nuw nsw i64 [[OFFSET_IDX]], 1
-; RV32-NEXT: [[TMP21:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[TMP20]]
-; RV32-NEXT: [[WIDE_MASKED_GATHER6:%.*]] = call <vscale x 2 x double> @llvm.experimental.vp.strided.load.nxv2f64.p0.i64(ptr align 8 [[TMP21]], i64 256, <vscale x 2 x i1> [[TMP14]], i32 [[TMP10]]), !alias.scope [[META5:![0-9]+]]
+; RV32-NEXT: [[TMP16:%.*]] = shl nuw nsw <vscale x 2 x i64> [[VEC_IND]], splat (i64 1)
+; RV32-NEXT: [[TMP20:%.*]] = getelementptr inbounds double, ptr [[B]], <vscale x 2 x i64> [[TMP16]]
+; RV32-NEXT: [[WIDE_MASKED_GATHER6:%.*]] = call <vscale x 2 x double> @llvm.vp.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> align 8 [[TMP20]], <vscale x 2 x i1> [[TMP14]], i32 [[TMP10]]), !alias.scope [[META5:![0-9]+]]
; RV32-NEXT: [[TMP17:%.*]] = sitofp <vscale x 2 x i32> [[WIDE_MASKED_GATHER]] to <vscale x 2 x double>
; RV32-NEXT: [[TMP18:%.*]] = fadd <vscale x 2 x double> [[WIDE_MASKED_GATHER6]], [[TMP17]]
; RV32-NEXT: [[TMP19:%.*]] = getelementptr inbounds double, ptr [[A]], <vscale x 2 x i64> [[VEC_IND]]
@@ -128,13 +128,13 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea
; RV64-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP8]], 4
; RV64-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP4]], i64 0
; RV64-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-; RV64-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[EVL_BASED_IV]], 16
-; RV64-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], i64 [[OFFSET_IDX]]
-; RV64-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0.i64(ptr align 4 [[TMP9]], i64 64, <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]]), !alias.scope [[META0:![0-9]+]]
+; RV64-NEXT: [[TMP5:%.*]] = shl nuw i64 [[EVL_BASED_IV]], 6
+; RV64-NEXT: [[TMP6:%.*]] = getelementptr nuw i8, ptr [[TRIGGER]], i64 [[TMP5]]
+; RV64-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0.i64(ptr align 4 [[TMP6]], i64 64, <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]]), !alias.scope [[META0:![0-9]+]]
; RV64-NEXT: [[TMP14:%.*]] = icmp slt <vscale x 2 x i32> [[WIDE_MASKED_GATHER]], splat (i32 100)
-; RV64-NEXT: [[TMP13:%.*]] = shl nuw nsw i64 [[OFFSET_IDX]], 1
-; RV64-NEXT: [[TMP28:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[TMP13]]
-; RV64-NEXT: [[WIDE_MASKED_GATHER6:%.*]] = call <vscale x 2 x double> @llvm.experimental.vp.strided.load.nxv2f64.p0.i64(ptr align 8 [[TMP28]], i64 256, <vscale x 2 x i1> [[TMP14]], i32 [[TMP10]]), !alias.scope [[META3:![0-9]+]]
+; RV64-NEXT: [[TMP9:%.*]] = shl nuw nsw <vscale x 2 x i64> [[VEC_IND]], splat (i64 1)
+; RV64-NEXT: [[TMP11:%.*]] = getelementptr inbounds double, ptr [[B]], <vscale x 2 x i64> [[TMP9]]
+; RV64-NEXT: [[WIDE_MASKED_GATHER6:%.*]] = call <vscale x 2 x double> @llvm.vp.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> align 8 [[TMP11]], <vscale x 2 x i1> [[TMP14]], i32 [[TMP10]]), !alias.scope [[META3:![0-9]+]]
; RV64-NEXT: [[TMP17:%.*]] = sitofp <vscale x 2 x i32> [[WIDE_MASKED_GATHER]] to <vscale x 2 x double>
; RV64-NEXT: [[TMP18:%.*]] = fadd <vscale x 2 x double> [[WIDE_MASKED_GATHER6]], [[TMP17]]
; RV64-NEXT: [[TMP19:%.*]] = getelementptr inbounds double, ptr [[A]], <vscale x 2 x i64> [[VEC_IND]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/pointer-induction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/pointer-induction.ll
index 786ef735fc7ad..bd3ccea350324 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/pointer-induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/pointer-induction.ll
@@ -71,6 +71,7 @@ define i1 @scalarize_ptr_induction(ptr %start, ptr %end, ptr noalias %dst, i1 %c
; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], [[START5]]
; CHECK-NEXT: [[TMP2:%.*]] = udiv i64 [[TMP1]], 12
; CHECK-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 1
+; CHECK-NEXT: [[SCEVGEP6:%.*]] = getelementptr i8, ptr [[START]], i64 4
; CHECK-NEXT: br label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[DST]], i64 8
@@ -91,19 +92,22 @@ define i1 @scalarize_ptr_induction(ptr %start, ptr %end, ptr noalias %dst, i1 %c
; CHECK-NEXT: [[BROADCAST_SPLAT7:%.*]] = shufflevector <vscale x 2 x ptr> [[BROADCAST_SPLATINSERT6]], <vscale x 2 x ptr> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[START]], %[[VECTOR_PH]] ], [ [[PTR_IND:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP3]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP13:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; CHECK-NEXT: [[TMP14:%.*]] = mul <vscale x 2 x i64> [[TMP13]], splat (i64 12)
; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[TMP14]]
; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, <vscale x 2 x ptr> [[VECTOR_GEP]], i64 4
-; CHECK-NEXT: [[TMP18:%.*]] = call <vscale x 2 x i32> @llvm.vp.gather.nxv2i32.nxv2p0(<vscale x 2 x ptr> align 4 [[TMP12]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP11]]), !alias.scope [[META3:![0-9]+]]
+; CHECK-NEXT: [[TMP12:%.*]] = mul i64 [[EVL_BASED_IV]], 12
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[SCEVGEP6]], i64 [[TMP12]]
+; CHECK-NEXT: [[TMP18:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0.i64(ptr align 4 [[TMP15]], i64 12, <vscale x 2 x i1> splat (i1 true), i32 [[TMP11]]), !alias.scope [[META3:![0-9]+]]
; CHECK-NEXT: [[TMP19:%.*]] = zext <vscale x 2 x i32> [[TMP18]] to <vscale x 2 x i64>
; CHECK-NEXT: [[TMP20:%.*]] = mul <vscale x 2 x i64> [[TMP19]], splat (i64 -7070675565921424023)
; CHECK-NEXT: [[TMP21:%.*]] = add <vscale x 2 x i64> [[TMP20]], splat (i64 -4)
; CHECK-NEXT: call void @llvm.vp.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> [[TMP21]], <vscale x 2 x ptr> align 1 [[BROADCAST_SPLAT]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP11]]), !alias.scope [[META6:![0-9]+]], !noalias [[META3]]
; CHECK-NEXT: [[TMP26:%.*]] = zext i32 [[TMP11]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP26]], [[EVL_BASED_IV]]
; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP26]]
; CHECK-NEXT: [[TMP27:%.*]] = mul i64 12, [[TMP26]]
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP27]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-prune-vf.ll b/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-prune-vf.ll
index 2ae485fc758f2..19435c1026175 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-prune-vf.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-prune-vf.ll
@@ -7,50 +7,32 @@ define void @f(ptr noalias %p0, ptr noalias %p1, ptr noalias %p2) {
; CHECK-LABEL: define void @f(
; CHECK-SAME: ptr noalias [[P0:%.*]], ptr noalias [[P1:%.*]], ptr noalias [[P2:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[P0]], i64 -1
; CHECK-NEXT: br label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul <vscale x 4 x i64> [[TMP0]], splat (i64 2)
-; CHECK-NEXT: [[TMP3:%.*]] = mul <vscale x 4 x i64> [[TMP0]], splat (i64 3)
-; CHECK-NEXT: [[TMP5:%.*]] = mul <vscale x 4 x i64> [[TMP0]], splat (i64 4)
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP1]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_IND3:%.*]] = phi <vscale x 4 x i64> [ [[TMP3]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT9:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_IND4:%.*]] = phi <vscale x 4 x i64> [ [[TMP5]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT10:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
-; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
-; CHECK-NEXT: [[TMP9:%.*]] = shl i64 [[TMP7]], 2
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP10:%.*]] = mul i64 3, [[TMP7]]
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP10]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT3]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 16, i1 true)
; CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[TMP7]], 1
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP8]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT5]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP13:%.*]] = sub <vscale x 4 x i64> [[VEC_IND]], splat (i64 1)
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[P0]], <vscale x 4 x i64> [[TMP13]]
-; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i8> @llvm.vp.gather.nxv4i8.nxv4p0(<vscale x 4 x ptr> align 1 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
-; CHECK-NEXT: [[TMP15:%.*]] = sub <vscale x 4 x i64> [[VEC_IND3]], splat (i64 1)
-; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[P0]], <vscale x 4 x i64> [[TMP15]]
-; CHECK-NEXT: [[WIDE_MASKED_GATHER9:%.*]] = call <vscale x 4 x i8> @llvm.vp.gather.nxv4i8.nxv4p0(<vscale x 4 x ptr> align 1 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
-; CHECK-NEXT: [[TMP17:%.*]] = sub <vscale x 4 x i64> [[VEC_IND4]], splat (i64 1)
-; CHECK-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[P0]], <vscale x 4 x i64> [[TMP17]]
-; CHECK-NEXT: [[WIDE_MASKED_GATHER10:%.*]] = call <vscale x 4 x i8> @llvm.vp.gather.nxv4i8.nxv4p0(<vscale x 4 x ptr> align 1 [[TMP18]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
-; CHECK-NEXT: [[TMP19:%.*]] = mul i64 [[EVL_BASED_IV]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i64 [[TMP8]]
+; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vp.strided.load.nxv16i8.p0.i64(ptr align 1 [[TMP2]], i64 2, <vscale x 16 x i1> splat (i1 true), i32 [[TMP6]])
+; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP7]], 3
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i64 [[TMP4]]
+; CHECK-NEXT: [[TMP10:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vp.strided.load.nxv16i8.p0.i64(ptr align 1 [[TMP5]], i64 3, <vscale x 16 x i1> splat (i1 true), i32 [[TMP6]])
+; CHECK-NEXT: [[TMP11:%.*]] = shl i64 [[TMP7]], 2
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vp.strided.load.nxv16i8.p0.i64(ptr align 1 [[TMP12]], i64 4, <vscale x 16 x i1> splat (i1 true), i32 [[TMP6]])
+; CHECK-NEXT: [[TMP19:%.*]] = mul i64 [[TMP7]], 3
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP19]]
; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[TMP20]], i8 0
; CHECK-NEXT: [[INTERLEAVE_EVL:%.*]] = mul nuw nsw i32 [[TMP6]], 3
-; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 12 x i8> @llvm.vector.interleave3.nxv12i8(<vscale x 4 x i8> [[WIDE_MASKED_GATHER]], <vscale x 4 x i8> [[WIDE_MASKED_GATHER9]], <vscale x 4 x i8> [[WIDE_MASKED_GATHER10]])
-; CHECK-NEXT: call void @llvm.vp.store.nxv12i8.p0(<vscale x 12 x i8> [[INTERLEAVED_VEC]], ptr align 1 [[TMP21]], <vscale x 12 x i1> splat (i1 true), i32 [[INTERLEAVE_EVL]])
-; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[EVL_BASED_IV]]
-; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]]
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT6]]
-; CHECK-NEXT: [[VEC_IND_NEXT9]] = add <vscale x 4 x i64> [[VEC_IND3]], [[BROADCAST_SPLAT4]]
-; CHECK-NEXT: [[VEC_IND_NEXT10]] = add <vscale x 4 x i64> [[VEC_IND4]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 48 x i8> @llvm.vector.interleave3.nxv48i8(<vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8> [[TMP10]], <vscale x 16 x i8> [[TMP9]])
+; CHECK-NEXT: call void @llvm.vp.store.nxv48i8.p0(<vscale x 48 x i8> [[INTERLEAVED_VEC]], ptr align 1 [[TMP21]], <vscale x 48 x i1> splat (i1 true), i32 [[INTERLEAVE_EVL]])
+; CHECK-NEXT: [[TMP13:%.*]] = zext i32 [[TMP6]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP13]], [[TMP7]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]]
; CHECK-NEXT: [[TMP23:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
@@ -61,50 +43,32 @@ define void @f(ptr noalias %p0, ptr noalias %p1, ptr noalias %p2) {
; NO-REG-PRESSURE-CHECK-LABEL: define void @f(
; NO-REG-PRESSURE-CHECK-SAME: ptr noalias [[P0:%.*]], ptr noalias [[P1:%.*]], ptr noalias [[P2:%.*]]) #[[ATTR0:[0-9]+]] {
; NO-REG-PRESSURE-CHECK-NEXT: [[ENTRY:.*:]]
+; NO-REG-PRESSURE-CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[P0]], i64 -1
; NO-REG-PRESSURE-CHECK-NEXT: br label %[[VECTOR_PH:.*]]
; NO-REG-PRESSURE-CHECK: [[VECTOR_PH]]:
-; NO-REG-PRESSURE-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
-; NO-REG-PRESSURE-CHECK-NEXT: [[TMP1:%.*]] = mul <vscale x 8 x i64> [[TMP0]], splat (i64 2)
-; NO-REG-PRESSURE-CHECK-NEXT: [[TMP3:%.*]] = mul <vscale x 8 x i64> [[TMP0]], splat (i64 3)
-; NO-REG-PRESSURE-CHECK-NEXT: [[TMP5:%.*]] = mul <vscale x 8 x i64> [[TMP0]], splat (i64 4)
; NO-REG-PRESSURE-CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-REG-PRESSURE-CHECK: [[VECTOR_BODY]]:
-; NO-REG-PRESSURE-CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; NO-REG-PRESSURE-CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[TMP1]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; NO-REG-PRESSURE-CHECK-NEXT: [[VEC_IND3:%.*]] = phi <vscale x 8 x i64> [ [[TMP3]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT9:%.*]], %[[VECTOR_BODY]] ]
-; NO-REG-PRESSURE-CHECK-NEXT: [[VEC_IND4:%.*]] = phi <vscale x 8 x i64> [ [[TMP5]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT10:%.*]], %[[VECTOR_BODY]] ]
+; NO-REG-PRESSURE-CHECK-NEXT: [[TMP7:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; NO-REG-PRESSURE-CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; NO-REG-PRESSURE-CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
-; NO-REG-PRESSURE-CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
-; NO-REG-PRESSURE-CHECK-NEXT: [[TMP10:%.*]] = shl i64 [[TMP7]], 2
-; NO-REG-PRESSURE-CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP10]], i64 0
-; NO-REG-PRESSURE-CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-; NO-REG-PRESSURE-CHECK-NEXT: [[TMP11:%.*]] = mul i64 3, [[TMP7]]
-; NO-REG-PRESSURE-CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP11]], i64 0
-; NO-REG-PRESSURE-CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 8 x i64> [[BROADCAST_SPLATINSERT3]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+; NO-REG-PRESSURE-CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 16, i1 true)
; NO-REG-PRESSURE-CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[TMP7]], 1
-; NO-REG-PRESSURE-CHECK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP8]], i64 0
-; NO-REG-PRESSURE-CHECK-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector <vscale x 8 x i64> [[BROADCAST_SPLATINSERT5]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-; NO-REG-PRESSURE-CHECK-NEXT: [[TMP9:%.*]] = sub <vscale x 8 x i64> [[VEC_IND]], splat (i64 1)
-; NO-REG-PRESSURE-CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[P0]], <vscale x 8 x i64> [[TMP9]]
-; NO-REG-PRESSURE-CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 8 x i8> @llvm.vp.gather.nxv8i8.nxv8p0(<vscale x 8 x ptr> align 1 [[TMP14]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]])
-; NO-REG-PRESSURE-CHECK-NEXT: [[TMP15:%.*]] = sub <vscale x 8 x i64> [[VEC_IND3]], splat (i64 1)
-; NO-REG-PRESSURE-CHECK-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[P0]], <vscale x 8 x i64> [[TMP15]]
-; NO-REG-PRESSURE-CHECK-NEXT: [[WIDE_MASKED_GATHER9:%.*]] = call <vscale x 8 x i8> @llvm.vp.gather.nxv8i8.nxv8p0(<vscale x 8 x ptr> align 1 [[TMP16]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]])
-; NO-REG-PRESSURE-CHECK-NEXT: [[TMP17:%.*]] = sub <vscale x 8 x i64> [[VEC_IND4]], splat (i64 1)
-; NO-REG-PRESSURE-CHECK-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[P0]], <vscale x 8 x i64> [[TMP17]]
-; NO-REG-PRESSURE-CHECK-NEXT: [[WIDE_MASKED_GATHER10:%.*]] = call <vscale x 8 x i8> @llvm.vp.gather.nxv8i8.nxv8p0(<vscale x 8 x ptr> align 1 [[TMP18]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]])
-; NO-REG-PRESSURE-CHECK-NEXT: [[TMP19:%.*]] = mul i64 [[EVL_BASED_IV]], 3
+; NO-REG-PRESSURE-CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i64 [[TMP8]]
+; NO-REG-PRESSURE-CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vp.strided.load.nxv16i8.p0.i64(ptr align 1 [[TMP2]], i64 2, <vscale x 16 x i1> splat (i1 true), i32 [[TMP6]])
+; NO-REG-PRESSURE-CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP7]], 3
+; NO-REG-PRESSURE-CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i64 [[TMP4]]
+; NO-REG-PRESSURE-CHECK-NEXT: [[TMP10:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vp.strided.load.nxv16i8.p0.i64(ptr align 1 [[TMP5]], i64 3, <vscale x 16 x i1> splat (i1 true), i32 [[TMP6]])
+; NO-REG-PRESSURE-CHECK-NEXT: [[TMP11:%.*]] = shl i64 [[TMP7]], 2
+; NO-REG-PRESSURE-CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i64 [[TMP11]]
+; NO-REG-PRESSURE-CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vp.strided.load.nxv16i8.p0.i64(ptr align 1 [[TMP12]], i64 4, <vscale x 16 x i1> splat (i1 true), i32 [[TMP6]])
+; NO-REG-PRESSURE-CHECK-NEXT: [[TMP19:%.*]] = mul i64 [[TMP7]], 3
; NO-REG-PRESSURE-CHECK-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP19]]
; NO-REG-PRESSURE-CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[TMP20]], i8 0
; NO-REG-PRESSURE-CHECK-NEXT: [[INTERLEAVE_EVL:%.*]] = mul nuw nsw i32 [[TMP6]], 3
-; NO-REG-PRESSURE-CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 24 x i8> @llvm.vector.interleave3.nxv24i8(<vscale x 8 x i8> [[WIDE_MASKED_GATHER]], <vscale x 8 x i8> [[WIDE_MASKED_GATHER9]], <vscale x 8 x i8> [[WIDE_MASKED_GATHER10]])
-; NO-REG-PRESSURE-CHECK-NEXT: call void @llvm.vp.store.nxv24i8.p0(<vscale x 24 x i8> [[INTERLEAVED_VEC]], ptr align 1 [[TMP21]], <vscale x 24 x i1> splat (i1 true), i32 [[INTERLEAVE_EVL]])
-; NO-REG-PRESSURE-CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[EVL_BASED_IV]]
-; NO-REG-PRESSURE-CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]]
-; NO-REG-PRESSURE-CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[BROADCAST_SPLAT6]]
-; NO-REG-PRESSURE-CHECK-NEXT: [[VEC_IND_NEXT9]] = add <vscale x 8 x i64> [[VEC_IND3]], [[BROADCAST_SPLAT4]]
-; NO-REG-PRESSURE-CHECK-NEXT: [[VEC_IND_NEXT10]] = add <vscale x 8 x i64> [[VEC_IND4]], [[BROADCAST_SPLAT]]
+; NO-REG-PRESSURE-CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 48 x i8> @llvm.vector.interleave3.nxv48i8(<vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8> [[TMP10]], <vscale x 16 x i8> [[TMP9]])
+; NO-REG-PRESSURE-CHECK-NEXT: call void @llvm.vp.store.nxv48i8.p0(<vscale x 48 x i8> [[INTERLEAVED_VEC]], ptr align 1 [[TMP21]], <vscale x 48 x i1> splat (i1 true), i32 [[INTERLEAVE_EVL]])
+; NO-REG-PRESSURE-CHECK-NEXT: [[TMP13:%.*]] = zext i32 [[TMP6]] to i64
+; NO-REG-PRESSURE-CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP13]], [[TMP7]]
+; NO-REG-PRESSURE-CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]]
; NO-REG-PRESSURE-CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; NO-REG-PRESSURE-CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; NO-REG-PRESSURE-CHECK: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
index 7c5725f6c128b..393ce60db9958 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
@@ -20,10 +20,10 @@ define void @single_constant_stride_int_scaled(ptr %p) {
; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP12]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP7:%.*]] = shl nuw nsw i64 [[EVL_BASED_IV]], 3
; CHECK-NEXT: [[TMP14:%.*]] = shl nuw nsw <vscale x 4 x i64> [[VEC_IND]], splat (i64 3)
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP7]]
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[TMP14]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[P:%.*]], <vscale x 4 x i64> [[TMP14]]
+; CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[EVL_BASED_IV]], 5
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP5]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP9]], i64 32, <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; CHECK-NEXT: [[TMP16:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP16]], <vscale x 4 x ptr> align 4 [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
@@ -60,14 +60,14 @@ define void @single_constant_stride_int_scaled(ptr %p) {
; CHECK-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-UF2-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP7]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-UF2-NEXT: [[STEP_ADD:%.*]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; CHECK-UF2-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[INDEX]], 3
; CHECK-UF2-NEXT: [[TMP9:%.*]] = shl nuw nsw <vscale x 4 x i64> [[VEC_IND]], splat (i64 3)
; CHECK-UF2-NEXT: [[TMP10:%.*]] = shl nuw nsw <vscale x 4 x i64> [[STEP_ADD]], splat (i64 3)
-; CHECK-UF2-NEXT: [[TMP22:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP8]]
-; CHECK-UF2-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[TMP9]]
+; CHECK-UF2-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[P:%.*]], <vscale x 4 x i64> [[TMP9]]
; CHECK-UF2-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[TMP10]]
-; CHECK-UF2-NEXT: [[TMP17:%.*]] = mul i64 [[TMP3]], 8
-; CHECK-UF2-NEXT: [[TMP18:%.*]] = getelementptr i32, ptr [[TMP22]], i64 [[TMP17]]
+; CHECK-UF2-NEXT: [[TMP16:%.*]] = shl i64 [[INDEX]], 5
+; CHECK-UF2-NEXT: [[TMP22:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP16]]
+; CHECK-UF2-NEXT: [[TMP17:%.*]] = mul i64 [[TMP3]], 32
+; CHECK-UF2-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[TMP22]], i64 [[TMP17]]
; CHECK-UF2-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP22]], i64 32, <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
; CHECK-UF2-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP18]], i64 32, <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
; CHECK-UF2-NEXT: [[TMP13:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
@@ -131,9 +131,9 @@ define void @single_constant_stride_int_iv(ptr %p) {
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP11]], 6
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP4]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[EVL_BASED_IV]], 64
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[P:%.*]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[EVL_BASED_IV]], 8
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP8]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP9]], i64 256, <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP13]], <vscale x 4 x ptr> align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
@@ -171,12 +171,12 @@ define void @single_constant_stride_int_iv(ptr %p) {
; CHECK-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-UF2-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP8]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-UF2-NEXT: [[STEP_ADD:%.*]] = add <vscale x 4 x i64> [[VEC_IND]], [[TMP6]]
-; CHECK-UF2-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 64
-; CHECK-UF2-NEXT: [[TMP18:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[OFFSET_IDX]]
-; CHECK-UF2-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-UF2-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[P:%.*]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-UF2-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[STEP_ADD]]
-; CHECK-UF2-NEXT: [[TMP14:%.*]] = mul i64 [[TMP3]], 64
-; CHECK-UF2-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[TMP18]], i64 [[TMP14]]
+; CHECK-UF2-NEXT: [[TMP17:%.*]] = shl i64 [[INDEX]], 8
+; CHECK-UF2-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP17]]
+; CHECK-UF2-NEXT: [[TMP14:%.*]] = mul i64 [[TMP3]], 256
+; CHECK-UF2-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[TMP18]], i64 [[TMP14]]
; CHECK-UF2-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP18]], i64 256, <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
; CHECK-UF2-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP15]], i64 256, <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
; CHECK-UF2-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
@@ -782,27 +782,23 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) {
; STRIDED-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; STRIDED-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; STRIDED: vector.ph:
-; STRIDED-NEXT: [[TMP47:%.*]] = shl i64 [[STRIDE]], 2
; STRIDED-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[STRIDE]], i64 0
; STRIDED-NEXT: [[BROADCAST_SPLAT1:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; STRIDED-NEXT: [[TMP12:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; STRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; STRIDED: vector.body:
-; STRIDED-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP12]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[TMP43:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; STRIDED-NEXT: [[TMP44:%.*]] = zext i32 [[TMP43]] to i64
; STRIDED-NEXT: [[BROADCAST_SPLATINSERT9:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP44]], i64 0
; STRIDED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT9]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-; STRIDED-NEXT: [[TMP50:%.*]] = mul nuw nsw i64 [[EVL_BASED_IV]], [[STRIDE]]
; STRIDED-NEXT: [[TMP18:%.*]] = mul nuw nsw <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT1]]
-; STRIDED-NEXT: [[TMP42:%.*]] = getelementptr i32, ptr [[P]], i64 [[TMP50]]
-; STRIDED-NEXT: [[WIDE_STRIDED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP42]], i64 [[TMP47]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP43]]), !alias.scope [[META5:![0-9]+]]
+; STRIDED-NEXT: [[TMP42:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[TMP18]]
+; STRIDED-NEXT: [[WIDE_STRIDED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP42]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP43]]), !alias.scope [[META5:![0-9]+]]
; STRIDED-NEXT: [[TMP45:%.*]] = add <vscale x 4 x i32> [[WIDE_STRIDED_LOAD]], splat (i32 1)
; STRIDED-NEXT: [[TMP46:%.*]] = getelementptr i32, ptr [[P2]], <vscale x 4 x i64> [[TMP18]]
; STRIDED-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP45]], <vscale x 4 x ptr> align 4 [[TMP46]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP43]]), !alias.scope [[META8:![0-9]+]], !noalias [[META5]]
-; STRIDED-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP44]], [[EVL_BASED_IV]]
; STRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP44]]
; STRIDED-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; STRIDED-NEXT: [[TMP41:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
@@ -888,8 +884,6 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) {
; STRIDED-UF2-NEXT: [[TMP30:%.*]] = shl nuw i64 [[TMP29]], 1
; STRIDED-UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP30]]
; STRIDED-UF2-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; STRIDED-UF2-NEXT: [[TMP35:%.*]] = trunc i64 [[TMP29]] to i32
-; STRIDED-UF2-NEXT: [[TMP32:%.*]] = shl i64 [[STRIDE]], 2
; STRIDED-UF2-NEXT: [[BROADCAST_SPLATINSERT10:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[STRIDE]], i64 0
; STRIDED-UF2-NEXT: [[BROADCAST_SPLAT11:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT10]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; STRIDED-UF2-NEXT: [[TMP31:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
@@ -898,14 +892,12 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) {
; STRIDED-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-UF2-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP31]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-UF2-NEXT: [[STEP_ADD:%.*]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; STRIDED-UF2-NEXT: [[TMP36:%.*]] = mul nuw nsw i64 [[INDEX]], [[STRIDE]]
; STRIDED-UF2-NEXT: [[TMP33:%.*]] = mul nuw nsw <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT11]]
; STRIDED-UF2-NEXT: [[TMP34:%.*]] = mul nuw nsw <vscale x 4 x i64> [[STEP_ADD]], [[BROADCAST_SPLAT11]]
-; STRIDED-UF2-NEXT: [[TMP44:%.*]] = getelementptr i32, ptr [[P]], i64 [[TMP36]]
-; STRIDED-UF2-NEXT: [[TMP47:%.*]] = mul i64 [[TMP29]], [[STRIDE]]
-; STRIDED-UF2-NEXT: [[TMP48:%.*]] = getelementptr i32, ptr [[TMP44]], i64 [[TMP47]]
-; STRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP44]], i64 [[TMP32]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP35]]), !alias.scope [[META8:![0-9]+]]
-; STRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER12:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP48]], i64 [[TMP32]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP35]]), !alias.scope [[META8]]
+; STRIDED-UF2-NEXT: [[TMP36:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[TMP33]]
+; STRIDED-UF2-NEXT: [[TMP35:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[TMP34]]
+; STRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP36]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison), !alias.scope [[META8:![0-9]+]]
+; STRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER12:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP35]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison), !alias.scope [[META8]]
; STRIDED-UF2-NEXT: [[TMP37:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
; STRIDED-UF2-NEXT: [[TMP38:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER12]], splat (i32 1)
; STRIDED-UF2-NEXT: [[TMP39:%.*]] = getelementptr i32, ptr [[P2]], <vscale x 4 x i64> [[TMP33]]
@@ -1313,7 +1305,8 @@ define void @constant_stride_reinterpret(ptr noalias %in, ptr noalias %out) {
; NOSTRIDED-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; NOSTRIDED-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; NOSTRIDED-NEXT: [[TMP2:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
-; NOSTRIDED-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[IN:%.*]], i64 [[EVL_BASED_IV]]
+; NOSTRIDED-NEXT: [[TMP1:%.*]] = shl nuw i64 [[EVL_BASED_IV]], 2
+; NOSTRIDED-NEXT: [[TMP9:%.*]] = getelementptr nuw i8, ptr [[IN:%.*]], i64 [[TMP1]]
; NOSTRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[TMP9]], i64 4, <vscale x 2 x i1> splat (i1 true), i32 [[TMP2]])
; NOSTRIDED-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i64, ptr [[OUT:%.*]], i64 [[EVL_BASED_IV]]
; NOSTRIDED-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[WIDE_MASKED_GATHER]], ptr align 8 [[TMP5]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP2]])
@@ -1343,8 +1336,10 @@ define void @constant_stride_reinterpret(ptr noalias %in, ptr noalias %out) {
; NOSTRIDED-UF2-NEXT: br label [[VECTOR_BODY:%.*]]
; NOSTRIDED-UF2: vector.body:
; NOSTRIDED-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; NOSTRIDED-UF2-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i32, ptr [[IN:%.*]], i64 [[INDEX]]
-; NOSTRIDED-UF2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP5]], i64 [[TMP3]]
+; NOSTRIDED-UF2-NEXT: [[TMP7:%.*]] = shl nuw i64 [[INDEX]], 2
+; NOSTRIDED-UF2-NEXT: [[TMP5:%.*]] = getelementptr nuw i8, ptr [[IN:%.*]], i64 [[TMP7]]
+; NOSTRIDED-UF2-NEXT: [[TMP8:%.*]] = mul i64 [[TMP3]], 4
+; NOSTRIDED-UF2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP5]], i64 [[TMP8]]
; NOSTRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[TMP5]], i64 4, <vscale x 2 x i1> splat (i1 true), i32 [[TMP15]])
; NOSTRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[TMP6]], i64 4, <vscale x 2 x i1> splat (i1 true), i32 [[TMP15]])
; NOSTRIDED-UF2-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i64, ptr [[OUT:%.*]], i64 [[INDEX]]
@@ -1381,7 +1376,8 @@ define void @constant_stride_reinterpret(ptr noalias %in, ptr noalias %out) {
; STRIDED-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[TMP2:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
-; STRIDED-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[IN:%.*]], i64 [[EVL_BASED_IV]]
+; STRIDED-NEXT: [[TMP1:%.*]] = shl nuw i64 [[EVL_BASED_IV]], 2
+; STRIDED-NEXT: [[TMP9:%.*]] = getelementptr nuw i8, ptr [[IN:%.*]], i64 [[TMP1]]
; STRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[TMP9]], i64 4, <vscale x 2 x i1> splat (i1 true), i32 [[TMP2]])
; STRIDED-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i64, ptr [[OUT:%.*]], i64 [[EVL_BASED_IV]]
; STRIDED-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[WIDE_MASKED_GATHER]], ptr align 8 [[TMP5]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP2]])
@@ -1411,8 +1407,10 @@ define void @constant_stride_reinterpret(ptr noalias %in, ptr noalias %out) {
; STRIDED-UF2-NEXT: br label [[VECTOR_BODY:%.*]]
; STRIDED-UF2: vector.body:
; STRIDED-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; STRIDED-UF2-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i32, ptr [[IN:%.*]], i64 [[INDEX]]
-; STRIDED-UF2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP5]], i64 [[TMP3]]
+; STRIDED-UF2-NEXT: [[TMP7:%.*]] = shl nuw i64 [[INDEX]], 2
+; STRIDED-UF2-NEXT: [[TMP5:%.*]] = getelementptr nuw i8, ptr [[IN:%.*]], i64 [[TMP7]]
+; STRIDED-UF2-NEXT: [[TMP8:%.*]] = mul i64 [[TMP3]], 4
+; STRIDED-UF2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP5]], i64 [[TMP8]]
; STRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[TMP5]], i64 4, <vscale x 2 x i1> splat (i1 true), i32 [[TMP15]])
; STRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[TMP6]], i64 4, <vscale x 2 x i1> splat (i1 true), i32 [[TMP15]])
; STRIDED-UF2-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i64, ptr [[OUT:%.*]], i64 [[INDEX]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
index 787a9ad770885..060df8166c775 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
@@ -17,7 +17,8 @@ define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %inde
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[FOR_BODY1]] ]
; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[FOR_BODY1]] ]
; IF-EVL-NEXT: [[TMP2:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
-; IF-EVL-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[INDEX:%.*]], i64 [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP1:%.*]] = shl nuw i64 [[EVL_BASED_IV]], 2
+; IF-EVL-NEXT: [[TMP5:%.*]] = getelementptr nuw i8, ptr [[INDEX:%.*]], i64 [[TMP1]]
; IF-EVL-NEXT: [[WIDE_STRIDED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[TMP5]], i64 4, <vscale x 2 x i1> splat (i1 true), i32 [[TMP2]])
; IF-EVL-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[IN:%.*]], <vscale x 2 x i64> [[WIDE_STRIDED_LOAD]]
; IF-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x float> @llvm.vp.gather.nxv2f32.nxv2p0(<vscale x 2 x ptr> align 4 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP2]])
@@ -48,7 +49,8 @@ define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %inde
; NO-VP-NEXT: br label [[FOR_BODY1:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDVARS_IV1:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDVARS_IV_NEXT1:%.*]], [[FOR_BODY1]] ]
-; NO-VP-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[INDEX:%.*]], i64 [[INDVARS_IV1]]
+; NO-VP-NEXT: [[TMP10:%.*]] = shl nuw i64 [[INDVARS_IV1]], 2
+; NO-VP-NEXT: [[ARRAYIDX3:%.*]] = getelementptr nuw i8, ptr [[INDEX:%.*]], i64 [[TMP10]]
; NO-VP-NEXT: [[WIDE_STRIDED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i64(ptr align 8 [[ARRAYIDX3]], i64 4, <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[IN:%.*]], <vscale x 2 x i64> [[WIDE_STRIDED_LOAD]]
; NO-VP-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32.nxv2p0(<vscale x 2 x ptr> align 4 [[TMP6]], <vscale x 2 x i1> splat (i1 true), <vscale x 2 x float> poison)
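The gather_scatter checks above show the split this patch is after: the load
of the index array has a loop-invariant stride of 4 bytes, so it becomes a
vp.strided.load with a byte-based shl-plus-GEP address, while the
data-dependent access through %in remains a gather. Roughly, the scalar
address math behind the new checks (Iv and IndexBase are illustrative names):

  int64_t Off = Iv << 2;               // shl nuw i64 [[EVL_BASED_IV]], 2
  const char *Addr = IndexBase + Off;  // getelementptr nuw i8, ptr [[INDEX]]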
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
index 541e7c7a100b3..81fd059bd0dda 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
@@ -116,6 +116,7 @@ for.cond.cleanup:
define i32 @load_factor_4_with_gap(i64 %n, ptr noalias %a) {
; IF-EVL-LABEL: @load_factor_4_with_gap(
; IF-EVL-NEXT: entry:
+; IF-EVL-NEXT: [[SCEVGEP:%.*]] = getelementptr nuw i8, ptr [[A:%.*]], i64 12
; IF-EVL-NEXT: br label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -124,12 +125,13 @@ define i32 @load_factor_4_with_gap(i64 %n, ptr noalias %a) {
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP4:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
-; IF-EVL-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i32], ptr [[A:%.*]], i64 [[EVL_BASED_IV]], i32 0
+; IF-EVL-NEXT: [[TMP1:%.*]] = shl nuw i64 [[EVL_BASED_IV]], 4
+; IF-EVL-NEXT: [[TMP5:%.*]] = getelementptr nuw i8, ptr [[A]], i64 [[TMP1]]
; IF-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP5]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP4]])
; IF-EVL-NEXT: [[TMP8:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[WIDE_MASKED_GATHER]]
; IF-EVL-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP5]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP4]])
; IF-EVL-NEXT: [[TMP9:%.*]] = add <vscale x 4 x i32> [[TMP8]], [[WIDE_MASKED_GATHER1]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[EVL_BASED_IV]], i32 3
+; IF-EVL-NEXT: [[TMP10:%.*]] = getelementptr nuw i8, ptr [[SCEVGEP]], i64 [[TMP1]]
; IF-EVL-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP10]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP4]])
; IF-EVL-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[TMP9]], [[WIDE_MASKED_GATHER2]]
; IF-EVL-NEXT: [[TMP12]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP11]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP4]])
@@ -146,6 +148,7 @@ define i32 @load_factor_4_with_gap(i64 %n, ptr noalias %a) {
;
; NO-VP-LABEL: @load_factor_4_with_gap(
; NO-VP-NEXT: entry:
+; NO-VP-NEXT: [[SCEVGEP:%.*]] = getelementptr nuw i8, ptr [[A:%.*]], i64 12
; NO-VP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 2
; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[N:%.*]], [[TMP1]]
@@ -162,12 +165,13 @@ define i32 @load_factor_4_with_gap(i64 %n, ptr noalias %a) {
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; NO-VP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ]
-; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x i32], ptr [[A:%.*]], i64 [[INDEX]], i32 0
+; NO-VP-NEXT: [[TMP8:%.*]] = shl nuw i64 [[INDEX]], 4
+; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr nuw i8, ptr [[A]], i64 [[TMP8]]
; NO-VP-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP6]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
; NO-VP-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[WIDE_MASKED_GATHER]]
; NO-VP-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP6]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
; NO-VP-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[TMP10]], [[WIDE_MASKED_GATHER1]]
-; NO-VP-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[INDEX]], i32 3
+; NO-VP-NEXT: [[TMP19:%.*]] = getelementptr nuw i8, ptr [[SCEVGEP]], i64 [[TMP8]]
; NO-VP-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP19]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
; NO-VP-NEXT: [[TMP13]] = add <vscale x 4 x i32> [[TMP11]], [[WIDE_MASKED_GATHER2]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
@@ -341,6 +345,7 @@ exit:
define i32 @load_factor_4_with_tail_gap(i64 %n, ptr noalias %a) {
; IF-EVL-LABEL: @load_factor_4_with_tail_gap(
; IF-EVL-NEXT: entry:
+; IF-EVL-NEXT: [[SCEVGEP:%.*]] = getelementptr nuw i8, ptr [[A:%.*]], i64 8
; IF-EVL-NEXT: br label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -349,12 +354,13 @@ define i32 @load_factor_4_with_tail_gap(i64 %n, ptr noalias %a) {
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP4:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
-; IF-EVL-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i32], ptr [[A:%.*]], i64 [[EVL_BASED_IV]], i32 0
+; IF-EVL-NEXT: [[TMP1:%.*]] = shl nuw i64 [[EVL_BASED_IV]], 4
+; IF-EVL-NEXT: [[TMP5:%.*]] = getelementptr nuw i8, ptr [[A]], i64 [[TMP1]]
; IF-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP5]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP4]])
; IF-EVL-NEXT: [[TMP8:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[WIDE_MASKED_GATHER]]
; IF-EVL-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP5]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP4]])
; IF-EVL-NEXT: [[TMP9:%.*]] = add <vscale x 4 x i32> [[TMP8]], [[WIDE_MASKED_GATHER1]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[EVL_BASED_IV]], i32 2
+; IF-EVL-NEXT: [[TMP10:%.*]] = getelementptr nuw i8, ptr [[SCEVGEP]], i64 [[TMP1]]
; IF-EVL-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP10]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP4]])
; IF-EVL-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[TMP9]], [[WIDE_MASKED_GATHER2]]
; IF-EVL-NEXT: [[TMP12]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP11]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP4]])
@@ -371,6 +377,7 @@ define i32 @load_factor_4_with_tail_gap(i64 %n, ptr noalias %a) {
;
; NO-VP-LABEL: @load_factor_4_with_tail_gap(
; NO-VP-NEXT: entry:
+; NO-VP-NEXT: [[SCEVGEP:%.*]] = getelementptr nuw i8, ptr [[A:%.*]], i64 8
; NO-VP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 2
; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[N:%.*]], [[TMP1]]
@@ -387,12 +394,13 @@ define i32 @load_factor_4_with_tail_gap(i64 %n, ptr noalias %a) {
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; NO-VP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ]
-; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x i32], ptr [[A:%.*]], i64 [[INDEX]], i32 0
+; NO-VP-NEXT: [[TMP8:%.*]] = shl nuw i64 [[INDEX]], 4
+; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr nuw i8, ptr [[A]], i64 [[TMP8]]
; NO-VP-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP6]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
; NO-VP-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[WIDE_MASKED_GATHER]]
; NO-VP-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP6]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
; NO-VP-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[TMP10]], [[WIDE_MASKED_GATHER1]]
-; NO-VP-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[INDEX]], i32 2
+; NO-VP-NEXT: [[TMP19:%.*]] = getelementptr nuw i8, ptr [[SCEVGEP]], i64 [[TMP8]]
; NO-VP-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP19]], i64 16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
; NO-VP-NEXT: [[TMP13]] = add <vscale x 4 x i32> [[TMP11]], [[WIDE_MASKED_GATHER2]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
@@ -559,6 +567,12 @@ define i32 @load_factor_4_reverse(i64 %n, ptr noalias %a) {
; IF-EVL-NEXT: [[TMP0:%.*]] = add nsw i64 [[N:%.*]], -1
; IF-EVL-NEXT: [[SMIN:%.*]] = call i64 @llvm.smin.i64(i64 [[TMP0]], i64 0)
; IF-EVL-NEXT: [[TMP1:%.*]] = sub i64 [[N]], [[SMIN]]
+; IF-EVL-NEXT: [[TMP2:%.*]] = shl i64 [[N]], 4
+; IF-EVL-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[TMP2]]
+; IF-EVL-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 8
+; IF-EVL-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP3]]
+; IF-EVL-NEXT: [[TMP4:%.*]] = add nuw nsw i64 [[TMP2]], 12
+; IF-EVL-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP4]]
; IF-EVL-NEXT: br label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -567,16 +581,16 @@ define i32 @load_factor_4_reverse(i64 %n, ptr noalias %a) {
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP1]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
-; IF-EVL-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i32], ptr [[A:%.*]], i64 [[OFFSET_IDX]], i32 0
+; IF-EVL-NEXT: [[TMP8:%.*]] = mul i64 [[EVL_BASED_IV]], -16
+; IF-EVL-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i64 [[TMP8]]
; IF-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP7]], i64 -16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
; IF-EVL-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[WIDE_MASKED_GATHER]]
; IF-EVL-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP7]], i64 -16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
; IF-EVL-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[TMP10]], [[WIDE_MASKED_GATHER3]]
-; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[OFFSET_IDX]], i32 2
+; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[SCEVGEP1]], i64 [[TMP8]]
; IF-EVL-NEXT: [[WIDE_MASKED_GATHER4:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP12]], i64 -16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
; IF-EVL-NEXT: [[TMP13:%.*]] = add <vscale x 4 x i32> [[TMP11]], [[WIDE_MASKED_GATHER4]]
-; IF-EVL-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[OFFSET_IDX]], i32 3
+; IF-EVL-NEXT: [[TMP25:%.*]] = getelementptr i8, ptr [[SCEVGEP2]], i64 [[TMP8]]
; IF-EVL-NEXT: [[WIDE_MASKED_GATHER5:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP25]], i64 -16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
; IF-EVL-NEXT: [[TMP15:%.*]] = add <vscale x 4 x i32> [[TMP13]], [[WIDE_MASKED_GATHER5]]
; IF-EVL-NEXT: [[TMP16]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP15]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP6]])
@@ -596,6 +610,12 @@ define i32 @load_factor_4_reverse(i64 %n, ptr noalias %a) {
; NO-VP-NEXT: [[TMP0:%.*]] = add nsw i64 [[N:%.*]], -1
; NO-VP-NEXT: [[SMIN:%.*]] = call i64 @llvm.smin.i64(i64 [[TMP0]], i64 0)
; NO-VP-NEXT: [[TMP1:%.*]] = sub i64 [[N]], [[SMIN]]
+; NO-VP-NEXT: [[TMP9:%.*]] = shl i64 [[N]], 4
+; NO-VP-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[TMP9]]
+; NO-VP-NEXT: [[TMP10:%.*]] = add nuw nsw i64 [[TMP9]], 8
+; NO-VP-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP10]]
+; NO-VP-NEXT: [[TMP13:%.*]] = add nuw nsw i64 [[TMP9]], 12
+; NO-VP-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP13]]
; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 2
; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], [[TMP3]]
@@ -611,16 +631,16 @@ define i32 @load_factor_4_reverse(i64 %n, ptr noalias %a) {
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; NO-VP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
-; NO-VP-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[N]], [[INDEX]]
-; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i32], ptr [[A:%.*]], i64 [[OFFSET_IDX]], i32 0
+; NO-VP-NEXT: [[TMP24:%.*]] = mul i64 [[INDEX]], -16
+; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i64 [[TMP24]]
; NO-VP-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP7]], i64 -16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP8]])
; NO-VP-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[WIDE_MASKED_GATHER]]
; NO-VP-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP7]], i64 -16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP8]])
; NO-VP-NEXT: [[TMP12:%.*]] = add <vscale x 4 x i32> [[TMP11]], [[WIDE_MASKED_GATHER3]]
-; NO-VP-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[OFFSET_IDX]], i32 2
+; NO-VP-NEXT: [[TMP23:%.*]] = getelementptr i8, ptr [[SCEVGEP1]], i64 [[TMP24]]
; NO-VP-NEXT: [[WIDE_MASKED_GATHER4:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP23]], i64 -16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP8]])
; NO-VP-NEXT: [[TMP14:%.*]] = add <vscale x 4 x i32> [[TMP12]], [[WIDE_MASKED_GATHER4]]
-; NO-VP-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[OFFSET_IDX]], i32 3
+; NO-VP-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[SCEVGEP2]], i64 [[TMP24]]
; NO-VP-NEXT: [[WIDE_MASKED_GATHER5:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i64(ptr align 4 [[TMP15]], i64 -16, <vscale x 4 x i1> splat (i1 true), i32 [[TMP8]])
; NO-VP-NEXT: [[TMP16]] = add <vscale x 4 x i32> [[TMP14]], [[WIDE_MASKED_GATHER5]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
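In the load_factor_4_reverse checks, the strided loads use a negative stride
(-16 bytes) and walk downward from per-member end pointers that SCEV
expansion hoists into the entry block. Assuming the [4 x i32] row layout of
the original GEPs (16 bytes per row), those preheader pointers amount to:

  const char *End0 = A + ((uint64_t)N << 4);  // [[SCEVGEP]],  roughly &A[N][0]
  const char *End2 = End0 + 8;                // [[SCEVGEP1]], roughly &A[N][2]
  const char *End3 = End0 + 12;               // [[SCEVGEP2]], roughly &A[N][3]
  // In-loop address: End + Index * -16, matching the -16 stride operand.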
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll
index f4c7c6f6fba1b..0e4ab71bacbdc 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll
@@ -250,10 +250,76 @@ exit:
define void @test_minbws_for_trunc(i32 %n, ptr noalias %p1, ptr noalias %p2) {
; CHECK-LABEL: define void @test_minbws_for_trunc(
; CHECK-SAME: i32 [[N:%.*]], ptr noalias [[P1:%.*]], ptr noalias [[P2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[MUL:%.*]] = call { i16, i1 } @llvm.umul.with.overflow.i16(i16 4, i16 255)
+; CHECK-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i16, i1 } [[MUL]], 0
+; CHECK-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i16, i1 } [[MUL]], 1
+; CHECK-NEXT: [[TMP0:%.*]] = add i16 4, [[MUL_RESULT]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp slt i16 [[TMP0]], 4
+; CHECK-NEXT: [[TMP2:%.*]] = or i1 [[TMP1]], [[MUL_OVERFLOW]]
+; CHECK-NEXT: [[MUL1:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 32, i64 255)
+; CHECK-NEXT: [[MUL_RESULT2:%.*]] = extractvalue { i64, i1 } [[MUL1]], 0
+; CHECK-NEXT: [[MUL_OVERFLOW3:%.*]] = extractvalue { i64, i1 } [[MUL1]], 1
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[P2]], i64 [[MUL_RESULT2]]
+; CHECK-NEXT: [[TMP4:%.*]] = icmp ult ptr [[TMP3]], [[P2]]
+; CHECK-NEXT: [[TMP5:%.*]] = or i1 [[TMP4]], [[MUL_OVERFLOW3]]
+; CHECK-NEXT: [[TMP6:%.*]] = or i1 [[TMP2]], [[TMP5]]
+; CHECK-NEXT: br i1 [[TMP6]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; CHECK: [[VECTOR_MEMCHECK]]:
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[P2]], i64 2042
+; CHECK-NEXT: [[SCEVGEP4:%.*]] = getelementptr i8, ptr [[P2]], i64 1021
+; CHECK-NEXT: [[SCEVGEP5:%.*]] = getelementptr i8, ptr [[P2]], i64 8168
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[P2]], [[SCEVGEP4]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[P2]], [[SCEVGEP]]
+; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
+; CHECK-NEXT: [[BOUND06:%.*]] = icmp ult ptr [[P2]], [[SCEVGEP5]]
+; CHECK-NEXT: [[BOUND17:%.*]] = icmp ult ptr [[P2]], [[SCEVGEP]]
+; CHECK-NEXT: [[FOUND_CONFLICT8:%.*]] = and i1 [[BOUND06]], [[BOUND17]]
+; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT8]]
+; CHECK-NEXT: [[BOUND09:%.*]] = icmp ult ptr [[P2]], [[SCEVGEP5]]
+; CHECK-NEXT: [[BOUND110:%.*]] = icmp ult ptr [[P2]], [[SCEVGEP4]]
+; CHECK-NEXT: [[FOUND_CONFLICT11:%.*]] = and i1 [[BOUND09]], [[BOUND110]]
+; CHECK-NEXT: [[CONFLICT_RDX12:%.*]] = or i1 [[CONFLICT_RDX]], [[FOUND_CONFLICT11]]
+; CHECK-NEXT: br i1 [[CONFLICT_RDX12]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 2 x i16> @llvm.stepvector.nxv2i16()
+; CHECK-NEXT: [[TMP10:%.*]] = mul <vscale x 2 x i16> [[TMP9]], splat (i16 4)
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i16> [ [[TMP10]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i32 [ 256, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 2, i1 true)
+; CHECK-NEXT: [[TMP12:%.*]] = trunc i32 [[TMP11]] to i16
+; CHECK-NEXT: [[TMP13:%.*]] = shl i16 [[TMP12]], 2
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i16> poison, i16 [[TMP13]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i16> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP16:%.*]] = sext <vscale x 2 x i16> [[VEC_IND]] to <vscale x 2 x i64>
+; CHECK-NEXT: [[TMP17:%.*]] = shl i32 [[EVL_BASED_IV]], 4
+; CHECK-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[P1]], i32 [[TMP17]]
+; CHECK-NEXT: [[TMP19:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0.i32(ptr align 4 [[TMP18]], i32 16, <vscale x 2 x i1> splat (i1 true), i32 [[TMP11]])
+; CHECK-NEXT: [[TMP20:%.*]] = trunc <vscale x 2 x i32> [[TMP19]] to <vscale x 2 x i16>
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr [1 x [1 x i16]], ptr [[P2]], <vscale x 2 x i64> [[TMP16]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv2i16.nxv2p0(<vscale x 2 x i16> [[TMP20]], <vscale x 2 x ptr> align 2 [[TMP21]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP11]]), !alias.scope [[META6:![0-9]+]], !noalias [[META9:![0-9]+]]
+; CHECK-NEXT: [[TMP22:%.*]] = trunc <vscale x 2 x i32> [[TMP19]] to <vscale x 2 x i8>
+; CHECK-NEXT: [[TMP23:%.*]] = getelementptr i8, ptr [[P2]], <vscale x 2 x i64> [[TMP16]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv2i8.nxv2p0(<vscale x 2 x i8> [[TMP22]], <vscale x 2 x ptr> align 1 [[TMP23]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP11]]), !alias.scope [[META12:![0-9]+]], !noalias [[META13:![0-9]+]]
+; CHECK-NEXT: [[TMP24:%.*]] = getelementptr [1 x i64], ptr [[P2]], <vscale x 2 x i64> [[TMP16]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x ptr> align 8 [[TMP24]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP11]]), !alias.scope [[META13]]
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP11]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i16> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i32 [[AVL_NEXT]], 0
+; CHECK-NEXT: br i1 [[TMP25]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br label %[[EXIT:.*]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i16 [ 0, %[[LOOP]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
+; CHECK-NEXT: br label %[[LOOP1:.*]]
+; CHECK: [[LOOP1]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i16 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP1]] ]
; CHECK-NEXT: [[IV_EXT:%.*]] = sext i16 [[IV]] to i64
; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i32, ptr [[P1]], i64 [[IV_EXT]]
; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[GEP1]], align 4
@@ -268,7 +334,7 @@ define void @test_minbws_for_trunc(i32 %n, ptr noalias %p1, ptr noalias %p2) {
; CHECK-NEXT: [[IV_NEXT]] = add i16 [[IV]], 4
; CHECK-NEXT: [[IV_NEXT_EXT:%.*]] = sext i16 [[IV_NEXT]] to i32
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[IV_NEXT_EXT]], 1024
-; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
+; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP1]], label %[[EXIT]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -307,4 +373,14 @@ attributes #1 = { "target-features"="+64bit,+v" }
; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]}
; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]}
+; CHECK: [[META6]] = !{[[META7:![0-9]+]]}
+; CHECK: [[META7]] = distinct !{[[META7]], [[META8:![0-9]+]]}
+; CHECK: [[META8]] = distinct !{[[META8]], !"LVerDomain"}
+; CHECK: [[META9]] = !{[[META10:![0-9]+]], [[META11:![0-9]+]]}
+; CHECK: [[META10]] = distinct !{[[META10]], [[META8]]}
+; CHECK: [[META11]] = distinct !{[[META11]], [[META8]]}
+; CHECK: [[META12]] = !{[[META10]]}
+; CHECK: [[META13]] = !{[[META11]]}
+; CHECK: [[LOOP14]] = distinct !{[[LOOP14]], [[META1]], [[META2]]}
+; CHECK: [[LOOP15]] = distinct !{[[LOOP15]], [[META1]]}
;.
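The test_minbws_for_trunc diff is large because the loop now vectorizes at
all: overflow and alias checks guard the vector loop (hence the new
alias-scope and loop metadata at the end of the file), the stride-16 access
of %p1 becomes a vp.strided.load, and minimal-bitwidth analysis then narrows
its result before the scatters. In outline (a sketch of the CHECK lines,
vector types elided):

  // v   = vp.strided.load(p1 + 16*iv, /*stride=*/16)  -> i32 elements
  // t16 = trunc v to i16   // stored via vp.scatter, align 2
  // t8  = trunc v to i8    // stored via vp.scatter, align 1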
>From 4b24020441152bcc93164a5d4596c61741334b8d Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Mon, 9 Feb 2026 18:20:53 -0800
Subject: [PATCH 28/30] remove unrelated change
---
llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp | 1 +
1 file changed, 1 insertion(+)
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index 49a727704c13c..fee74f1bd2917 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -3759,6 +3759,7 @@ InstructionCost VPWidenMemoryRecipe::computeCost(ElementCount VF,
const Value *Ptr = getLoadStorePointerOperand(&Ingredient);
Type *PtrTy = Ptr->getType();
+
// If the address value is uniform across all lanes, then the address can be
// calculated with scalar type and broadcast.
if (!vputils::isSingleScalar(getAddr()))
>From 6aa84e2b1d03a6bb2168c6dbf436320d66ffb90d Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Mon, 9 Feb 2026 18:39:01 -0800
Subject: [PATCH 29/30] Update code per review comments
---
llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp | 8 +++-----
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index eb64475d24efd..9cb9b37270c70 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -5988,14 +5988,12 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan,
: GEPNoWrapFlags::none());
// Create a new vector pointer for strided access.
- auto *NewPtr = Builder.createVectorPointer(
+ VPValue *NewPtr = Builder.createVectorPointer(
BasePtr, Type::getInt8Ty(Plan.getContext()), StrideInBytes,
Ptr->getGEPNoWrapFlags(), Ptr->getDebugLoc());
- VPValue *Mask;
- if (VPValue *LoadMask = LoadR->getMask())
- Mask = LoadMask;
- else
+ VPValue *Mask = LoadR->getMask();
+ if (!Mask)
Mask = Plan.getTrue();
auto *StridedLoad = Builder.createWidenMemIntrinsic(
*cast<LoadInst>(&Ingredient), {NewPtr, StrideInBytes, Mask, I32VF},
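This hunk is a review-driven cleanup: spelling out VPValue * documents the
result type of createVectorPointer at the call site, and the four-line
if/else for the mask collapses into the usual get-or-default idiom:

  VPValue *Mask = LoadR->getMask();
  if (!Mask)
    Mask = Plan.getTrue(); // unmasked loads take an all-true mask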
>From 6598a99a204c998d264f22409a32c05c61123aa7 Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Mon, 9 Feb 2026 20:31:20 -0800
Subject: [PATCH 30/30] Update allowed-VF-users check in fixupVFUsersForEVL
---
llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp | 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 9cb9b37270c70..4baa25fdb2d38 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -3096,12 +3096,15 @@ static void fixupVFUsersForEVL(VPlan &Plan, VPValue &EVL) {
VPBasicBlock *Header = LoopRegion->getEntryBasicBlock();
assert(all_of(Plan.getVF().users(),
- [&LoopRegion](VPUser *U) {
- auto *R = cast<VPRecipeBase>(U);
- return (R->getParent()->getParent() != LoopRegion) ||
- isa<VPVectorEndPointerRecipe, VPScalarIVStepsRecipe,
- VPWidenIntOrFpInductionRecipe,
- VPWidenMemIntrinsicRecipe>(R);
+ [&Plan](VPUser *U) {
+ auto IsAllowedUser =
+ IsaPred<VPVectorEndPointerRecipe, VPScalarIVStepsRecipe,
+ VPWidenIntOrFpInductionRecipe,
+ VPWidenMemIntrinsicRecipe>;
+ if (match(U, m_Trunc(m_Specific(&Plan.getVF()))))
+ return all_of(cast<VPSingleDefRecipe>(U)->users(),
+ IsAllowedUser);
+ return IsAllowedUser(U);
}) &&
"User of VF that we can't transform to EVL.");
Plan.getVF().replaceUsesWithIf(&EVL, [](VPUser &U, unsigned Idx) {
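The assertion in fixupVFUsersForEVL previously required every in-loop user of
VF to be one of a few whitelisted recipes. After the minimal-bitwidth change
above, a trunc of VF can appear (VF is narrowed before scaling the step
vector), so the check now looks through such a trunc: accept U if
IsAllowedUser(U), or if U is trunc(VF) and all of U's users are themselves
allowed.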