[llvm] [VPlan] Materialize VectorTripCount in narrowInterleaveGroups. (PR #182146)
Florian Hahn via llvm-commits
llvm-commits at lists.llvm.org
Sat Mar 7 14:12:55 PST 2026
https://github.com/fhahn updated https://github.com/llvm/llvm-project/pull/182146
>From 5fc6c42b072b51d7259fa5e74f0062b078259ae7 Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Sat, 7 Mar 2026 18:38:38 +0000
Subject: [PATCH 1/3] !fixup add test
---
...-narrow-interleave-to-widen-memory-cost.ll | 131 ++++++++++++++++++
1 file changed, 131 insertions(+)
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-cost.ll
index 5e37f9eff4ba2..f25e19dab833a 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-cost.ll
@@ -502,4 +502,135 @@ exit:
ret void
}
+; Test that loop-vectorize does not crash when connecting the epilogue vector
+; loop for a loop with an interleave group and a preheader phi.
+define void @test_interleave_group_epilogue_with_preheader_phi(ptr %src, ptr %dst) #0 {
+; CHECK-LABEL: define void @test_interleave_group_epilogue_with_preheader_phi(
+; CHECK-SAME: ptr [[SRC:%.*]], ptr [[DST:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[SRC1:%.*]] = ptrtoint ptr [[SRC]] to i64
+; CHECK-NEXT: br label %[[ITER_CHECK:.*]]
+; CHECK: [[ITER_CHECK]]:
+; CHECK-NEXT: [[DST_PRE:%.*]] = phi ptr [ [[DST]], %[[ENTRY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = sub i64 0, [[SRC1]]
+; CHECK-NEXT: [[TMP1:%.*]] = lshr i64 [[TMP0]], 4
+; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], 2
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
+; CHECK: [[VECTOR_SCEVCHECK]]:
+; CHECK-NEXT: [[TMP3:%.*]] = trunc i64 [[SRC1]] to i4
+; CHECK-NEXT: [[TMP4:%.*]] = sub i4 0, [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = zext i4 [[TMP4]] to i64
+; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT: [[TMP6:%.*]] = sub i64 0, [[SRC1]]
+; CHECK-NEXT: [[TMP7:%.*]] = lshr i64 [[TMP6]], 4
+; CHECK-NEXT: [[MUL:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 16, i64 [[TMP7]])
+; CHECK-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i64, i1 } [[MUL]], 0
+; CHECK-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i64, i1 } [[MUL]], 1
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[DST]], i64 [[MUL_RESULT]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp ult ptr [[TMP8]], [[DST]]
+; CHECK-NEXT: [[TMP10:%.*]] = or i1 [[TMP9]], [[MUL_OVERFLOW]]
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[DST]], i64 8
+; CHECK-NEXT: [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 16, i64 [[TMP7]])
+; CHECK-NEXT: [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
+; CHECK-NEXT: [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i64 [[MUL_RESULT3]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ult ptr [[TMP11]], [[SCEVGEP]]
+; CHECK-NEXT: [[TMP13:%.*]] = or i1 [[TMP12]], [[MUL_OVERFLOW4]]
+; CHECK-NEXT: [[TMP14:%.*]] = or i1 [[IDENT_CHECK]], [[TMP10]]
+; CHECK-NEXT: [[TMP15:%.*]] = or i1 [[TMP14]], [[TMP13]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]]
+; CHECK: [[VECTOR_MAIN_LOOP_ITER_CHECK]]:
+; CHECK-NEXT: [[MIN_ITERS_CHECK5:%.*]] = icmp ult i64 [[TMP2]], 8
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK5]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], 8
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[INDEX]], 16
+; CHECK-NEXT: [[TMP17:%.*]] = add i64 [[TMP16]], 16
+; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP16]], 32
+; CHECK-NEXT: [[TMP19:%.*]] = add i64 [[TMP16]], 48
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[DST_PRE]], i64 [[TMP16]]
+; CHECK-NEXT: [[NEXT_GEP6:%.*]] = getelementptr i8, ptr [[DST_PRE]], i64 [[TMP17]]
+; CHECK-NEXT: [[NEXT_GEP7:%.*]] = getelementptr i8, ptr [[DST_PRE]], i64 [[TMP18]]
+; CHECK-NEXT: [[NEXT_GEP8:%.*]] = getelementptr i8, ptr [[DST_PRE]], i64 [[TMP19]]
+; CHECK-NEXT: store <2 x double> splat (double 1.000000e+00), ptr [[NEXT_GEP]], align 8
+; CHECK-NEXT: store <2 x double> splat (double 1.000000e+00), ptr [[NEXT_GEP6]], align 8
+; CHECK-NEXT: store <2 x double> splat (double 1.000000e+00), ptr [[NEXT_GEP7]], align 8
+; CHECK-NEXT: store <2 x double> splat (double 1.000000e+00), ptr [[NEXT_GEP8]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]]
+; CHECK: [[VEC_EPILOG_ITER_CHECK]]:
+; CHECK-NEXT: [[TMP21:%.*]] = mul i64 [[N_VEC]], 16
+; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[DST_PRE]], i64 [[TMP21]]
+; CHECK-NEXT: [[TMP22:%.*]] = mul i64 [[N_VEC]], 16
+; CHECK-NEXT: [[IND_END16:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP22]]
+; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 2
+; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF7]]
+; CHECK: [[VEC_EPILOG_PH]]:
+; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; CHECK-NEXT: [[N_MOD_VF9:%.*]] = urem i64 [[TMP2]], 2
+; CHECK-NEXT: [[N_VEC10:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF9]]
+; CHECK-NEXT: [[TMP23:%.*]] = mul i64 [[N_VEC10]], 16
+; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[DST_PRE]], i64 [[TMP23]]
+; CHECK-NEXT: [[TMP25:%.*]] = mul i64 [[N_VEC10]], 16
+; CHECK-NEXT: [[TMP26:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP25]]
+; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]]
+; CHECK: [[VEC_EPILOG_VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX11:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT13:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX11]], 16
+; CHECK-NEXT: [[NEXT_GEP12:%.*]] = getelementptr i8, ptr [[DST_PRE]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: store <2 x double> splat (double 1.000000e+00), ptr [[NEXT_GEP12]], align 8
+; CHECK-NEXT: [[INDEX_NEXT13]] = add nuw i64 [[INDEX11]], 1
+; CHECK-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT13]], [[N_VEC10]]
+; CHECK-NEXT: br i1 [[TMP27]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[CMP_N14:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC10]]
+; CHECK-NEXT: br i1 [[CMP_N14]], label %[[EXIT]], label %[[VEC_EPILOG_SCALAR_PH]]
+; CHECK: [[VEC_EPILOG_SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[TMP24]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END]], %[[VEC_EPILOG_ITER_CHECK]] ], [ [[DST_PRE]], %[[VECTOR_SCEVCHECK]] ], [ [[DST_PRE]], %[[ITER_CHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL15:%.*]] = phi ptr [ [[TMP26]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END16]], %[[VEC_EPILOG_ITER_CHECK]] ], [ [[SRC]], %[[VECTOR_SCEVCHECK]] ], [ [[SRC]], %[[ITER_CHECK]] ]
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[DST_PHI:%.*]] = phi ptr [ [[DST_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ]
+; CHECK-NEXT: [[SRC_PHI:%.*]] = phi ptr [ [[SRC_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL15]], %[[VEC_EPILOG_SCALAR_PH]] ]
+; CHECK-NEXT: store double 1.000000e+00, ptr [[DST_PHI]], align 8
+; CHECK-NEXT: [[DST_IM:%.*]] = getelementptr i8, ptr [[DST_PHI]], i64 8
+; CHECK-NEXT: store double 1.000000e+00, ptr [[DST_IM]], align 8
+; CHECK-NEXT: [[SRC_NEXT]] = getelementptr i8, ptr [[SRC_PHI]], i64 16
+; CHECK-NEXT: [[DST_NEXT]] = getelementptr i8, ptr [[DST_PHI]], i64 16
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[SRC_PHI]], null
+; CHECK-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %preheader
+
+preheader:
+ %dst.pre = phi ptr [ %dst, %entry ]
+ br label %loop
+
+loop:
+ %dst.phi = phi ptr [ %dst.next, %loop ], [ %dst.pre, %preheader ]
+ %src.phi = phi ptr [ %src.next, %loop ], [ %src, %preheader ]
+ store double 1.0, ptr %dst.phi, align 8
+ %dst.im = getelementptr i8, ptr %dst.phi, i64 8
+ store double 1.0, ptr %dst.im, align 8
+ %src.next = getelementptr i8, ptr %src.phi, i64 16
+ %dst.next = getelementptr i8, ptr %dst.phi, i64 16
+ %cmp = icmp eq ptr %src.phi, null
+ br i1 %cmp, label %exit, label %loop
+
+exit:
+ ret void
+}
+
attributes #0 = { "target-cpu"="neoverse-v2" }
>From 81e4d3b6cfff07a6f656c063dcef65f35a85d4b2 Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Tue, 17 Feb 2026 19:01:30 +0000
Subject: [PATCH 2/3] [VPlan] Materialize VectorTripCount in
narrowInterleaveGroups.
When narrowInterleaveGroups transforms a plan, VF and VFxUF are
materialized (replaced with concrete values). This patch also
materializes the VectorTripCount in the same transform.
This ensures that VectorTripCount is properly computed when the
narrow interleave transform is applied, instead of using the original VF
+ UF to compute the vector trip count. The previous behavior generated
correct code, but executed fewer iterations in the vector loop.
The change also enables stricter verification to prevent accesses of
VPSymbolicValues after materialization as a follow-up.
Note that in some cases we now miss branch folding, but that should be
addressed separately, https://github.com/llvm/llvm-project/pull/181252
---
.../Transforms/Vectorize/LoopVectorize.cpp | 22 ++-
.../Transforms/Vectorize/VPlanTransforms.cpp | 42 ++++--
.../Transforms/Vectorize/VPlanTransforms.h | 4 +-
...terleave-group-requires-scalar-epilogue.ll | 4 +-
...-narrow-interleave-to-widen-memory-cost.ll | 13 +-
...-interleave-to-widen-memory-derived-ivs.ll | 8 +-
...row-interleave-to-widen-memory-scalable.ll | 9 +-
...sform-narrow-interleave-to-widen-memory.ll | 6 +-
...ow-interleave-to-widen-memory-live-outs.ll | 2 +-
...sform-narrow-interleave-to-widen-memory.ll | 128 +++++++++++++++++-
10 files changed, 194 insertions(+), 44 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index bb4eef5a41c09..d517a7e35e3e6 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -7469,7 +7469,7 @@ DenseMap<const SCEV *, Value *> LoopVectorizationPlanner::executePlan(
VPlanTransforms::materializeBackedgeTakenCount(BestVPlan, VectorPH);
VPlanTransforms::materializeVectorTripCount(
BestVPlan, VectorPH, CM.foldTailByMasking(),
- CM.requiresScalarEpilogue(BestVF.isVector()));
+ CM.requiresScalarEpilogue(BestVF.isVector()), &BestVPlan.getVFxUF());
VPlanTransforms::materializeFactors(BestVPlan, VectorPH, BestVF);
VPlanTransforms::cse(BestVPlan);
VPlanTransforms::simplifyRecipes(BestVPlan);
@@ -9345,12 +9345,28 @@ static void fixScalarResumeValuesFromBypass(BasicBlock *BypassBlock, Loop *L,
// Fix induction resume values from the additional bypass block.
IRBuilder<> BypassBuilder(BypassBlock, BypassBlock->getFirstInsertionPt());
for (const auto &[IVPhi, II] : LVL.getInductionVars()) {
- auto *Inc = cast<PHINode>(IVPhi->getIncomingValueForBlock(PH));
Value *V = createInductionAdditionalBypassValues(
IVPhi, II, BypassBuilder, ExpandedSCEVs, MainVectorTripCount,
LVL.getPrimaryInduction());
// TODO: Directly add as extra operand to the VPResumePHI recipe.
- Inc->setIncomingValueForBlock(BypassBlock, V);
+ if (auto *Inc = dyn_cast<PHINode>(IVPhi->getIncomingValueForBlock(PH))) {
+ Inc->setIncomingValueForBlock(BypassBlock, V);
+ } else {
+ // If the resume value in the scalar preheader was simplified (e.g., when
+ // narrowInterleaveGroups optimized away the resume PHIs), create a new
+ // PHI to merge the bypass value with the original value.
+ Value *OrigVal = IVPhi->getIncomingValueForBlock(PH);
+ PHINode *NewPhi =
+ PHINode::Create(IVPhi->getType(), pred_size(PH), "bc.resume.val",
+ PH->getFirstNonPHIIt());
+ for (auto *Pred : predecessors(PH)) {
+ if (Pred == BypassBlock)
+ NewPhi->addIncoming(V, Pred);
+ else
+ NewPhi->addIncoming(OrigVal, Pred);
+ }
+ IVPhi->setIncomingValueForBlock(PH, NewPhi);
+ }
}
}
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index dc0edb178efbb..fe6059076a9d9 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -5060,7 +5060,8 @@ void VPlanTransforms::materializePacksAndUnpacks(VPlan &Plan) {
void VPlanTransforms::materializeVectorTripCount(VPlan &Plan,
VPBasicBlock *VectorPHVPBB,
bool TailByMasking,
- bool RequiresScalarEpilogue) {
+ bool RequiresScalarEpilogue,
+ VPValue *Step) {
VPSymbolicValue &VectorTC = Plan.getVectorTripCount();
// There's nothing to do if there are no users of the vector trip count or its
// IR value has already been set.
@@ -5069,8 +5070,14 @@ void VPlanTransforms::materializeVectorTripCount(VPlan &Plan,
VPValue *TC = Plan.getTripCount();
Type *TCTy = VPTypeAnalysis(Plan).inferScalarType(TC);
- VPBuilder Builder(VectorPHVPBB, VectorPHVPBB->begin());
- VPValue *Step = &Plan.getVFxUF();
+ VPBasicBlock::iterator InsertPt = VectorPHVPBB->begin();
+ if (auto *StepR = Step->getDefiningRecipe()) {
+ assert(StepR->getParent() == VectorPHVPBB &&
+ "Step must be defined in VectorPHVPBB");
+ // Insert after Step's definition to maintain valid def-use ordering.
+ InsertPt = std::next(StepR->getIterator());
+ }
+ VPBuilder Builder(VectorPHVPBB, InsertPt);
// If the tail is to be folded by masking, round the number of iterations N
// up to a multiple of Step instead of rounding down. This is done by first
@@ -5451,6 +5458,16 @@ VPlanTransforms::narrowInterleaveGroups(VPlan &Plan,
if (StoreGroups.empty())
return nullptr;
+ // Determine if a scalar epilogue is required. The middle block has exactly 2
+ // successors in the normal case, and 1 successor when a scalar epilogue must
+ // execute (unconditional branch to scalar preheader).
+ bool RequiresScalarEpilogue =
+ Plan.getMiddleBlock()->getNumSuccessors() == 1 &&
+ Plan.getMiddleBlock()->getSingleSuccessor() == Plan.getScalarPreheader();
+ // Bail out for tail-folding (middle block with a single successor to exit).
+ if (Plan.getMiddleBlock()->getNumSuccessors() != 2 && !RequiresScalarEpilogue)
+ return nullptr;
+
// All interleave groups in Plan can be narrowed for VFToOptimize. Split the
// original Plan into 2: a) a new clone which contains all VFs of Plan, except
// VFToOptimize, and b) the original Plan with VFToOptimize as single VF.
@@ -5481,21 +5498,30 @@ VPlanTransforms::narrowInterleaveGroups(VPlan &Plan,
// original iteration.
auto *CanIV = VectorLoop->getCanonicalIV();
auto *Inc = cast<VPInstruction>(CanIV->getBackedgeValue());
- VPBuilder PHBuilder(Plan.getVectorPreheader());
+ VPBasicBlock *VectorPH = Plan.getVectorPreheader();
+ VPBuilder PHBuilder(VectorPH, VectorPH->begin());
VPValue *UF = &Plan.getUF();
+ VPValue *Step;
if (VFToOptimize->isScalable()) {
VPValue *VScale = PHBuilder.createElementCount(
VectorLoop->getCanonicalIVType(), ElementCount::getScalable(1));
- VPValue *VScaleUF = PHBuilder.createOverflowingOp(
- Instruction::Mul, {VScale, UF}, {true, false});
- Inc->setOperand(1, VScaleUF);
+ Step = PHBuilder.createOverflowingOp(Instruction::Mul, {VScale, UF},
+ {true, false});
Plan.getVF().replaceAllUsesWith(VScale);
} else {
- Inc->setOperand(1, UF);
+ Step = UF;
Plan.getVF().replaceAllUsesWith(
Plan.getConstantInt(CanIV->getScalarType(), 1));
}
+
+ // Materialize vector trip count with the narrowed step.
+ materializeVectorTripCount(Plan, VectorPH, /*TailByMasking=*/false,
+ RequiresScalarEpilogue, Step);
+
+ Inc->setOperand(1, Step);
+ Plan.getVFxUF().replaceAllUsesWith(Step);
+
removeDeadRecipes(Plan);
assert(none_of(*VectorLoop->getEntryBasicBlock(),
IsaPred<VPVectorPointerRecipe>) &&
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
index 2956659e5df8b..5f060b32da847 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
@@ -401,10 +401,12 @@ struct VPlanTransforms {
PredicatedScalarEvolution &PSE);
/// Materialize vector trip count computations to a set of VPInstructions.
+ /// \p Step is used as the step value for the trip count computation.
static void materializeVectorTripCount(VPlan &Plan,
VPBasicBlock *VectorPHVPBB,
bool TailByMasking,
- bool RequiresScalarEpilogue);
+ bool RequiresScalarEpilogue,
+ VPValue *Step);
/// Materialize the backedge-taken count to be computed explicitly using
/// VPInstructions.
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-group-requires-scalar-epilogue.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-group-requires-scalar-epilogue.ll
index 7f1826eb95d60..0de23b2e5e483 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-group-requires-scalar-epilogue.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-group-requires-scalar-epilogue.ll
@@ -29,9 +29,9 @@ define void @interleave_group_exit_in_header(i64 %n, ptr %dst) {
; CHECK-NEXT: [[TMP7:%.*]] = or i1 [[TMP3]], [[TMP6]]
; CHECK-NEXT: br i1 [[TMP7]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 2
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 1
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 2, i64 [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 1, i64 [[N_MOD_VF]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[TMP9]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-cost.ll
index f25e19dab833a..b90ba59e95caa 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-cost.ll
@@ -87,7 +87,7 @@ define void @test_complex_add_double(ptr %res, ptr noalias %A, ptr noalias %B, i
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 2
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
@@ -314,7 +314,7 @@ define void @single_fmul_used_by_each_member(ptr noalias %A, ptr noalias %B, ptr
; CHECK-NEXT: [[MIN_ITERS_CHECK11:%.*]] = icmp ult i64 [[TMP0]], 8
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK11]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 8
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 4
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
@@ -369,8 +369,6 @@ define void @single_fmul_used_by_each_member(ptr noalias %A, ptr noalias %B, ptr
; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF7]]
; CHECK: [[VEC_EPILOG_PH]]:
; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
-; CHECK-NEXT: [[N_MOD_VF22:%.*]] = urem i64 [[TMP0]], 2
-; CHECK-NEXT: [[N_VEC23:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF22]]
; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]]
; CHECK: [[VEC_EPILOG_VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX24:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT25:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
@@ -384,13 +382,12 @@ define void @single_fmul_used_by_each_member(ptr noalias %A, ptr noalias %B, ptr
; CHECK-NEXT: [[TMP50:%.*]] = getelementptr { double, double }, ptr [[C]], i64 [[INDEX24]]
; CHECK-NEXT: store <2 x double> [[TMP48]], ptr [[TMP50]], align 8
; CHECK-NEXT: [[INDEX_NEXT25]] = add nuw i64 [[INDEX24]], 1
-; CHECK-NEXT: [[TMP51:%.*]] = icmp eq i64 [[INDEX_NEXT25]], [[N_VEC23]]
+; CHECK-NEXT: [[TMP51:%.*]] = icmp eq i64 [[INDEX_NEXT25]], [[TMP0]]
; CHECK-NEXT: br i1 [[TMP51]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N26:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC23]]
-; CHECK-NEXT: br i1 [[CMP_N26]], label %[[EXIT]], label %[[VEC_EPILOG_SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT]]
; CHECK: [[VEC_EPILOG_SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC23]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_SCEVCHECK]] ], [ 0, %[[ITER_CHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[VECTOR_SCEVCHECK]] ], [ 0, %[[ITER_CHECK]] ], [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-derived-ivs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-derived-ivs.ll
index fab0369de8aa0..da0e54ab5d8c8 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-derived-ivs.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-derived-ivs.ll
@@ -16,7 +16,7 @@ define void @derived_int_ivs(ptr noalias %a, ptr noalias %b, i64 %end) {
; VF2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], 2
; VF2-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; VF2: [[VECTOR_PH]]:
-; VF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], 2
+; VF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], 1
; VF2-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]]
; VF2-NEXT: [[TMP3:%.*]] = mul i64 [[N_VEC]], 16
; VF2-NEXT: [[TMP4:%.*]] = add i64 16, [[TMP3]]
@@ -46,7 +46,7 @@ define void @derived_int_ivs(ptr noalias %a, ptr noalias %b, i64 %end) {
; VF2IC2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], 4
; VF2IC2-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; VF2IC2: [[VECTOR_PH]]:
-; VF2IC2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], 4
+; VF2IC2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], 2
; VF2IC2-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]]
; VF2IC2-NEXT: [[TMP3:%.*]] = mul i64 [[N_VEC]], 16
; VF2IC2-NEXT: [[TMP4:%.*]] = add i64 16, [[TMP3]]
@@ -154,7 +154,7 @@ define void @derived_pointer_ivs(ptr noalias %a, ptr noalias %b, ptr %end) {
; VF2-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; VF2-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; VF2: [[VECTOR_PH]]:
-; VF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], 2
+; VF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], 1
; VF2-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]]
; VF2-NEXT: [[TMP9:%.*]] = mul i64 [[N_VEC]], 16
; VF2-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP9]]
@@ -203,7 +203,7 @@ define void @derived_pointer_ivs(ptr noalias %a, ptr noalias %b, ptr %end) {
; VF2IC2-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; VF2IC2-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; VF2IC2: [[VECTOR_PH]]:
-; VF2IC2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], 4
+; VF2IC2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], 2
; VF2IC2-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]]
; VF2IC2-NEXT: [[TMP9:%.*]] = mul i64 [[N_VEC]], 16
; VF2IC2-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP9]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll
index 3a9268cfe6013..f4e1dd836da73 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll
@@ -13,8 +13,7 @@ define void @load_store_interleave_group(ptr noalias %data) {
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 1
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP3]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP2]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
@@ -62,8 +61,7 @@ define void @test_2xi64_unary_op_load_interleave_group(ptr noalias %data, ptr no
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 1
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1111, [[TMP3]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1111, [[TMP2]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1111, [[N_MOD_VF]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
@@ -114,8 +112,7 @@ define void @narrow_interleave_tc_16_vf_4_if_4(ptr noalias %data) vscale_range(2
; CHECK-NEXT: br label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 16, [[TMP3]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 16, [[TMP2]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 16, [[N_MOD_VF]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/transform-narrow-interleave-to-widen-memory.ll b/llvm/test/Transforms/LoopVectorize/RISCV/transform-narrow-interleave-to-widen-memory.ll
index 5c82ecf0cc834..cbac950cd7f04 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/transform-narrow-interleave-to-widen-memory.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/transform-narrow-interleave-to-widen-memory.ll
@@ -42,8 +42,7 @@ define void @load_store_interleave_group(ptr noalias %data) {
; EPILOGUE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; EPILOGUE: [[VECTOR_PH]]:
; EPILOGUE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; EPILOGUE-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 1
-; EPILOGUE-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP3]]
+; EPILOGUE-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP2]]
; EPILOGUE-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
; EPILOGUE-NEXT: br label %[[VECTOR_BODY:.*]]
; EPILOGUE: [[VECTOR_BODY]]:
@@ -289,8 +288,7 @@ define void @load_store_interleave_group_i32(ptr noalias %data) {
; EPILOGUE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; EPILOGUE: [[VECTOR_PH]]:
; EPILOGUE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; EPILOGUE-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 2
-; EPILOGUE-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP3]]
+; EPILOGUE-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP2]]
; EPILOGUE-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
; EPILOGUE-NEXT: br label %[[VECTOR_BODY:.*]]
; EPILOGUE: [[VECTOR_BODY]]:
diff --git a/llvm/test/Transforms/LoopVectorize/X86/transform-narrow-interleave-to-widen-memory-live-outs.ll b/llvm/test/Transforms/LoopVectorize/X86/transform-narrow-interleave-to-widen-memory-live-outs.ll
index 6781835ecce23..2cd645e7b1773 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/transform-narrow-interleave-to-widen-memory-live-outs.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/transform-narrow-interleave-to-widen-memory-live-outs.ll
@@ -11,7 +11,7 @@ define i64 @test_4xi64_induction_live_out(ptr noalias %data, ptr noalias %factor
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 16
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 16
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 4
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
diff --git a/llvm/test/Transforms/LoopVectorize/X86/transform-narrow-interleave-to-widen-memory.ll b/llvm/test/Transforms/LoopVectorize/X86/transform-narrow-interleave-to-widen-memory.ll
index cfa601469464f..a8d68692428dc 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/transform-narrow-interleave-to-widen-memory.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/transform-narrow-interleave-to-widen-memory.ll
@@ -15,7 +15,7 @@ define void @test_4xi64(ptr noalias %data, ptr noalias %factor, i64 noundef %n)
; CHECK-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[N]], 16
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK1]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH1:.*]]
; CHECK: [[VECTOR_PH1]]:
-; CHECK-NEXT: [[N_MOD_VF1:%.*]] = urem i64 [[N]], 16
+; CHECK-NEXT: [[N_MOD_VF1:%.*]] = urem i64 [[N]], 4
; CHECK-NEXT: [[N_VEC1:%.*]] = sub i64 [[N]], [[N_MOD_VF1]]
; CHECK-NEXT: br label %[[VECTOR_BODY1:.*]]
; CHECK: [[VECTOR_BODY1]]:
@@ -66,8 +66,6 @@ define void @test_4xi64(ptr noalias %data, ptr noalias %factor, i64 noundef %n)
; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]]
; CHECK: [[VEC_EPILOG_PH]]:
; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC1]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_PH]] ]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 4
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -80,13 +78,12 @@ define void @test_4xi64(ptr noalias %data, ptr noalias %factor, i64 noundef %n)
; CHECK-NEXT: [[TMP4:%.*]] = mul <4 x i64> [[BROADCAST_SPLAT]], [[WIDE_LOAD]]
; CHECK-NEXT: store <4 x i64> [[TMP4]], ptr [[TMP3]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], 1
-; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ [[N_VEC1]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[N_VEC1]], %[[VEC_EPILOG_ITER_CHECK]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
@@ -761,6 +758,121 @@ loop:
exit:
ret void
}
+
+; Test with induction live-out to verify exit value is correctly computed.
+define i64 @test_4xi64_induction_live_out(ptr noalias %data, ptr noalias %factor, i64 noundef %n) {
+; CHECK-LABEL: define i64 @test_4xi64_induction_live_out(
+; CHECK-SAME: ptr noalias [[DATA:%.*]], ptr noalias [[FACTOR:%.*]], i64 noundef [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 16
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 4
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 1
+; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 3
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[FACTOR]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[FACTOR]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[FACTOR]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[FACTOR]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr [[TMP3]], align 8
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TMP7]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr [[TMP4]], align 8
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT4:%.*]] = insertelement <4 x i64> poison, i64 [[TMP8]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT5:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT4]], <4 x i64> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT6:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT7:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT6]], <4 x i64> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT8:%.*]] = insertelement <4 x i64> poison, i64 [[TMP10]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT9:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT8]], <4 x i64> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds { i64, i64, i64, i64 }, ptr [[DATA]], i64 [[INDEX]], i32 0
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds { i64, i64, i64, i64 }, ptr [[DATA]], i64 [[TMP0]], i32 0
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds { i64, i64, i64, i64 }, ptr [[DATA]], i64 [[TMP1]], i32 0
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds { i64, i64, i64, i64 }, ptr [[DATA]], i64 [[TMP2]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP11]], align 8
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP12]], align 8
+; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i64>, ptr [[TMP13]], align 8
+; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x i64>, ptr [[TMP14]], align 8
+; CHECK-NEXT: [[TMP15:%.*]] = mul <4 x i64> [[BROADCAST_SPLAT]], [[WIDE_LOAD]]
+; CHECK-NEXT: [[TMP16:%.*]] = mul <4 x i64> [[BROADCAST_SPLAT5]], [[WIDE_LOAD1]]
+; CHECK-NEXT: [[TMP17:%.*]] = mul <4 x i64> [[BROADCAST_SPLAT7]], [[WIDE_LOAD2]]
+; CHECK-NEXT: [[TMP18:%.*]] = mul <4 x i64> [[BROADCAST_SPLAT9]], [[WIDE_LOAD3]]
+; CHECK-NEXT: store <4 x i64> [[TMP15]], ptr [[TMP11]], align 8
+; CHECK-NEXT: store <4 x i64> [[TMP16]], ptr [[TMP12]], align 8
+; CHECK-NEXT: store <4 x i64> [[TMP17]], ptr [[TMP13]], align 8
+; CHECK-NEXT: store <4 x i64> [[TMP18]], ptr [[TMP14]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[FACTOR]], i64 [[IV]]
+; CHECK-NEXT: [[L_FACTOR:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
+; CHECK-NEXT: [[DATA_0:%.*]] = getelementptr inbounds { i64, i64, i64, i64 }, ptr [[DATA]], i64 [[IV]], i32 0
+; CHECK-NEXT: [[L_0:%.*]] = load i64, ptr [[DATA_0]], align 8
+; CHECK-NEXT: [[MUL_0:%.*]] = mul i64 [[L_FACTOR]], [[L_0]]
+; CHECK-NEXT: store i64 [[MUL_0]], ptr [[DATA_0]], align 8
+; CHECK-NEXT: [[DATA_1:%.*]] = getelementptr inbounds { i64, i64, i64, i64 }, ptr [[DATA]], i64 [[IV]], i32 1
+; CHECK-NEXT: [[L_1:%.*]] = load i64, ptr [[DATA_1]], align 8
+; CHECK-NEXT: [[MUL_1:%.*]] = mul i64 [[L_FACTOR]], [[L_1]]
+; CHECK-NEXT: store i64 [[MUL_1]], ptr [[DATA_1]], align 8
+; CHECK-NEXT: [[DATA_2:%.*]] = getelementptr inbounds { i64, i64, i64, i64 }, ptr [[DATA]], i64 [[IV]], i32 2
+; CHECK-NEXT: [[L_2:%.*]] = load i64, ptr [[DATA_2]], align 8
+; CHECK-NEXT: [[MUL_2:%.*]] = mul i64 [[L_FACTOR]], [[L_2]]
+; CHECK-NEXT: store i64 [[MUL_2]], ptr [[DATA_2]], align 8
+; CHECK-NEXT: [[DATA_3:%.*]] = getelementptr inbounds { i64, i64, i64, i64 }, ptr [[DATA]], i64 [[IV]], i32 3
+; CHECK-NEXT: [[L_3:%.*]] = load i64, ptr [[DATA_3]], align 8
+; CHECK-NEXT: [[MUL_3:%.*]] = mul i64 [[L_FACTOR]], [[L_3]]
+; CHECK-NEXT: store i64 [[MUL_3]], ptr [[DATA_3]], align 8
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP21:![0-9]+]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[IV_NEXT_LCSSA:%.*]] = phi i64 [ [[IV_NEXT]], %[[LOOP]] ], [ [[N_VEC]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i64 [[IV_NEXT_LCSSA]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %arrayidx = getelementptr inbounds i64, ptr %factor, i64 %iv
+ %l.factor = load i64, ptr %arrayidx, align 8
+ %data.0 = getelementptr inbounds { i64 , i64, i64, i64 }, ptr %data, i64 %iv, i32 0
+ %l.0 = load i64, ptr %data.0, align 8
+ %mul.0 = mul i64 %l.factor, %l.0
+ store i64 %mul.0, ptr %data.0, align 8
+ %data.1 = getelementptr inbounds { i64 , i64, i64, i64 }, ptr %data, i64 %iv, i32 1
+ %l.1 = load i64, ptr %data.1, align 8
+ %mul.1 = mul i64 %l.factor, %l.1
+ store i64 %mul.1, ptr %data.1, align 8
+ %data.2 = getelementptr inbounds { i64 , i64, i64, i64 }, ptr %data, i64 %iv, i32 2
+ %l.2 = load i64, ptr %data.2, align 8
+ %mul.2 = mul i64 %l.factor, %l.2
+ store i64 %mul.2, ptr %data.2, align 8
+ %data.3 = getelementptr inbounds { i64 , i64, i64, i64 }, ptr %data, i64 %iv, i32 3
+ %l.3 = load i64, ptr %data.3, align 8
+ %mul.3 = mul i64 %l.factor, %l.3
+ store i64 %mul.3, ptr %data.3, align 8
+ %iv.next = add nuw nsw i64 %iv, 1
+ %ec = icmp eq i64 %iv.next, %n
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ ret i64 %iv.next
+}
;.
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
@@ -782,4 +894,6 @@ exit:
; CHECK: [[LOOP17]] = distinct !{[[LOOP17]], [[META2]], [[META1]]}
; CHECK: [[LOOP18]] = distinct !{[[LOOP18]], [[META1]], [[META2]]}
; CHECK: [[LOOP19]] = distinct !{[[LOOP19]], [[META2]], [[META1]]}
+; CHECK: [[LOOP20]] = distinct !{[[LOOP20]], [[META1]], [[META2]]}
+; CHECK: [[LOOP21]] = distinct !{[[LOOP21]], [[META2]], [[META1]]}
;.
>From e748bc524a9812fb937a5634551e6c02bbea85fa Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Mon, 2 Mar 2026 16:46:31 +0000
Subject: [PATCH 3/3] !fixup simplify checks
Fix
---
.../Transforms/Vectorize/LoopVectorize.cpp | 3 ++-
.../Transforms/Vectorize/VPlanTransforms.cpp | 11 ++++------
...-narrow-interleave-to-widen-memory-cost.ll | 22 ++++++-------------
...sform-narrow-interleave-to-widen-memory.ll | 10 ++++-----
4 files changed, 17 insertions(+), 29 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index d517a7e35e3e6..2535cc83ffe9c 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -9350,7 +9350,8 @@ static void fixScalarResumeValuesFromBypass(BasicBlock *BypassBlock, Loop *L,
LVL.getPrimaryInduction());
// TODO: Directly add as extra operand to the VPResumePHI recipe.
if (auto *Inc = dyn_cast<PHINode>(IVPhi->getIncomingValueForBlock(PH))) {
- Inc->setIncomingValueForBlock(BypassBlock, V);
+ if (Inc->getBasicBlockIndex(BypassBlock) != -1)
+ Inc->setIncomingValueForBlock(BypassBlock, V);
} else {
// If the resume value in the scalar preheader was simplified (e.g., when
// narrowInterleaveGroups optimized away the resume PHIs), create a new
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index fe6059076a9d9..8d0b8a235dcbe 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -5458,14 +5458,12 @@ VPlanTransforms::narrowInterleaveGroups(VPlan &Plan,
if (StoreGroups.empty())
return nullptr;
- // Determine if a scalar epilogue is required. The middle block has exactly 2
- // successors in the normal case, and 1 successor when a scalar epilogue must
- // execute (unconditional branch to scalar preheader).
+ VPBasicBlock *MiddleVPBB = Plan.getMiddleBlock();
bool RequiresScalarEpilogue =
- Plan.getMiddleBlock()->getNumSuccessors() == 1 &&
- Plan.getMiddleBlock()->getSingleSuccessor() == Plan.getScalarPreheader();
+ MiddleVPBB->getNumSuccessors() == 1 &&
+ MiddleVPBB->getSingleSuccessor() == Plan.getScalarPreheader();
// Bail out for tail-folding (middle block with a single successor to exit).
- if (Plan.getMiddleBlock()->getNumSuccessors() != 2 && !RequiresScalarEpilogue)
+ if (MiddleVPBB->getNumSuccessors() != 2 && !RequiresScalarEpilogue)
return nullptr;
// All interleave groups in Plan can be narrowed for VFToOptimize. Split the
@@ -5514,7 +5512,6 @@ VPlanTransforms::narrowInterleaveGroups(VPlan &Plan,
Plan.getVF().replaceAllUsesWith(
Plan.getConstantInt(CanIV->getScalarType(), 1));
}
-
// Materialize vector trip count with the narrowed step.
materializeVectorTripCount(Plan, VectorPH, /*TailByMasking=*/false,
RequiresScalarEpilogue, Step);
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-cost.ll
index b90ba59e95caa..2896618cd853a 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-cost.ll
@@ -541,7 +541,7 @@ define void @test_interleave_group_epilogue_with_preheader_phi(ptr %src, ptr %ds
; CHECK-NEXT: [[MIN_ITERS_CHECK5:%.*]] = icmp ult i64 [[TMP2]], 8
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK5]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], 8
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], 4
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
@@ -573,12 +573,6 @@ define void @test_interleave_group_epilogue_with_preheader_phi(ptr %src, ptr %ds
; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF7]]
; CHECK: [[VEC_EPILOG_PH]]:
; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
-; CHECK-NEXT: [[N_MOD_VF9:%.*]] = urem i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_VEC10:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF9]]
-; CHECK-NEXT: [[TMP23:%.*]] = mul i64 [[N_VEC10]], 16
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[DST_PRE]], i64 [[TMP23]]
-; CHECK-NEXT: [[TMP25:%.*]] = mul i64 [[N_VEC10]], 16
-; CHECK-NEXT: [[TMP26:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP25]]
; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]]
; CHECK: [[VEC_EPILOG_VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX11:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT13:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
@@ -586,18 +580,16 @@ define void @test_interleave_group_epilogue_with_preheader_phi(ptr %src, ptr %ds
; CHECK-NEXT: [[NEXT_GEP12:%.*]] = getelementptr i8, ptr [[DST_PRE]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: store <2 x double> splat (double 1.000000e+00), ptr [[NEXT_GEP12]], align 8
; CHECK-NEXT: [[INDEX_NEXT13]] = add nuw i64 [[INDEX11]], 1
-; CHECK-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT13]], [[N_VEC10]]
-; CHECK-NEXT: br i1 [[TMP27]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; CHECK-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT13]], [[TMP2]]
+; CHECK-NEXT: br i1 [[TMP23]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N14:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC10]]
-; CHECK-NEXT: br i1 [[CMP_N14]], label %[[EXIT]], label %[[VEC_EPILOG_SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT]]
; CHECK: [[VEC_EPILOG_SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[TMP24]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END]], %[[VEC_EPILOG_ITER_CHECK]] ], [ [[DST_PRE]], %[[VECTOR_SCEVCHECK]] ], [ [[DST_PRE]], %[[ITER_CHECK]] ]
-; CHECK-NEXT: [[BC_RESUME_VAL15:%.*]] = phi ptr [ [[TMP26]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END16]], %[[VEC_EPILOG_ITER_CHECK]] ], [ [[SRC]], %[[VECTOR_SCEVCHECK]] ], [ [[SRC]], %[[ITER_CHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[SRC]], %[[VECTOR_SCEVCHECK]] ], [ [[SRC]], %[[ITER_CHECK]] ], [ [[IND_END16]], %[[VEC_EPILOG_ITER_CHECK]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[DST_PHI:%.*]] = phi ptr [ [[DST_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ]
-; CHECK-NEXT: [[SRC_PHI:%.*]] = phi ptr [ [[SRC_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL15]], %[[VEC_EPILOG_SCALAR_PH]] ]
+; CHECK-NEXT: [[DST_PHI:%.*]] = phi ptr [ [[DST_NEXT:%.*]], %[[LOOP]] ], [ [[DST_PRE]], %[[VEC_EPILOG_SCALAR_PH]] ]
+; CHECK-NEXT: [[SRC_PHI:%.*]] = phi ptr [ [[SRC_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ]
; CHECK-NEXT: store double 1.000000e+00, ptr [[DST_PHI]], align 8
; CHECK-NEXT: [[DST_IM:%.*]] = getelementptr i8, ptr [[DST_PHI]], i64 8
; CHECK-NEXT: store double 1.000000e+00, ptr [[DST_IM]], align 8
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/transform-narrow-interleave-to-widen-memory.ll b/llvm/test/Transforms/LoopVectorize/RISCV/transform-narrow-interleave-to-widen-memory.ll
index cbac950cd7f04..2162ba3fb7ba4 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/transform-narrow-interleave-to-widen-memory.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/transform-narrow-interleave-to-widen-memory.ll
@@ -125,10 +125,9 @@ define void @interleave_group_with_countable_early_exit(i64 %n, ptr %dst) {
; CHECK-NEXT: br i1 [[TMP9]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP11:%.*]] = shl nuw i64 [[TMP10]], 1
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP11]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP10]]
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP13:%.*]] = select i1 [[TMP12]], i64 [[TMP11]], i64 [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP13:%.*]] = select i1 [[TMP12]], i64 [[TMP10]], i64 [[N_MOD_VF]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[TMP13]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
@@ -187,10 +186,9 @@ define void @interleave_group_with_countable_early_exit(i64 %n, ptr %dst) {
; EPILOGUE-NEXT: br i1 [[TMP9]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; EPILOGUE: [[VECTOR_PH]]:
; EPILOGUE-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; EPILOGUE-NEXT: [[TMP11:%.*]] = shl nuw i64 [[TMP10]], 1
-; EPILOGUE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP11]]
+; EPILOGUE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP10]]
; EPILOGUE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; EPILOGUE-NEXT: [[TMP13:%.*]] = select i1 [[TMP12]], i64 [[TMP11]], i64 [[N_MOD_VF]]
+; EPILOGUE-NEXT: [[TMP13:%.*]] = select i1 [[TMP12]], i64 [[TMP10]], i64 [[N_MOD_VF]]
; EPILOGUE-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[TMP13]]
; EPILOGUE-NEXT: br label %[[VECTOR_BODY:.*]]
; EPILOGUE: [[VECTOR_BODY]]:
More information about the llvm-commits
mailing list