[llvm] [VPlan] Introduce cannotHoistOrSinkRecipe, fix miscompile (PR #162674)
Ramkumar Ramachandra via llvm-commits
llvm-commits at lists.llvm.org
Mon Oct 27 04:36:45 PDT 2025
https://github.com/artagnon updated https://github.com/llvm/llvm-project/pull/162674
>From 623227f0ccf00df1af0b00d143a306d52dd598dc Mon Sep 17 00:00:00 2001
From: Ramkumar Ramachandra <ramkumar.ramachandra at codasip.com>
Date: Mon, 20 Oct 2025 11:19:17 +0100
Subject: [PATCH 1/2] [LV] cannotHoistOrSinkRecipe pre-commit test
---
...t-order-recurrence-multiply-recurrences.ll | 99 +++++++++++++++++++
1 file changed, 99 insertions(+)
diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-multiply-recurrences.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-multiply-recurrences.ll
index 74129806ad6fb..444180b91e02f 100644
--- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-multiply-recurrences.ll
+++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-multiply-recurrences.ll
@@ -425,3 +425,102 @@ loop:
exit:
ret void
}
+
+define void @hoist_previous_value_and_operand_load(ptr %dst) {
+; CHECK-LABEL: @hoist_previous_value_and_operand_load(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x i32> [ <i32 poison, i32 poison, i32 poison, i32 1>, [[VECTOR_PH]] ], [ [[BROADCAST_SPLAT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VECTOR_RECUR1:%.*]] = phi <4 x i32> [ <i32 poison, i32 poison, i32 poison, i32 0>, [[VECTOR_PH]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 1, [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[DST]], align 4
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP1]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR]], <4 x i32> [[BROADCAST_SPLAT]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; CHECK-NEXT: [[TMP3]] = or <4 x i32> [[TMP2]], splat (i32 3)
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR1]], <4 x i32> [[TMP3]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; CHECK-NEXT: store <4 x i32> [[TMP4]], ptr [[TMP0]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 336
+; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i32> [[TMP3]], i32 3
+; CHECK-NEXT: br label [[SCALAR_PH:%.*]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 337, [[SCALAR_PH]] ], [ [[ADD:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[FOR_1:%.*]] = phi i32 [ [[TMP1]], [[SCALAR_PH]] ], [ [[LOAD:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[FOR_2:%.*]] = phi i32 [ [[VECTOR_RECUR_EXTRACT]], [[SCALAR_PH]] ], [ [[OR:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[OR]] = or i32 [[FOR_1]], 3
+; CHECK-NEXT: [[ADD]] = add i64 [[IV]], 1
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[IV]]
+; CHECK-NEXT: store i32 [[FOR_2]], ptr [[GEP]], align 4
+; CHECK-NEXT: [[ICMP:%.*]] = icmp ult i64 [[IV]], 337
+; CHECK-NEXT: [[LOAD]] = load i32, ptr [[DST]], align 4
+; CHECK-NEXT: br i1 [[ICMP]], label [[LOOP]], label [[EXIT:%.*]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 1, %entry ], [ %add, %loop ]
+ %for.1 = phi i32 [ 1, %entry ], [ %load, %loop ]
+ %for.2 = phi i32 [ 0, %entry ], [ %or, %loop ]
+ %or = or i32 %for.1, 3
+ %add = add i64 %iv, 1
+ %gep = getelementptr inbounds i32, ptr %dst, i64 %iv
+ store i32 %for.2, ptr %gep
+ %icmp = icmp ult i64 %iv, 337
+ %load = load i32, ptr %dst
+ br i1 %icmp, label %loop, label %exit
+
+exit:
+ ret void
+}
+
+define void @hoist_previous_value_and_operand_assume(ptr %dst) {
+; CHECK-LABEL: @hoist_previous_value_and_operand_assume(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 1, [[ENTRY:%.*]] ], [ [[ADD:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[FOR_1:%.*]] = phi i1 [ true, [[ENTRY]] ], [ [[TRUNC:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[FOR_2:%.*]] = phi i1 [ false, [[ENTRY]] ], [ [[OR:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[OR]] = or i1 [[FOR_1]], true
+; CHECK-NEXT: [[ADD]] = add i64 [[IV]], 1
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 [[IV]]
+; CHECK-NEXT: store i1 [[FOR_2]], ptr [[GEP]], align 1
+; CHECK-NEXT: [[ICMP:%.*]] = icmp ult i64 [[IV]], 337
+; CHECK-NEXT: call void @llvm.assume(i1 [[FOR_1]])
+; CHECK-NEXT: [[TRUNC]] = trunc i64 [[IV]] to i1
+; CHECK-NEXT: br i1 [[ICMP]], label [[LOOP]], label [[EXIT:%.*]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 1, %entry ], [ %add, %loop ]
+ %for.1 = phi i1 [ 1, %entry ], [ %trunc, %loop ]
+ %for.2 = phi i1 [ 0, %entry ], [ %or, %loop ]
+ %or = or i1 %for.1, true
+ %add = add i64 %iv, 1
+ %gep = getelementptr inbounds i32, ptr %dst, i64 %iv
+ store i1 %for.2, ptr %gep
+ %icmp = icmp ult i64 %iv, 337
+ call void @llvm.assume(i1 %for.1)
+ %trunc = trunc i64 %iv to i1
+ br i1 %icmp, label %loop, label %exit
+
+exit:
+ ret void
+}
>From 63bfa54c21d7e1b1862e19241e57ea5b3a6d5fa3 Mon Sep 17 00:00:00 2001
From: Ramkumar Ramachandra <ramkumar.ramachandra at codasip.com>
Date: Thu, 9 Oct 2025 15:51:44 +0100
Subject: [PATCH 2/2] [VPlan] Introduce cannotHoistOrSinkRecipe
Factor out the common code that determines the legality of hoisting and
sinking. At the additional use-sites, any functional change amounts to
fixing an esoteric bug.
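To illustrate the shape of the refactor, here is a minimal, self-contained
sketch of a single legality predicate consumed by both a hoisting and a
sinking path. The names are placeholders, not the actual VPlan API; the
real predicate is cannotHoistOrSinkRecipe in the diff below.

  #include <cassert>

  // Toy stand-in for a recipe; the real check inspects VPRecipeBase.
  struct Recipe {
    bool HasSideEffects;
    bool ReadsMemory;
    bool IsPhi;
  };

  // One predicate shared by hoisting and sinking, mirroring the intent of
  // cannotHoistOrSinkRecipe (simplified: no assume/alloca special cases).
  static bool cannotHoistOrSink(const Recipe &R) {
    return R.HasSideEffects || R.ReadsMemory || R.IsPhi;
  }

  static bool tryHoist(const Recipe &R) { return !cannotHoistOrSink(R); }
  static bool trySink(const Recipe &R) { return !cannotHoistOrSink(R); }

  int main() {
    Recipe Load{/*HasSideEffects=*/false, /*ReadsMemory=*/true,
                /*IsPhi=*/false};
    // Both paths now reject a memory read; before the refactor the sinking
    // path only checked for side effects.
    assert(!tryHoist(Load) && !trySink(Load));
  }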
---
.../lib/Transforms/Vectorize/VPlanRecipes.cpp | 4 ++
.../Transforms/Vectorize/VPlanTransforms.cpp | 49 +++++++++----------
...t-order-recurrence-multiply-recurrences.ll | 33 ++-----------
3 files changed, 31 insertions(+), 55 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index 931a5b7582c4e..343df72dfe7ba 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -70,6 +70,7 @@ bool VPRecipeBase::mayWriteToMemory() const {
return cast<VPWidenIntrinsicRecipe>(this)->mayWriteToMemory();
case VPCanonicalIVPHISC:
case VPBranchOnMaskSC:
+ case VPDerivedIVSC:
case VPFirstOrderRecurrencePHISC:
case VPReductionPHISC:
case VPScalarIVStepsSC:
@@ -86,6 +87,7 @@ bool VPRecipeBase::mayWriteToMemory() const {
case VPWidenLoadEVLSC:
case VPWidenLoadSC:
case VPWidenPHISC:
+ case VPWidenPointerInductionSC:
case VPWidenSC:
case VPWidenSelectSC: {
const Instruction *I =
@@ -119,6 +121,7 @@ bool VPRecipeBase::mayReadFromMemory() const {
case VPWidenIntrinsicSC:
return cast<VPWidenIntrinsicRecipe>(this)->mayReadFromMemory();
case VPBranchOnMaskSC:
+ case VPDerivedIVSC:
case VPFirstOrderRecurrencePHISC:
case VPPredInstPHISC:
case VPScalarIVStepsSC:
@@ -134,6 +137,7 @@ bool VPRecipeBase::mayReadFromMemory() const {
case VPWidenGEPSC:
case VPWidenIntOrFpInductionSC:
case VPWidenPHISC:
+ case VPWidenPointerInductionSC:
case VPWidenSC:
case VPWidenSelectSC: {
const Instruction *I =
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 84817d78a077a..f38193cf11cb8 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -130,6 +130,24 @@ bool VPlanTransforms::tryToConvertVPInstructionsToVPRecipes(
return true;
}
+/// Return true if we do not know how to (mechanically) hoist or sink \p R out
+/// of a loop region.
+static bool cannotHoistOrSinkRecipe(const VPRecipeBase &R) {
+ // Assumes don't alias anything or throw; as long as they're guaranteed to
+ // execute, they're safe to hoist.
+ if (match(&R, m_Intrinsic<Intrinsic::assume>()))
+ return false;
+
+ // TODO: Relax checks in the future, e.g. we could also hoist reads, if their
+ // memory location is not modified in the vector loop.
+ if (R.mayHaveSideEffects() || R.mayReadFromMemory() || R.isPhi())
+ return true;
+
+ // Allocas cannot be hoisted.
+ auto *RepR = dyn_cast<VPReplicateRecipe>(&R);
+ return RepR && RepR->getOpcode() == Instruction::Alloca;
+}
+
static bool sinkScalarOperands(VPlan &Plan) {
auto Iter = vp_depth_first_deep(Plan.getEntry());
bool Changed = false;
@@ -1825,7 +1843,7 @@ sinkRecurrenceUsersAfterPrevious(VPFirstOrderRecurrencePHIRecipe *FOR,
VPDT.properlyDominates(Previous, SinkCandidate))
return true;
- if (SinkCandidate->mayHaveSideEffects())
+ if (cannotHoistOrSinkRecipe(*SinkCandidate))
return false;
WorkList.push_back(SinkCandidate);
@@ -1865,7 +1883,7 @@ sinkRecurrenceUsersAfterPrevious(VPFirstOrderRecurrencePHIRecipe *FOR,
static bool hoistPreviousBeforeFORUsers(VPFirstOrderRecurrencePHIRecipe *FOR,
VPRecipeBase *Previous,
VPDominatorTree &VPDT) {
- if (Previous->mayHaveSideEffects() || Previous->mayReadFromMemory())
+ if (cannotHoistOrSinkRecipe(*Previous))
return false;
// Collect recipes that need hoisting.
@@ -1912,11 +1930,6 @@ static bool hoistPreviousBeforeFORUsers(VPFirstOrderRecurrencePHIRecipe *FOR,
return nullptr;
return HoistCandidate;
};
- auto CanHoist = [&](VPRecipeBase *HoistCandidate) {
- // Avoid hoisting candidates with side-effects, as we do not yet analyze
- // associated dependencies.
- return !HoistCandidate->mayHaveSideEffects();
- };
if (!NeedsHoisting(Previous->getVPSingleValue()))
return true;
@@ -1928,7 +1941,7 @@ static bool hoistPreviousBeforeFORUsers(VPFirstOrderRecurrencePHIRecipe *FOR,
VPRecipeBase *Current = HoistCandidates[I];
assert(Current->getNumDefinedValues() == 1 &&
"only recipes with a single defined value expected");
- if (!CanHoist(Current))
+ if (cannotHoistOrSinkRecipe(*Current))
return false;
for (VPValue *Op : Current->operands()) {
@@ -2143,24 +2156,6 @@ void VPlanTransforms::cse(VPlan &Plan) {
static void licm(VPlan &Plan) {
VPBasicBlock *Preheader = Plan.getVectorPreheader();
- // Return true if we do not know how to (mechanically) hoist a given recipe
- // out of a loop region.
- auto CannotHoistRecipe = [](VPRecipeBase &R) {
- // Assumes don't alias anything or throw; as long as they're guaranteed to
- // execute, they're safe to hoist.
- if (match(&R, m_Intrinsic<Intrinsic::assume>()))
- return false;
-
- // TODO: Relax checks in the future, e.g. we could also hoist reads, if
- // their memory location is not modified in the vector loop.
- if (R.mayHaveSideEffects() || R.mayReadFromMemory() || R.isPhi())
- return true;
-
- // Allocas cannot be hoisted.
- auto *RepR = dyn_cast<VPReplicateRecipe>(&R);
- return RepR && RepR->getOpcode() == Instruction::Alloca;
- };
-
// Hoist any loop invariant recipes from the vector loop region to the
// preheader. Preform a shallow traversal of the vector loop region, to
// exclude recipes in replicate regions. Since the top-level blocks in the
@@ -2172,7 +2167,7 @@ static void licm(VPlan &Plan) {
for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
vp_depth_first_shallow(LoopRegion->getEntry()))) {
for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
- if (CannotHoistRecipe(R))
+ if (cannotHoistOrSinkRecipe(R))
continue;
if (any_of(R.operands(), [](VPValue *Op) {
return !Op->isDefinedOutsideLoopRegions();
diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-multiply-recurrences.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-multiply-recurrences.ll
index 444180b91e02f..12d73a3ffe27e 100644
--- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-multiply-recurrences.ll
+++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-multiply-recurrences.ll
@@ -429,41 +429,18 @@ exit:
define void @hoist_previous_value_and_operand_load(ptr %dst) {
; CHECK-LABEL: @hoist_previous_value_and_operand_load(
; CHECK-NEXT: entry:
-; CHECK-NEXT: br label [[VECTOR_PH:%.*]]
-; CHECK: vector.ph:
-; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
-; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x i32> [ <i32 poison, i32 poison, i32 poison, i32 1>, [[VECTOR_PH]] ], [ [[BROADCAST_SPLAT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VECTOR_RECUR1:%.*]] = phi <4 x i32> [ <i32 poison, i32 poison, i32 poison, i32 0>, [[VECTOR_PH]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 1, [[INDEX]]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[DST]], align 4
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP1]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR]], <4 x i32> [[BROADCAST_SPLAT]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
-; CHECK-NEXT: [[TMP3]] = or <4 x i32> [[TMP2]], splat (i32 3)
-; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR1]], <4 x i32> [[TMP3]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
-; CHECK-NEXT: store <4 x i32> [[TMP4]], ptr [[TMP0]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 336
-; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
-; CHECK: middle.block:
-; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i32> [[TMP3]], i32 3
-; CHECK-NEXT: br label [[SCALAR_PH:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 337, [[SCALAR_PH]] ], [ [[ADD:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[FOR_1:%.*]] = phi i32 [ [[TMP1]], [[SCALAR_PH]] ], [ [[LOAD:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[FOR_2:%.*]] = phi i32 [ [[VECTOR_RECUR_EXTRACT]], [[SCALAR_PH]] ], [ [[OR:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 1, [[ENTRY:%.*]] ], [ [[ADD:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[FOR_1:%.*]] = phi i32 [ 1, [[ENTRY]] ], [ [[LOAD:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[FOR_2:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[OR:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OR]] = or i32 [[FOR_1]], 3
; CHECK-NEXT: [[ADD]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[IV]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 [[IV]]
; CHECK-NEXT: store i32 [[FOR_2]], ptr [[GEP]], align 4
; CHECK-NEXT: [[ICMP:%.*]] = icmp ult i64 [[IV]], 337
; CHECK-NEXT: [[LOAD]] = load i32, ptr [[DST]], align 4
-; CHECK-NEXT: br i1 [[ICMP]], label [[LOOP]], label [[EXIT:%.*]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: br i1 [[ICMP]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK: exit:
; CHECK-NEXT: ret void
;