[llvm] [SCEV] Rewrite more SCEVAddExpr when applying guards. (PR #159942)
Florian Hahn via llvm-commits
llvm-commits at lists.llvm.org
Sun Nov 2 07:26:08 PST 2025
https://github.com/fhahn updated https://github.com/llvm/llvm-project/pull/159942
>From c424404f033a41e8a4de79dd0e2fff5cbe04aa04 Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Fri, 19 Sep 2025 10:22:23 +0100
Subject: [PATCH 1/4] [SCEV] Rewrite more SCEVAddExpr when applying guards.
When rewriting SCEVAddExprs to apply information from guards, check if
we have information for the expression itself. If so, apply it.
When we have an expression of the form (Const + A), check if we have
guard info for (Const + 1 + A) and use it. This is needed to avoid
regressions in a few cases where we have backedge-taken counts with a
subtracted constant.
Rewriting expressions could cause regressions, e.g. when comparing two
SCEV expressions where only one side can be rewritten, but I could not
find any cases in practice where this happens more often with this patch.
Depends on https://github.com/llvm/llvm-project/pull/160012 (included in
PR)
Proofs for some of the test changes: https://alive2.llvm.org/ce/z/RPX6t_
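To make the new lookup concrete, here is a minimal illustrative loop. This
is my own sketch, not one of the tests touched by this patch; the function
name and the concrete constant 1 are placeholders. The dominating branch
is assumed to add the fact (%n /u 2) == 1 to the guard map:

  define void @sketch(i32 %n) {
  entry:
    %d = udiv i32 %n, 2                 ; A = (%n /u 2)
    %guard = icmp eq i32 %d, 1          ; guard: (%n /u 2) == 1
    br i1 %guard, label %loop, label %exit

  loop:
    %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
    %iv.next = add nuw nsw i32 %iv, 1
    %ec = icmp ne i32 %iv.next, %d      ; BTC has the shape (-1 + (%n /u 2))
    br i1 %ec, label %loop, label %exit

  exit:
    ret void
  }

Here the backedge-taken count is an add of the form (Const + A) with
Const = -1 and A = (%n /u 2). The new code in visitAddExpr probes the
guard map for (Const + 1 + A) = (%n /u 2); since the guard pins that to
1, the whole expression can be rewritten to 1 - 1 = 0, the same kind of
folding visible in the backedge-taken-count-guard-info-apply-to-adds.ll
diff below.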
---
llvm/lib/Analysis/ScalarEvolution.cpp | 17 +++++++++++
...ge-taken-count-guard-info-apply-to-adds.ll | 6 ++--
.../ScalarEvolution/trip-count-minmax.ll | 12 ++++----
.../IndVarSimplify/canonicalize-cmp.ll | 6 ++--
.../Transforms/LoopIdiom/add-nsw-zext-fold.ll | 6 ++--
.../runtime-unroll-assume-no-remainder.ll | 30 +++++++++++++++++--
.../dont-fold-tail-for-divisible-TC.ll | 2 +-
.../runtime-checks-difference.ll | 7 +----
8 files changed, 60 insertions(+), 26 deletions(-)
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index a31f17b1936d6..3d05cb8a7e054 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -16006,6 +16006,10 @@ const SCEV *ScalarEvolution::LoopGuards::rewrite(const SCEV *Expr) const {
}
const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
+ if (const SCEV *S = Map.lookup(Expr))
+ return S;
+
+
// Helper to check if S is a subtraction (A - B) where A != B, and if so,
// return UMax(S, 1).
auto RewriteSubtraction = [&](const SCEV *S) -> const SCEV * {
@@ -16040,7 +16044,20 @@ const SCEV *ScalarEvolution::LoopGuards::rewrite(const SCEV *Expr) const {
ScalarEvolution::maskFlags(Expr->getNoWrapFlags(), FlagMask));
if (const SCEV *S = Map.lookup(Add))
return SE.getAddExpr(Expr->getOperand(0), S);
+
+ // For expressions of the form (Const + A), check if we have guard info
+ // for (Const + 1 + A), and rewrite to ((Const + 1 + A) - 1). This makes
+ // sure we don't loose information when rewriting expressions based on
+ // back-edge taken counts in some cases..
+ if (Expr->getNumOperands() == 2) {
+ auto *NewC =
+ SE.getAddExpr(Expr->getOperand(0), SE.getOne(Expr->getType()));
+ if (const SCEV *S =
+ Map.lookup(SE.getAddExpr(NewC, Expr->getOperand(1))))
+ return SE.getMinusSCEV(S, SE.getOne(Expr->getType()));
+ }
}
+
SmallVector<const SCEV *, 2> Operands;
bool Changed = false;
for (const auto *Op : Expr->operands()) {
diff --git a/llvm/test/Analysis/ScalarEvolution/backedge-taken-count-guard-info-apply-to-adds.ll b/llvm/test/Analysis/ScalarEvolution/backedge-taken-count-guard-info-apply-to-adds.ll
index 6b2c78cebc44a..5ea836d3b8067 100644
--- a/llvm/test/Analysis/ScalarEvolution/backedge-taken-count-guard-info-apply-to-adds.ll
+++ b/llvm/test/Analysis/ScalarEvolution/backedge-taken-count-guard-info-apply-to-adds.ll
@@ -33,9 +33,9 @@ declare void @clobber()
define void @test_add_sub_1_guard(ptr %src, i32 %n) {
; CHECK-LABEL: 'test_add_sub_1_guard'
; CHECK-NEXT: Determining loop execution counts for: @test_add_sub_1_guard
-; CHECK-NEXT: Loop %loop: backedge-taken count is (zext i32 (-1 + (%n /u 2))<nsw> to i64)
-; CHECK-NEXT: Loop %loop: constant max backedge-taken count is i64 4294967295
-; CHECK-NEXT: Loop %loop: symbolic max backedge-taken count is (zext i32 (-1 + (%n /u 2))<nsw> to i64)
+; CHECK-NEXT: Loop %loop: backedge-taken count is i64 0
+; CHECK-NEXT: Loop %loop: constant max backedge-taken count is i64 0
+; CHECK-NEXT: Loop %loop: symbolic max backedge-taken count is i64 0
; CHECK-NEXT: Loop %loop: Trip multiple is 1
;
entry:
diff --git a/llvm/test/Analysis/ScalarEvolution/trip-count-minmax.ll b/llvm/test/Analysis/ScalarEvolution/trip-count-minmax.ll
index d38010403dad7..2f0627b7d4476 100644
--- a/llvm/test/Analysis/ScalarEvolution/trip-count-minmax.ll
+++ b/llvm/test/Analysis/ScalarEvolution/trip-count-minmax.ll
@@ -102,12 +102,12 @@ define void @umax(i32 noundef %a, i32 noundef %b) {
; CHECK-NEXT: %cond = select i1 %cmp, i32 %mul, i32 %mul1
; CHECK-NEXT: --> ((2 * %a) umax (4 * %b)) U: [0,-1) S: [-2147483648,2147483647)
; CHECK-NEXT: %i.011 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
-; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%for.body> U: [0,-2147483648) S: [0,-2147483648) Exits: (-1 + ((2 * %a) umax (4 * %b))) LoopDispositions: { %for.body: Computable }
+; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%for.body> U: [0,2147483647) S: [0,2147483647) Exits: (-1 + ((2 * %a) umax (4 * %b))) LoopDispositions: { %for.body: Computable }
; CHECK-NEXT: %inc = add nuw nsw i32 %i.011, 1
-; CHECK-NEXT: --> {1,+,1}<nuw><%for.body> U: [1,-1) S: [1,-1) Exits: ((2 * %a) umax (4 * %b)) LoopDispositions: { %for.body: Computable }
+; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%for.body> U: [1,-2147483648) S: [1,-2147483648) Exits: ((2 * %a) umax (4 * %b)) LoopDispositions: { %for.body: Computable }
; CHECK-NEXT: Determining loop execution counts for: @umax
; CHECK-NEXT: Loop %for.body: backedge-taken count is (-1 + ((2 * %a) umax (4 * %b)))
-; CHECK-NEXT: Loop %for.body: constant max backedge-taken count is i32 -3
+; CHECK-NEXT: Loop %for.body: constant max backedge-taken count is i32 2147483646
; CHECK-NEXT: Loop %for.body: symbolic max backedge-taken count is (-1 + ((2 * %a) umax (4 * %b)))
; CHECK-NEXT: Loop %for.body: Trip multiple is 2
;
@@ -197,12 +197,12 @@ define void @smax(i32 noundef %a, i32 noundef %b) {
; CHECK-NEXT: %cond = select i1 %cmp, i32 %mul, i32 %mul1
; CHECK-NEXT: --> ((2 * %a)<nsw> smax (4 * %b)<nsw>) U: [0,-1) S: [-2147483648,2147483647)
; CHECK-NEXT: %i.011 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
-; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%for.body> U: [0,-2147483648) S: [0,-2147483648) Exits: (-1 + ((2 * %a)<nsw> smax (4 * %b)<nsw>)) LoopDispositions: { %for.body: Computable }
+; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%for.body> U: [0,2147483647) S: [0,2147483647) Exits: (-1 + ((2 * %a)<nsw> smax (4 * %b)<nsw>)) LoopDispositions: { %for.body: Computable }
; CHECK-NEXT: %inc = add nuw nsw i32 %i.011, 1
-; CHECK-NEXT: --> {1,+,1}<nuw><%for.body> U: [1,-1) S: [1,-1) Exits: ((2 * %a)<nsw> smax (4 * %b)<nsw>) LoopDispositions: { %for.body: Computable }
+; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%for.body> U: [1,-2147483648) S: [1,-2147483648) Exits: ((2 * %a)<nsw> smax (4 * %b)<nsw>) LoopDispositions: { %for.body: Computable }
; CHECK-NEXT: Determining loop execution counts for: @smax
; CHECK-NEXT: Loop %for.body: backedge-taken count is (-1 + ((2 * %a)<nsw> smax (4 * %b)<nsw>))
-; CHECK-NEXT: Loop %for.body: constant max backedge-taken count is i32 -3
+; CHECK-NEXT: Loop %for.body: constant max backedge-taken count is i32 2147483646
; CHECK-NEXT: Loop %for.body: symbolic max backedge-taken count is (-1 + ((2 * %a)<nsw> smax (4 * %b)<nsw>))
; CHECK-NEXT: Loop %for.body: Trip multiple is 2
;
diff --git a/llvm/test/Transforms/IndVarSimplify/canonicalize-cmp.ll b/llvm/test/Transforms/IndVarSimplify/canonicalize-cmp.ll
index 4b52479fc6c4d..40e3c63cbe04a 100644
--- a/llvm/test/Transforms/IndVarSimplify/canonicalize-cmp.ll
+++ b/llvm/test/Transforms/IndVarSimplify/canonicalize-cmp.ll
@@ -343,14 +343,13 @@ define void @slt_no_smax_needed(i64 %n, ptr %dst) {
; CHECK-NEXT: [[PRE:%.*]] = icmp ult i32 [[ADD_1]], 8
; CHECK-NEXT: br i1 [[PRE]], label [[EXIT:%.*]], label [[LOOP_PREHEADER:%.*]]
; CHECK: loop.preheader:
-; CHECK-NEXT: [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[SHR]], i32 1)
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[DST:%.*]], i32 [[IV]]
; CHECK-NEXT: store i8 0, ptr [[GEP]], align 1
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[IV_NEXT]], [[SMAX]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[IV_NEXT]], [[SHR]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]]
; CHECK: exit.loopexit:
; CHECK-NEXT: br label [[EXIT]]
@@ -385,14 +384,13 @@ define void @ult_no_umax_needed(i64 %n, ptr %dst) {
; CHECK-NEXT: [[PRE:%.*]] = icmp ult i32 [[ADD_1]], 8
; CHECK-NEXT: br i1 [[PRE]], label [[EXIT:%.*]], label [[LOOP_PREHEADER:%.*]]
; CHECK: loop.preheader:
-; CHECK-NEXT: [[UMAX:%.*]] = call i32 @llvm.umax.i32(i32 [[SHR]], i32 1)
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[DST:%.*]], i32 [[IV]]
; CHECK-NEXT: store i8 0, ptr [[GEP]], align 1
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[IV_NEXT]], [[UMAX]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[IV_NEXT]], [[SHR]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]]
; CHECK: exit.loopexit:
; CHECK-NEXT: br label [[EXIT]]
diff --git a/llvm/test/Transforms/LoopIdiom/add-nsw-zext-fold.ll b/llvm/test/Transforms/LoopIdiom/add-nsw-zext-fold.ll
index bc1543d8361a7..09419c13aaeb0 100644
--- a/llvm/test/Transforms/LoopIdiom/add-nsw-zext-fold.ll
+++ b/llvm/test/Transforms/LoopIdiom/add-nsw-zext-fold.ll
@@ -61,9 +61,9 @@ define void @test_memset_size_can_use_info_from_guards(i32 %x, ptr %dst) {
; CHECK: [[LOOP1_BACKEDGE]]:
; CHECK-NEXT: br label %[[LOOP1]]
; CHECK: [[LOOP2_PREHEADER]]:
-; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[SUB]] to i64
-; CHECK-NEXT: [[TMP1:%.*]] = lshr i64 [[TMP0]], 1
-; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP1]], i64 1)
+; CHECK-NEXT: [[TMP0:%.*]] = add nsw i32 [[SHR]], -1
+; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
+; CHECK-NEXT: [[UMAX:%.*]] = add nuw nsw i64 [[TMP1]], 1
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[DST]], i8 0, i64 [[UMAX]], i1 false)
; CHECK-NEXT: br label %[[LOOP2:.*]]
; CHECK: [[LOOP2]]:
diff --git a/llvm/test/Transforms/LoopUnroll/runtime-unroll-assume-no-remainder.ll b/llvm/test/Transforms/LoopUnroll/runtime-unroll-assume-no-remainder.ll
index 73f7fd37a0099..74d4bb0f8d1cb 100644
--- a/llvm/test/Transforms/LoopUnroll/runtime-unroll-assume-no-remainder.ll
+++ b/llvm/test/Transforms/LoopUnroll/runtime-unroll-assume-no-remainder.ll
@@ -19,9 +19,16 @@ define dso_local void @assumeDivisibleTC(ptr noalias nocapture %a, ptr noalias n
; CHECK-NEXT: [[CMP110:%.*]] = icmp sgt i32 [[N]], 0
; CHECK-NEXT: br i1 [[CMP110]], label [[FOR_BODY_PREHEADER:%.*]], label [[EXIT]]
; CHECK: for.body.preheader:
+; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[N]], -1
+; CHECK-NEXT: [[XTRAITER:%.*]] = and i32 [[N]], 1
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ult i32 [[TMP2]], 1
+; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_BODY_EPIL_PREHEADER:%.*]], label [[FOR_BODY_PREHEADER_NEW:%.*]]
+; CHECK: for.body.preheader.new:
+; CHECK-NEXT: [[UNROLL_ITER:%.*]] = sub i32 [[N]], [[XTRAITER]]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_011:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[INC_1:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[I_011:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER_NEW]] ], [ [[INC_1:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[NITER:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER_NEW]] ], [ [[NITER_NEXT_1:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[B:%.*]], i32 [[I_011]]
; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[ADD:%.*]] = add i8 [[TMP0]], 3
@@ -34,8 +41,25 @@ define dso_local void @assumeDivisibleTC(ptr noalias nocapture %a, ptr noalias n
; CHECK-NEXT: [[ARRAYIDX4_1:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[INC]]
; CHECK-NEXT: store i8 [[ADD_1]], ptr [[ARRAYIDX4_1]], align 1
; CHECK-NEXT: [[INC_1]] = add nuw nsw i32 [[I_011]], 2
-; CHECK-NEXT: [[CMP1_1:%.*]] = icmp slt i32 [[INC_1]], [[N]]
-; CHECK-NEXT: br i1 [[CMP1_1]], label [[FOR_BODY]], label [[EXIT_LOOPEXIT:%.*]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[NITER_NEXT_1]] = add i32 [[NITER]], 2
+; CHECK-NEXT: [[NITER_NCMP_1:%.*]] = icmp ne i32 [[NITER_NEXT_1]], [[UNROLL_ITER]]
+; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label [[FOR_BODY]], label [[EXIT_LOOPEXIT_UNR_LCSSA:%.*]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: exit.loopexit.unr-lcssa:
+; CHECK-NEXT: [[I_011_UNR:%.*]] = phi i32 [ [[INC_1]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i32 [[XTRAITER]], 0
+; CHECK-NEXT: br i1 [[LCMP_MOD]], label [[FOR_BODY_EPIL_PREHEADER]], label [[EXIT_LOOPEXIT:%.*]]
+; CHECK: for.body.epil.preheader:
+; CHECK-NEXT: [[I_011_EPIL_INIT:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[I_011_UNR]], [[EXIT_LOOPEXIT_UNR_LCSSA]] ]
+; CHECK-NEXT: [[LCMP_MOD1:%.*]] = icmp ne i32 [[XTRAITER]], 0
+; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD1]])
+; CHECK-NEXT: br label [[FOR_BODY_EPIL:%.*]]
+; CHECK: for.body.epil:
+; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[I_011_EPIL_INIT]]
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr [[ARRAYIDX_EPIL]], align 1
+; CHECK-NEXT: [[ADD_EPIL:%.*]] = add i8 [[TMP4]], 3
+; CHECK-NEXT: [[ARRAYIDX4_EPIL:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[I_011_EPIL_INIT]]
+; CHECK-NEXT: store i8 [[ADD_EPIL]], ptr [[ARRAYIDX4_EPIL]], align 1
+; CHECK-NEXT: br label [[EXIT_LOOPEXIT]]
; CHECK: exit.loopexit:
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
diff --git a/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll b/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll
index 156c2bdca7b0e..812d9d928cc8b 100644
--- a/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll
+++ b/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll
@@ -193,7 +193,7 @@ define dso_local void @cannotProveAlignedTC(ptr noalias nocapture %A, i32 %p, i3
; CHECK-NEXT: store i32 13, ptr [[TMP12]], align 1
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE6]]
; CHECK: pred.store.continue6:
-; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/runtime-checks-difference.ll b/llvm/test/Transforms/LoopVectorize/runtime-checks-difference.ll
index 648ebc7e6c3a5..a556b15adbefc 100644
--- a/llvm/test/Transforms/LoopVectorize/runtime-checks-difference.ll
+++ b/llvm/test/Transforms/LoopVectorize/runtime-checks-difference.ll
@@ -465,12 +465,7 @@ define void @remove_diff_checks_via_guards(i32 %x, i32 %y, ptr %A) {
; CHECK-NEXT: [[TMP13:%.*]] = icmp ugt i64 [[SMAX]], 4294967295
; CHECK-NEXT: [[TMP14:%.*]] = or i1 [[TMP12]], [[TMP13]]
; CHECK-NEXT: [[TMP15:%.*]] = or i1 [[TMP9]], [[TMP14]]
-; CHECK-NEXT: br i1 [[TMP15]], [[SCALAR_PH]], label %[[VECTOR_MEMCHECK:.*]]
-; CHECK: [[VECTOR_MEMCHECK]]:
-; CHECK-NEXT: [[TMP16:%.*]] = sext i32 [[OFFSET]] to i64
-; CHECK-NEXT: [[TMP17:%.*]] = shl nsw i64 [[TMP16]], 2
-; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP17]], 16
-; CHECK-NEXT: br i1 [[DIFF_CHECK]], [[SCALAR_PH]], [[VECTOR_PH1:label %.*]]
+; CHECK-NEXT: br i1 [[TMP15]], [[SCALAR_PH]], [[VECTOR_PH:label %.*]]
;
entry:
%offset = sub i32 %x, %y
>From 8dad35191ed6aa9777b5ef99fd243e0d40a7b838 Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Thu, 9 Oct 2025 11:25:57 +0100
Subject: [PATCH 2/4] !fixup getMinusSCEV -> getAddExpr + getMinusOne.
---
llvm/lib/Analysis/ScalarEvolution.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 3d05cb8a7e054..0f6d24496f690 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -16054,7 +16054,7 @@ const SCEV *ScalarEvolution::LoopGuards::rewrite(const SCEV *Expr) const {
SE.getAddExpr(Expr->getOperand(0), SE.getOne(Expr->getType()));
if (const SCEV *S =
Map.lookup(SE.getAddExpr(NewC, Expr->getOperand(1))))
- return SE.getMinusSCEV(S, SE.getOne(Expr->getType()));
+ return SE.getAddExpr(S, SE.getMinusOne(Expr->getType()));
}
}
>From 277d277637800a32d3613e5fbbf7a4c9bd5794c3 Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Sat, 11 Oct 2025 21:38:11 +0100
Subject: [PATCH 3/4] !fixup undo test changes, remove stray period.
---
llvm/lib/Analysis/ScalarEvolution.cpp | 2 +-
.../runtime-unroll-assume-no-remainder.ll | 30 ++-----------------
2 files changed, 4 insertions(+), 28 deletions(-)
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 0f6d24496f690..79a23864d9559 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -16048,7 +16048,7 @@ const SCEV *ScalarEvolution::LoopGuards::rewrite(const SCEV *Expr) const {
// For expressions of the form (Const + A), check if we have guard info
// for (Const + 1 + A), and rewrite to ((Const + 1 + A) - 1). This makes
// sure we don't loose information when rewriting expressions based on
- // back-edge taken counts in some cases..
+ // back-edge taken counts in some cases.
if (Expr->getNumOperands() == 2) {
auto *NewC =
SE.getAddExpr(Expr->getOperand(0), SE.getOne(Expr->getType()));
diff --git a/llvm/test/Transforms/LoopUnroll/runtime-unroll-assume-no-remainder.ll b/llvm/test/Transforms/LoopUnroll/runtime-unroll-assume-no-remainder.ll
index 74d4bb0f8d1cb..73f7fd37a0099 100644
--- a/llvm/test/Transforms/LoopUnroll/runtime-unroll-assume-no-remainder.ll
+++ b/llvm/test/Transforms/LoopUnroll/runtime-unroll-assume-no-remainder.ll
@@ -19,16 +19,9 @@ define dso_local void @assumeDivisibleTC(ptr noalias nocapture %a, ptr noalias n
; CHECK-NEXT: [[CMP110:%.*]] = icmp sgt i32 [[N]], 0
; CHECK-NEXT: br i1 [[CMP110]], label [[FOR_BODY_PREHEADER:%.*]], label [[EXIT]]
; CHECK: for.body.preheader:
-; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[N]], -1
-; CHECK-NEXT: [[XTRAITER:%.*]] = and i32 [[N]], 1
-; CHECK-NEXT: [[TMP3:%.*]] = icmp ult i32 [[TMP2]], 1
-; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_BODY_EPIL_PREHEADER:%.*]], label [[FOR_BODY_PREHEADER_NEW:%.*]]
-; CHECK: for.body.preheader.new:
-; CHECK-NEXT: [[UNROLL_ITER:%.*]] = sub i32 [[N]], [[XTRAITER]]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_011:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER_NEW]] ], [ [[INC_1:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[NITER:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER_NEW]] ], [ [[NITER_NEXT_1:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[I_011:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[INC_1:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[B:%.*]], i32 [[I_011]]
; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[ADD:%.*]] = add i8 [[TMP0]], 3
@@ -41,25 +34,8 @@ define dso_local void @assumeDivisibleTC(ptr noalias nocapture %a, ptr noalias n
; CHECK-NEXT: [[ARRAYIDX4_1:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[INC]]
; CHECK-NEXT: store i8 [[ADD_1]], ptr [[ARRAYIDX4_1]], align 1
; CHECK-NEXT: [[INC_1]] = add nuw nsw i32 [[I_011]], 2
-; CHECK-NEXT: [[NITER_NEXT_1]] = add i32 [[NITER]], 2
-; CHECK-NEXT: [[NITER_NCMP_1:%.*]] = icmp ne i32 [[NITER_NEXT_1]], [[UNROLL_ITER]]
-; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label [[FOR_BODY]], label [[EXIT_LOOPEXIT_UNR_LCSSA:%.*]], !llvm.loop [[LOOP0:![0-9]+]]
-; CHECK: exit.loopexit.unr-lcssa:
-; CHECK-NEXT: [[I_011_UNR:%.*]] = phi i32 [ [[INC_1]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i32 [[XTRAITER]], 0
-; CHECK-NEXT: br i1 [[LCMP_MOD]], label [[FOR_BODY_EPIL_PREHEADER]], label [[EXIT_LOOPEXIT:%.*]]
-; CHECK: for.body.epil.preheader:
-; CHECK-NEXT: [[I_011_EPIL_INIT:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[I_011_UNR]], [[EXIT_LOOPEXIT_UNR_LCSSA]] ]
-; CHECK-NEXT: [[LCMP_MOD1:%.*]] = icmp ne i32 [[XTRAITER]], 0
-; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD1]])
-; CHECK-NEXT: br label [[FOR_BODY_EPIL:%.*]]
-; CHECK: for.body.epil:
-; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[I_011_EPIL_INIT]]
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr [[ARRAYIDX_EPIL]], align 1
-; CHECK-NEXT: [[ADD_EPIL:%.*]] = add i8 [[TMP4]], 3
-; CHECK-NEXT: [[ARRAYIDX4_EPIL:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[I_011_EPIL_INIT]]
-; CHECK-NEXT: store i8 [[ADD_EPIL]], ptr [[ARRAYIDX4_EPIL]], align 1
-; CHECK-NEXT: br label [[EXIT_LOOPEXIT]]
+; CHECK-NEXT: [[CMP1_1:%.*]] = icmp slt i32 [[INC_1]], [[N]]
+; CHECK-NEXT: br i1 [[CMP1_1]], label [[FOR_BODY]], label [[EXIT_LOOPEXIT:%.*]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: exit.loopexit:
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
>From 3a98db616b731186ef8cd02af0731f25c37a4d44 Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Sun, 2 Nov 2025 15:19:15 +0000
Subject: [PATCH 4/4] !fixup update after rebase
---
llvm/lib/Analysis/ScalarEvolution.cpp | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 79a23864d9559..a8224f2db9386 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -16009,7 +16009,6 @@ const SCEV *ScalarEvolution::LoopGuards::rewrite(const SCEV *Expr) const {
if (const SCEV *S = Map.lookup(Expr))
return S;
-
// Helper to check if S is a subtraction (A - B) where A != B, and if so,
// return UMax(S, 1).
auto RewriteSubtraction = [&](const SCEV *S) -> const SCEV * {
@@ -16034,20 +16033,21 @@ const SCEV *ScalarEvolution::LoopGuards::rewrite(const SCEV *Expr) const {
// (Const + A + B). There may be guard info for A + B, and if so, apply
// it.
// TODO: Could more generally apply guards to Add sub-expressions.
- if (isa<SCEVConstant>(Expr->getOperand(0)) &&
- Expr->getNumOperands() == 3) {
- const SCEV *Add =
- SE.getAddExpr(Expr->getOperand(1), Expr->getOperand(2));
- if (const SCEV *Rewritten = RewriteSubtraction(Add))
- return SE.getAddExpr(
- Expr->getOperand(0), Rewritten,
- ScalarEvolution::maskFlags(Expr->getNoWrapFlags(), FlagMask));
- if (const SCEV *S = Map.lookup(Add))
- return SE.getAddExpr(Expr->getOperand(0), S);
+ if (isa<SCEVConstant>(Expr->getOperand(0))) {
+ if (Expr->getNumOperands() == 3) {
+ const SCEV *Add =
+ SE.getAddExpr(Expr->getOperand(1), Expr->getOperand(2));
+ if (const SCEV *Rewritten = RewriteSubtraction(Add))
+ return SE.getAddExpr(
+ Expr->getOperand(0), Rewritten,
+ ScalarEvolution::maskFlags(Expr->getNoWrapFlags(), FlagMask));
+ if (const SCEV *S = Map.lookup(Add))
+ return SE.getAddExpr(Expr->getOperand(0), S);
+ }
// For expressions of the form (Const + A), check if we have guard info
// for (Const + 1 + A), and rewrite to ((Const + 1 + A) - 1). This makes
- // sure we don't loose information when rewriting expressions based on
+ // sure we don't lose information when rewriting expressions based on
// back-edge taken counts in some cases.
if (Expr->getNumOperands() == 2) {
auto *NewC =