[llvm] 7f06d8a - [SCEV] Retain SCEVSequentialMinMaxExpr if an operand may trigger UB. (#110824)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Oct 14 05:08:52 PDT 2024
Author: Florian Hahn
Date: 2024-10-14T13:08:49+01:00
New Revision: 7f06d8afb03383dea33379f9c06d010d6ee3f14e
URL: https://github.com/llvm/llvm-project/commit/7f06d8afb03383dea33379f9c06d010d6ee3f14e
DIFF: https://github.com/llvm/llvm-project/commit/7f06d8afb03383dea33379f9c06d010d6ee3f14e.diff
LOG: [SCEV] Retain SCEVSequentialMinMaxExpr if an operand may trigger UB. (#110824)
Retain SCEVSequentialMinMaxExpr if an operand may trigger UB, e.g. if
there is a UDiv operand that may divide by 0 or by poison.
PR: https://github.com/llvm/llvm-project/pull/110824
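For context, the loops this change targets have a divide in the latch whose
divisor may be zero or poison, guarded by an earlier exit. The sketch below is
an illustrative reconstruction of such a multi-exit loop (the function name and
body are not copied from the tests in this patch):

define i64 @multi_exit_with_udiv_in_latch(ptr %dst, i64 %N) {
entry:
  br label %loop.header

loop.header:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
  ; First exit: when %N <= 0 the loop leaves here immediately, so the udiv in
  ; the latch is never executed and the function is well-defined for %N == 0.
  %c.header = icmp slt i64 %iv, %N
  br i1 %c.header, label %loop.latch, label %exit

loop.latch:
  %gep = getelementptr inbounds i32, ptr %dst, i64 %iv
  store i32 1, ptr %gep, align 4
  ; Second exit: evaluating this udiv unconditionally would be UB for %N == 0,
  ; which is why the combined backedge-taken count must keep umin_seq instead
  ; of being relaxed to a plain umin.
  %d = udiv i64 42, %N
  %c.latch = icmp ne i64 %iv, %d
  %iv.next = add i64 %iv, 1
  br i1 %c.latch, label %loop.header, label %exit

exit:
  ret i64 %iv
}

With this change, ScalarEvolution keeps the backedge-taken count of such loops
in sequential form, e.g. ((0 smax %N) umin_seq (42 /u %N)), rather than
simplifying it to a plain umin, as the test updates below show.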
Added:
Modified:
llvm/include/llvm/Analysis/ScalarEvolution.h
llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
llvm/lib/Analysis/ScalarEvolution.cpp
llvm/test/Analysis/ScalarEvolution/umin-seq-operand-may-trigger-ub.ll
llvm/test/Transforms/LoopVectorize/trip-count-expansion-may-introduce-ub.ll
Removed:
################################################################################
diff --git a/llvm/include/llvm/Analysis/ScalarEvolution.h b/llvm/include/llvm/Analysis/ScalarEvolution.h
index 328926f0b7aa65..4b8cb3a39a86db 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -2168,6 +2168,9 @@ class ScalarEvolution {
bool isGuaranteedToTransferExecutionTo(const Instruction *A,
const Instruction *B);
+ /// Returns true if \p Op is guaranteed to not be poison.
+ static bool isGuaranteedNotToBePoison(const SCEV *Op);
+
/// Return true if the SCEV corresponding to \p I is never poison. Proving
/// this is more complex than proving that just \p I is never poison, since
/// SCEV commons expressions across control flow, and you can have cases
diff --git a/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h b/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
index fd884f2a2f55b0..6eb1aca1cf76ad 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
@@ -511,8 +511,9 @@ class SCEVUMinExpr : public SCEVMinMaxExpr {
/// This node is the base class for sequential/in-order min/max selections.
/// Note that their fundamental difference from SCEVMinMaxExpr's is that they
/// are early-returning upon reaching saturation point.
-/// I.e. given `0 umin_seq poison`, the result will be `0`,
-/// while the result of `0 umin poison` is `poison`.
+/// I.e. given `0 umin_seq poison`, the result will be `0`, while the result of
+/// `0 umin poison` is `poison`. When returning early, later expressions are not
+/// executed, so `0 umin_seq (%x u/ 0)` does not result in undefined behavior.
class SCEVSequentialMinMaxExpr : public SCEVNAryExpr {
friend class ScalarEvolution;
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index cea3a5bc865fee..97ea405a5267ae 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -4200,7 +4200,7 @@ bool ScalarEvolution::canReuseInstruction(
// Either the value can't be poison, or the S would also be poison if it
// is.
- if (PoisonVals.contains(V) || isGuaranteedNotToBePoison(V))
+ if (PoisonVals.contains(V) || ::isGuaranteedNotToBePoison(V))
continue;
auto *I = dyn_cast<Instruction>(V);
@@ -4303,6 +4303,16 @@ ScalarEvolution::getSequentialMinMaxExpr(SCEVTypes Kind,
}
for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
+ bool MayBeUB = SCEVExprContains(Ops[i], [this](const SCEV *S) {
+ auto *UDiv = dyn_cast<SCEVUDivExpr>(S);
+ // The UDiv may be UB if the divisor is poison or zero. Unless the divisor
+ // is a non-zero constant, we have to assume the UDiv may be UB.
+ return UDiv && (!isKnownNonZero(UDiv->getOperand(1)) ||
+ !isGuaranteedNotToBePoison(UDiv->getOperand(1)));
+ });
+
+ if (MayBeUB)
+ continue;
// We can replace %x umin_seq %y with %x umin %y if either:
// * %y being poison implies %x is also poison.
// * %x cannot be the saturating value (e.g. zero for umin).
@@ -7298,6 +7308,11 @@ bool ScalarEvolution::isGuaranteedToTransferExecutionTo(const Instruction *A,
return false;
}
+bool ScalarEvolution::isGuaranteedNotToBePoison(const SCEV *Op) {
+ SCEVPoisonCollector PC(/* LookThroughMaybePoisonBlocking */ true);
+ visitAll(Op, PC);
+ return PC.MaybePoison.empty();
+}
bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) {
// Only proceed if we can prove that I does not yield poison.
diff --git a/llvm/test/Analysis/ScalarEvolution/umin-seq-operand-may-trigger-ub.ll b/llvm/test/Analysis/ScalarEvolution/umin-seq-operand-may-trigger-ub.ll
index 8085592111ff5a..f84f35d4e8ca20 100644
--- a/llvm/test/Analysis/ScalarEvolution/umin-seq-operand-may-trigger-ub.ll
+++ b/llvm/test/Analysis/ScalarEvolution/umin-seq-operand-may-trigger-ub.ll
@@ -4,15 +4,14 @@
; The UDiv in the latch may never be executed. The backedge-taken-count
; expressions must account for the fact that evaluating the UDiv
; unconditionally may trigger UB.
-; FIXME: umin_seq should be used instead of umin for BTCs.
define i64 @multi_exit_exit_count_with_udiv_by_value_in_latch(ptr %dst, i64 %N) {
; CHECK-LABEL: 'multi_exit_exit_count_with_udiv_by_value_in_latch'
; CHECK-NEXT: Determining loop execution counts for: @multi_exit_exit_count_with_udiv_by_value_in_latch
-; CHECK-NEXT: Loop %loop.header: <multiple exits> backedge-taken count is ((42 /u %N) umin (0 smax %N))
+; CHECK-NEXT: Loop %loop.header: <multiple exits> backedge-taken count is ((0 smax %N) umin_seq (42 /u %N))
; CHECK-NEXT: exit count for loop.header: (0 smax %N)
; CHECK-NEXT: exit count for loop.latch: (42 /u %N)
; CHECK-NEXT: Loop %loop.header: constant max backedge-taken count is i64 42
-; CHECK-NEXT: Loop %loop.header: symbolic max backedge-taken count is ((42 /u %N) umin (0 smax %N))
+; CHECK-NEXT: Loop %loop.header: symbolic max backedge-taken count is ((0 smax %N) umin_seq (42 /u %N))
; CHECK-NEXT: symbolic max exit count for loop.header: (0 smax %N)
; CHECK-NEXT: symbolic max exit count for loop.latch: (42 /u %N)
; CHECK-NEXT: Loop %loop.header: Trip multiple is 1
@@ -41,7 +40,6 @@ exit:
; The UDiv in the latch may never be executed. The backedge-taken-count
; expressions must account for the fact that evaluating the UDiv
; unconditionally may trigger UB.
-; FIXME: umin_seq should be used instead of umin for BTCs.
define i64 @multi_exit_exit_count_with_udiv_by_value_in_latch_different_bounds(ptr %dst, i64 %N, i64 %M) {
; CHECK-LABEL: 'multi_exit_exit_count_with_udiv_by_value_in_latch_different_bounds'
; CHECK-NEXT: Determining loop execution counts for: @multi_exit_exit_count_with_udiv_by_value_in_latch_different_bounds
diff --git a/llvm/test/Transforms/LoopVectorize/trip-count-expansion-may-introduce-ub.ll b/llvm/test/Transforms/LoopVectorize/trip-count-expansion-may-introduce-ub.ll
index 6fba8ccd590c62..c63dc9979bce3c 100644
--- a/llvm/test/Transforms/LoopVectorize/trip-count-expansion-may-introduce-ub.ll
+++ b/llvm/test/Transforms/LoopVectorize/trip-count-expansion-may-introduce-ub.ll
@@ -463,9 +463,10 @@ define i64 @multi_exit_4_exit_count_with_udiv_by_value_in_latch(ptr %dst, i64 %N
; CHECK-LABEL: define i64 @multi_exit_4_exit_count_with_udiv_by_value_in_latch(
; CHECK-SAME: ptr [[DST:%.*]], i64 [[N:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 0)
; CHECK-NEXT: [[TMP0:%.*]] = udiv i64 42, [[N]]
-; CHECK-NEXT: [[UMIN:%.*]] = call i64 @llvm.umin.i64(i64 [[SMAX]], i64 [[TMP0]])
+; CHECK-NEXT: [[TMP8:%.*]] = freeze i64 [[TMP0]]
+; CHECK-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 0)
+; CHECK-NEXT: [[UMIN:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP8]], i64 [[SMAX]])
; CHECK-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[UMIN]], 1
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP1]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
@@ -598,9 +599,10 @@ define i64 @multi_exit_4_exit_count_with_udiv_by_frozen_value_in_latch(ptr %dst,
; CHECK-SAME: ptr [[DST:%.*]], i64 [[N:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[FR_N:%.*]] = freeze i64 [[N]]
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 0)
; CHECK-NEXT: [[TMP0:%.*]] = udiv i64 42, [[FR_N]]
-; CHECK-NEXT: [[UMIN:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP2]], i64 [[TMP0]])
+; CHECK-NEXT: [[TMP1:%.*]] = freeze i64 [[TMP0]]
+; CHECK-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 0)
+; CHECK-NEXT: [[UMIN:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 [[SMAX]])
; CHECK-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[UMIN]], 1
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP3]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
@@ -786,12 +788,13 @@ define i64 @multi_exit_4_exit_count_with_urem_by_value_in_latch(ptr %dst, i64 %N
; CHECK-LABEL: define i64 @multi_exit_4_exit_count_with_urem_by_value_in_latch(
; CHECK-SAME: ptr [[DST:%.*]], i64 [[N:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 0)
; CHECK-NEXT: [[TMP0:%.*]] = udiv i64 42, [[N]]
; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[N]], [[TMP0]]
; CHECK-NEXT: [[TMP2:%.*]] = sub i64 42, [[TMP1]]
; CHECK-NEXT: [[SMAX1:%.*]] = call i64 @llvm.smax.i64(i64 [[TMP2]], i64 0)
-; CHECK-NEXT: [[UMIN:%.*]] = call i64 @llvm.umin.i64(i64 [[SMAX]], i64 [[SMAX1]])
+; CHECK-NEXT: [[TMP10:%.*]] = freeze i64 [[SMAX1]]
+; CHECK-NEXT: [[SMAX2:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 0)
+; CHECK-NEXT: [[UMIN:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP10]], i64 [[SMAX2]])
; CHECK-NEXT: [[TMP3:%.*]] = add nuw i64 [[UMIN]], 1
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP3]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
@@ -1004,9 +1007,10 @@ define i64 @multi_exit_4_exit_count_with_udiv_by_value_in_latch1(ptr %dst, i64 %
; CHECK-LABEL: define i64 @multi_exit_4_exit_count_with_udiv_by_value_in_latch1(
; CHECK-SAME: ptr [[DST:%.*]], i64 [[N:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 0)
; CHECK-NEXT: [[TMP0:%.*]] = udiv i64 42, [[N]]
-; CHECK-NEXT: [[UMIN:%.*]] = call i64 @llvm.umin.i64(i64 [[SMAX]], i64 [[TMP0]])
+; CHECK-NEXT: [[TMP8:%.*]] = freeze i64 [[TMP0]]
+; CHECK-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 0)
+; CHECK-NEXT: [[UMIN:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP8]], i64 [[SMAX]])
; CHECK-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[UMIN]], 1
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP1]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]