[llvm] Revert "[SCEV] Fold (C1 * A /u C2) -> A /u (C2 /u C1), if C2 > C1." (PR #158328)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Sep 12 09:44:27 PDT 2025
llvmbot wrote:
@llvm/pr-subscribers-llvm-transforms
Author: Reid Kleckner (rnk)
Reverts llvm/llvm-project#157656
There are multiple reports that this is causing miscompiles, both in the MSan test suite after bootstrapping and in rustc. Let's revert for now and work on capturing a reproducer next week.
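For context, here is a minimal standalone sketch (not LLVM code) of the arithmetic identity named in the title, (C1 * (A /u C2)) -> A /u (C2 /u C1) for power-of-two constants with C2 > C1. The concrete constants and the "D is a multiple of C2" precondition are taken from the comment in the reverted code; everything else is illustrative only.

```c++
// Sketch of the folded identity with sample power-of-two constants.
// Assumes D is a multiple of C2, as the SCEV comment describes.
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t C1 = 2, C2 = 4; // powers of two, C2 > C1
  for (uint64_t D = 0; D <= 16; D += C2) { // D a multiple of C2
    uint64_t Original = C1 * (D / C2); // C1 * (A /u C2)
    uint64_t Folded = D / (C2 / C1);   // A /u (C2 /u C1)
    printf("D=%llu original=%llu folded=%llu\n", (unsigned long long)D,
           (unsigned long long)Original, (unsigned long long)Folded);
  }
  return 0;
}
```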
---
Full diff: https://github.com/llvm/llvm-project/pull/158328.diff
3 Files Affected:
- (modified) llvm/lib/Analysis/ScalarEvolution.cpp (+5-13)
- (modified) llvm/test/Analysis/ScalarEvolution/mul-udiv-folds.ll (+1-1)
- (modified) llvm/test/Transforms/LoopStrengthReduce/duplicated-phis.ll (+2-1)
``````````diff
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 5bcafd96f1aa5..a1703a270952e 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -3217,26 +3217,18 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
}
// Try to fold (C1 * D /u C2) -> C1/C2 * D, if C1 and C2 are powers-of-2,
- // D is a multiple of C2, and C1 is a multiple of C2. If C2 is a multiple
- // of C1, fold to (D /u (C2 /u C1)).
+ // D is a multiple of C2, and C1 is a multiple of C2.
const SCEV *D;
APInt C1V = LHSC->getAPInt();
- // (C1 * D /u C2) == -1 * -C1 * D /u C2 when C1 != INT_MIN. Don't treat -1
- // as -1 * 1, as it won't enable additional folds.
- if (C1V.isNegative() && !C1V.isMinSignedValue() && !C1V.isAllOnes())
+ // (C1 * D /u C2) == -1 * -C1 * D /u C2 when C1 != INT_MIN.
+ if (C1V.isNegative() && !C1V.isMinSignedValue())
C1V = C1V.abs();
const SCEVConstant *C2;
if (C1V.isPowerOf2() &&
match(Ops[1], m_scev_UDiv(m_SCEV(D), m_SCEVConstant(C2))) &&
- C2->getAPInt().isPowerOf2() &&
+ C2->getAPInt().isPowerOf2() && C1V.uge(C2->getAPInt()) &&
C1V.logBase2() <= getMinTrailingZeros(D)) {
- const SCEV *NewMul;
- if (C1V.uge(C2->getAPInt())) {
- NewMul = getMulExpr(getUDivExpr(getConstant(C1V), C2), D);
- } else {
- assert(C1V.ugt(1) && "C1 <= 1 should have been folded earlier");
- NewMul = getUDivExpr(D, getUDivExpr(C2, getConstant(C1V)));
- }
+ const SCEV *NewMul = getMulExpr(getUDivExpr(getConstant(C1V), C2), D);
return C1V == LHSC->getAPInt() ? NewMul : getNegativeSCEV(NewMul);
}
}
diff --git a/llvm/test/Analysis/ScalarEvolution/mul-udiv-folds.ll b/llvm/test/Analysis/ScalarEvolution/mul-udiv-folds.ll
index 1d34706baadeb..8dd8ec47e7090 100644
--- a/llvm/test/Analysis/ScalarEvolution/mul-udiv-folds.ll
+++ b/llvm/test/Analysis/ScalarEvolution/mul-udiv-folds.ll
@@ -21,7 +21,7 @@ define void @udiv4_and_udiv2(i1 %c, ptr %A) {
; CHECK-NEXT: %gep.8 = getelementptr i8, ptr %A, i64 %iv
; CHECK-NEXT: --> {(((zext i32 %start to i64) /u 4) + %A),+,1}<%loop> U: full-set S: full-set Exits: (((zext i32 %start to i64) /u 2) + %A) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %gep.16 = getelementptr i16, ptr %A, i64 %iv
-; CHECK-NEXT: --> {(((zext i32 %start to i64) /u 2) + %A),+,2}<%loop> U: full-set S: full-set Exits: ((zext i32 %start to i64) + %A) LoopDispositions: { %loop: Computable }
+; CHECK-NEXT: --> {((2 * ((zext i32 %start to i64) /u 4))<nuw><nsw> + %A),+,2}<%loop> U: full-set S: full-set Exits: ((zext i32 %start to i64) + %A) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %gep.32 = getelementptr i32, ptr %A, i64 %iv
; CHECK-NEXT: --> {((zext i32 %start to i64) + %A),+,4}<%loop> U: full-set S: full-set Exits: ((2 * (zext i32 %start to i64))<nuw><nsw> + %A) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %gep.40 = getelementptr <{ i32, i8 }>, ptr %A, i64 %iv
diff --git a/llvm/test/Transforms/LoopStrengthReduce/duplicated-phis.ll b/llvm/test/Transforms/LoopStrengthReduce/duplicated-phis.ll
index c59f7d9c2a41a..cee8c8abdb450 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/duplicated-phis.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/duplicated-phis.ll
@@ -18,7 +18,8 @@ define i64 @test_duplicated_phis(i64 noundef %N) {
; CHECK: [[FOR_BODY_PREHEADER_NEW]]:
; CHECK-NEXT: [[UNROLL_ITER:%.*]] = and i64 [[MUL]], -4
; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[UNROLL_ITER]], -4
-; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP4]], 1
+; CHECK-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP4]], 2
+; CHECK-NEXT: [[TMP3:%.*]] = shl nuw nsw i64 [[TMP5]], 1
; CHECK-NEXT: [[LSR_IV_NEXT:%.*]] = sub i64 -3, [[TMP3]]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
``````````
https://github.com/llvm/llvm-project/pull/158328