[llvm] d94958b - [InstCombine] Fold `icmp samesign u{gt/lt} (X +nsw C2), C` -> `icmp s{gt/lt} X, (C - C2)` (#169960)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Dec 8 04:05:41 PST 2025
Author: Tirthankar Mazumder
Date: 2025-12-08T13:05:37+01:00
New Revision: d94958b2f27affe00c42c1338f99674d2f6271c8
URL: https://github.com/llvm/llvm-project/commit/d94958b2f27affe00c42c1338f99674d2f6271c8
DIFF: https://github.com/llvm/llvm-project/commit/d94958b2f27affe00c42c1338f99674d2f6271c8.diff
LOG: [InstCombine] Fold `icmp samesign u{gt/lt} (X +nsw C2), C` -> `icmp s{gt/lt} X, (C - C2)` (#169960)
Fixes #166973
Partially addresses #134028
Alive2 proof: https://alive2.llvm.org/ce/z/BqHQNN
Added:
Modified:
llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
llvm/test/Transforms/InstCombine/icmp-add.ll
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 33eee8e059486..abf4381ebd794 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -3132,7 +3132,7 @@ Instruction *InstCombinerImpl::foldICmpAddConstant(ICmpInst &Cmp,
Value *Op0, *Op1;
Instruction *Ext0, *Ext1;
- const CmpInst::Predicate Pred = Cmp.getPredicate();
+ const CmpPredicate Pred = Cmp.getCmpPredicate();
if (match(Add,
m_Add(m_CombineAnd(m_Instruction(Ext0), m_ZExtOrSExt(m_Value(Op0))),
m_CombineAnd(m_Instruction(Ext1),
@@ -3167,22 +3167,29 @@ Instruction *InstCombinerImpl::foldICmpAddConstant(ICmpInst &Cmp,
// If the add does not wrap, we can always adjust the compare by subtracting
// the constants. Equality comparisons are handled elsewhere. SGE/SLE/UGE/ULE
- // are canonicalized to SGT/SLT/UGT/ULT.
- if ((Add->hasNoSignedWrap() &&
- (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT)) ||
- (Add->hasNoUnsignedWrap() &&
- (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULT))) {
+ // have been canonicalized to SGT/SLT/UGT/ULT.
+ if (Add->hasNoUnsignedWrap() &&
+ (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULT)) {
bool Overflow;
- APInt NewC =
- Cmp.isSigned() ? C.ssub_ov(*C2, Overflow) : C.usub_ov(*C2, Overflow);
+ APInt NewC = C.usub_ov(*C2, Overflow);
// If there is overflow, the result must be true or false.
- // TODO: Can we assert there is no overflow because InstSimplify always
- // handles those cases?
if (!Overflow)
// icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2)
return new ICmpInst(Pred, X, ConstantInt::get(Ty, NewC));
}
+ CmpInst::Predicate ChosenPred = Pred.getPreferredSignedPredicate();
+
+ if (Add->hasNoSignedWrap() &&
+ (ChosenPred == ICmpInst::ICMP_SGT || ChosenPred == ICmpInst::ICMP_SLT)) {
+ bool Overflow;
+ APInt NewC = C.ssub_ov(*C2, Overflow);
+ if (!Overflow)
+ // icmp samesign ugt/ult (add nsw X, C2), C
+ // -> icmp sgt/slt X, (C - C2)
+ return new ICmpInst(ChosenPred, X, ConstantInt::get(Ty, NewC));
+ }
+
if (ICmpInst::isUnsigned(Pred) && Add->hasNoSignedWrap() &&
C.isNonNegative() && (C - *C2).isNonNegative() &&
computeConstantRange(X, /*ForSigned=*/true).add(*C2).isAllNonNegative())
diff --git a/llvm/test/Transforms/InstCombine/icmp-add.ll b/llvm/test/Transforms/InstCombine/icmp-add.ll
index 8449c7c5ea935..2806feec9ce29 100644
--- a/llvm/test/Transforms/InstCombine/icmp-add.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-add.ll
@@ -3440,3 +3440,79 @@ define i1 @val_is_aligend_pred_mismatch(i32 %num) {
%_0 = icmp sge i32 %num.masked, %num
ret i1 %_0
}
+
+define i1 @icmp_samesign_with_nsw_add(i32 %arg0) {
+; CHECK-LABEL: @icmp_samesign_with_nsw_add(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[V1:%.*]] = icmp sgt i32 [[ARG0:%.*]], 25
+; CHECK-NEXT: ret i1 [[V1]]
+;
+entry:
+ %v0 = add nsw i32 %arg0, -18
+ %v1 = icmp samesign ugt i32 %v0, 7
+ ret i1 %v1
+}
+
+; Negative test: the fold shouldn't fire since -124 - 12 causes signed overflow in i8
+define i1 @icmp_samesign_with_nsw_add_neg(i8 %arg0) {
+; CHECK-LABEL: @icmp_samesign_with_nsw_add_neg(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = add i8 [[ARG0:%.*]], -121
+; CHECK-NEXT: [[V1:%.*]] = icmp ult i8 [[TMP0]], 123
+; CHECK-NEXT: ret i1 [[V1]]
+;
+entry:
+ %v0 = add nsw i8 %arg0, 12
+ %v1 = icmp samesign ugt i8 %v0, -124
+ ret i1 %v1
+}
+
+define i1 @icmp_with_nuw_add(i32 %arg0) {
+; CHECK-LABEL: @icmp_with_nuw_add(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[V1:%.*]] = icmp ult i32 [[ARG0:%.*]], 11
+; CHECK-NEXT: ret i1 [[V1]]
+;
+entry:
+ %v0 = add nuw i32 %arg0, 7
+ %v1 = icmp ult i32 %v0, 18
+ ret i1 %v1
+}
+
+define i1 @icmp_partial_negative_samesign_ult_to_slt(i8 range(i8 -1, 5) %x) {
+; CHECK-LABEL: @icmp_partial_negative_samesign_ult_to_slt(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[X:%.*]], 2
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+entry:
+ %add = add nsw i8 %x, -5
+ %cmp = icmp samesign ult i8 %add, -3
+ ret i1 %cmp
+}
+
+define i1 @icmp_pos_samesign_slt_to_ult(i8 range(i8 1, 5) %x) {
+; CHECK-LABEL: @icmp_pos_samesign_slt_to_ult(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp samesign ult i8 [[X:%.*]], 2
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+entry:
+ %add = add nsw i8 %x, 1
+ %cmp = icmp samesign slt i8 %add, 3
+ ret i1 %cmp
+}
+
+; Unsigned predicates are given higher priority, so the predicate should
+; not change
+define i1 @icmp_nuw_nsw_samesign(i32 %arg0) {
+; CHECK-LABEL: @icmp_nuw_nsw_samesign(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[ARG0:%.*]], 9
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+entry:
+ %v0 = add nuw nsw i32 %arg0, 1
+ %cmp = icmp samesign ult i32 %v0, 10
+ ret i1 %cmp
+}
More information about the llvm-commits
mailing list