[llvm] [InstCombine] Recognize non-negative subtraction patterns (PR #182597)

via llvm-commits llvm-commits at lists.llvm.org
Tue Mar 3 21:29:17 PST 2026


https://github.com/user1342234 updated https://github.com/llvm/llvm-project/pull/182597

>From ca482d196d52b1dcfd35fbfa36bf4198b535acdf Mon Sep 17 00:00:00 2001
From: abu <ayywarepremium at gmail.com>
Date: Fri, 20 Feb 2026 12:08:15 -0800
Subject: [PATCH 1/3] [InstCombine] Recognize non-negative subtraction patterns

---
 .../Transforms/InstCombine/sext-nonneg-sub.ll | 123 ++++++++++++++++++
 1 file changed, 123 insertions(+)
 create mode 100644 llvm/test/Transforms/InstCombine/sext-nonneg-sub.ll

diff --git a/llvm/test/Transforms/InstCombine/sext-nonneg-sub.ll b/llvm/test/Transforms/InstCombine/sext-nonneg-sub.ll
new file mode 100644
index 0000000000000..58bfe870e9ab5
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/sext-nonneg-sub.ll
@@ -0,0 +1,123 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+
+; Test that b - smin(b, a) is recognized as non-negative
+define i64 @func1(i32 %a, i32 %b) {
+; CHECK-LABEL: define i64 @func1(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[SPEC_SELECT:%.*]] = tail call i32 @llvm.smin.i32(i32 [[B]], i32 [[A]])
+; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i32 [[B]], [[SPEC_SELECT]]
+; CHECK-NEXT:    [[CONV:%.*]] = sext i32 [[SUB]] to i64
+; CHECK-NEXT:    ret i64 [[CONV]]
+;
+entry:
+  %spec.select = tail call i32 @llvm.smin.i32(i32 %b, i32 %a)
+  %sub = sub nsw i32 %b, %spec.select
+  %conv = sext i32 %sub to i64
+  ret i64 %conv
+}
+
+; Test that select (b < a), 0, (b - a) is recognized as non-negative
+define i64 @func3(i32 %a, i32 %b) {
+; CHECK-LABEL: define i64 @func3(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[B]], [[A]]
+; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i32 [[B]], [[A]]
+; CHECK-NEXT:    [[COND:%.*]] = select i1 [[CMP]], i32 0, i32 [[SUB]]
+; CHECK-NEXT:    [[CONV:%.*]] = sext i32 [[COND]] to i64
+; CHECK-NEXT:    ret i64 [[CONV]]
+;
+entry:
+  %cmp = icmp slt i32 %b, %a
+  %sub = sub nsw i32 %b, %a
+  %cond = select i1 %cmp, i32 0, i32 %sub
+  %conv = sext i32 %cond to i64
+  ret i64 %conv
+}
+
+; Test commutative smin pattern: a - smin(a, b) should also optimize
+define i64 @smin_commutative(i32 %a, i32 %b) {
+; CHECK-LABEL: define i64 @smin_commutative(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT:    [[MIN:%.*]] = call i32 @llvm.smin.i32(i32 [[A]], i32 [[B]])
+; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i32 [[A]], [[MIN]]
+; CHECK-NEXT:    [[EXT:%.*]] = sext i32 [[SUB]] to i64
+; CHECK-NEXT:    ret i64 [[EXT]]
+;
+  %min = call i32 @llvm.smin.i32(i32 %a, i32 %b)
+  %sub = sub nsw i32 %a, %min
+  %ext = sext i32 %sub to i64
+  ret i64 %ext
+}
+
+
+; Test select with reversed operands: select (b > a), (b - a), 0
+define i64 @select_reversed(i32 %a, i32 %b) {
+; CHECK-LABEL: define i64 @select_reversed(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[B]], [[A]]
+; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i32 [[B]], [[A]]
+; CHECK-NEXT:    [[COND:%.*]] = select i1 [[CMP]], i32 [[SUB]], i32 0
+; CHECK-NEXT:    [[EXT:%.*]] = sext i32 [[COND]] to i64
+; CHECK-NEXT:    ret i64 [[EXT]]
+;
+  %cmp = icmp sgt i32 %b, %a
+  %sub = sub nsw i32 %b, %a
+  %cond = select i1 %cmp, i32 %sub, i32 0
+  %ext = sext i32 %cond to i64
+  ret i64 %ext
+}
+
+; NEGATIVE TEST: unguarded subtraction should NOT optimize
+define i64 @neg_unguarded_sub(i32 %a, i32 %b) {
+; CHECK-LABEL: define i64 @neg_unguarded_sub(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i32 [[B]], [[A]]
+; CHECK-NEXT:    [[EXT:%.*]] = sext i32 [[SUB]] to i64
+; CHECK-NEXT:    ret i64 [[EXT]]
+;
+  %sub = sub nsw i32 %b, %a
+  %ext = sext i32 %sub to i64
+  ret i64 %ext
+}
+
+; NEGATIVE TEST: wrong comparison operands in select
+define i64 @neg_wrong_cmp_select(i32 %a, i32 %b) {
+; CHECK-LABEL: define i64 @neg_wrong_cmp_select(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[A]], [[B]]
+; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i32 [[B]], [[A]]
+; CHECK-NEXT:    [[COND:%.*]] = select i1 [[CMP]], i32 0, i32 [[SUB]]
+; CHECK-NEXT:    [[EXT:%.*]] = sext i32 [[COND]] to i64
+; CHECK-NEXT:    ret i64 [[EXT]]
+;
+  %cmp = icmp slt i32 %a, %b          ; compares a < b, but sub is b - a (mismatched)
+  %sub = sub nsw i32 %b, %a
+  %cond = select i1 %cmp, i32 0, i32 %sub
+  %ext = sext i32 %cond to i64
+  ret i64 %ext
+}
+
+
+
+; NEGATIVE TEST: select with non-zero constant
+define i64 @neg_select_nonzero(i32 %a, i32 %b) {
+; CHECK-LABEL: define i64 @neg_select_nonzero(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[B]], [[A]]
+; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i32 [[B]], [[A]]
+; CHECK-NEXT:    [[COND:%.*]] = select i1 [[CMP]], i32 1, i32 [[SUB]]
+; CHECK-NEXT:    [[EXT:%.*]] = sext i32 [[COND]] to i64
+; CHECK-NEXT:    ret i64 [[EXT]]
+;
+  %cmp = icmp slt i32 %b, %a
+  %sub = sub nsw i32 %b, %a
+  %cond = select i1 %cmp, i32 1, i32 %sub  ; not zero!
+  %ext = sext i32 %cond to i64
+  ret i64 %ext
+}
+
+declare i32 @llvm.smin.i32(i32, i32)
+

>From a03b0bcc66115a9bb34ef9bae14df144b50d514d Mon Sep 17 00:00:00 2001
From: abu <ayywarepremium at gmail.com>
Date: Thu, 26 Feb 2026 22:33:13 -0800
Subject: [PATCH 2/3] Canonicalize ICmp to smin intrinsic

---
 llvm/lib/Analysis/ValueTracking.cpp           | 39 ++++++++++++++++++-
 .../InstCombine/InstCombineSelect.cpp         | 15 +++++++
 .../Transforms/InstCombine/sext-nonneg-sub.ll | 18 ++++-----
 3 files changed, 60 insertions(+), 12 deletions(-)

diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 2d78aa4be3455..a6aeeb91511d9 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -483,6 +483,39 @@ static void computeKnownBitsFromLerpPattern(const Value *Op0, const Value *Op1,
   KnownOut.Zero.setHighBits(MinimumNumberOfLeadingZeros);
 }
 
+static bool isKnownNonNegativeICmpWithMinMax(bool Add, bool NSW, bool NUW,
+                                             const Value *Op0, const Value *Op1,
+                                             const Instruction *I,
+                                             KnownBits &KnownOut) {
+  if (Add)
+    return false;
+  // Match against A = smin(A , B) and A = smin(B, A)
+  {
+    Value *V;
+    if (NSW && match(Op1, m_SMin(m_Specific(Op0), m_Value(V))) ||
+        match(Op1, m_SMin(m_Value(V), m_Specific(Op0))))
+      return true;
+  }
+
+  // Match against non-negative select
+  {
+    Value *A, *B;
+    if (match(I, m_Select(
+                     m_SpecificICmp(ICmpInst::ICMP_SLT, m_Value(A), m_Value(B)),
+                     m_Zero(), m_NSWSub(m_Deferred(A), m_Deferred(B)))) ||
+        match(I, m_Select(
+                     m_SpecificICmp(ICmpInst::ICMP_SGT, m_Value(A), m_Value(B)),
+                     m_Zero(), m_NSWSub(m_Deferred(B), m_Deferred(A)))) ||
+        match(I, m_Select(
+                     m_SpecificICmp(ICmpInst::ICMP_SGT, m_Value(A), m_Value(B)),
+                     m_NSWSub(m_Deferred(A), m_Deferred(B)), m_Zero()))) {
+
+      return true;
+    }
+  }
+  return false;
+}
+
 static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                    bool NSW, bool NUW,
                                    const APInt &DemandedElts,
@@ -499,8 +532,10 @@ static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
   KnownOut = KnownBits::computeForAddSub(Add, NSW, NUW, Known2, KnownOut);
 
   if (!Add && NSW && !KnownOut.isNonNegative() &&
-      isImpliedByDomCondition(ICmpInst::ICMP_SLE, Op1, Op0, Q.CxtI, Q.DL)
-          .value_or(false))
+      (isImpliedByDomCondition(ICmpInst::ICMP_SLE, Op1, Op0, Q.CxtI, Q.DL)
+           .value_or(false) ||
+       isKnownNonNegativeICmpWithMinMax(Add, NSW, NUW, Op0, Op1, Q.CxtI,
+                                        KnownOut)))
     KnownOut.makeNonNegative();
 
   if (Add)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index e57be1dce7b46..3fc656d775dd0 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -647,6 +647,21 @@ static Value *foldSelectICmpMinMax(const ICmpInst *Cmp, Value *TVal,
   const Value *CmpRHS = Cmp->getOperand(1);
   ICmpInst::Predicate Pred = Cmp->getPredicate();
 
+  if (Pred == CmpInst::ICMP_SLT && match(TVal, m_Zero()) &&
+      match(FVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS)))) {
+    Value *LHS = const_cast<Value *>(CmpLHS);
+    Value *RHS = const_cast<Value *>(CmpRHS);
+    Value *SMin = Builder.CreateBinaryIntrinsic(Intrinsic::smin, LHS, RHS);
+    return Builder.CreateNSWSub(RHS, SMin);
+  }
+
+  if (Pred == CmpInst::ICMP_SGT && match(FVal, m_Zero()) &&
+      match(TVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS)))) {
+    Value *LHS = const_cast<Value *>(CmpLHS);
+    Value *RHS = const_cast<Value *>(CmpRHS);
+    Value *SMin = Builder.CreateBinaryIntrinsic(Intrinsic::smin, LHS, RHS);
+    return Builder.CreateNSWSub(LHS, SMin);
+  }
   // (X > Y) ? X : (Y - 1) ==> MIN(X, Y - 1)
   // (X < Y) ? X : (Y + 1) ==> MAX(X, Y + 1)
   // This transformation is valid when overflow corresponding to the sign of
diff --git a/llvm/test/Transforms/InstCombine/sext-nonneg-sub.ll b/llvm/test/Transforms/InstCombine/sext-nonneg-sub.ll
index 58bfe870e9ab5..a6a873ef7ae63 100644
--- a/llvm/test/Transforms/InstCombine/sext-nonneg-sub.ll
+++ b/llvm/test/Transforms/InstCombine/sext-nonneg-sub.ll
@@ -8,7 +8,7 @@ define i64 @func1(i32 %a, i32 %b) {
 ; CHECK-NEXT:  [[ENTRY:.*:]]
 ; CHECK-NEXT:    [[SPEC_SELECT:%.*]] = tail call i32 @llvm.smin.i32(i32 [[B]], i32 [[A]])
 ; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i32 [[B]], [[SPEC_SELECT]]
-; CHECK-NEXT:    [[CONV:%.*]] = sext i32 [[SUB]] to i64
+; CHECK-NEXT:    [[CONV:%.*]] = zext nneg i32 [[SUB]] to i64
 ; CHECK-NEXT:    ret i64 [[CONV]]
 ;
 entry:
@@ -23,10 +23,9 @@ define i64 @func3(i32 %a, i32 %b) {
 ; CHECK-LABEL: define i64 @func3(
 ; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
 ; CHECK-NEXT:  [[ENTRY:.*:]]
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[B]], [[A]]
-; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i32 [[B]], [[A]]
-; CHECK-NEXT:    [[COND:%.*]] = select i1 [[CMP]], i32 0, i32 [[SUB]]
-; CHECK-NEXT:    [[CONV:%.*]] = sext i32 [[COND]] to i64
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.smin.i32(i32 [[B]], i32 [[A]])
+; CHECK-NEXT:    [[COND:%.*]] = sub nsw i32 [[A]], [[TMP0]]
+; CHECK-NEXT:    [[CONV:%.*]] = zext nneg i32 [[COND]] to i64
 ; CHECK-NEXT:    ret i64 [[CONV]]
 ;
 entry:
@@ -43,7 +42,7 @@ define i64 @smin_commutative(i32 %a, i32 %b) {
 ; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
 ; CHECK-NEXT:    [[MIN:%.*]] = call i32 @llvm.smin.i32(i32 [[A]], i32 [[B]])
 ; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i32 [[A]], [[MIN]]
-; CHECK-NEXT:    [[EXT:%.*]] = sext i32 [[SUB]] to i64
+; CHECK-NEXT:    [[EXT:%.*]] = zext nneg i32 [[SUB]] to i64
 ; CHECK-NEXT:    ret i64 [[EXT]]
 ;
   %min = call i32 @llvm.smin.i32(i32 %a, i32 %b)
@@ -57,10 +56,9 @@ define i64 @smin_commutative(i32 %a, i32 %b) {
 define i64 @select_reversed(i32 %a, i32 %b) {
 ; CHECK-LABEL: define i64 @select_reversed(
 ; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
-; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[B]], [[A]]
-; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i32 [[B]], [[A]]
-; CHECK-NEXT:    [[COND:%.*]] = select i1 [[CMP]], i32 [[SUB]], i32 0
-; CHECK-NEXT:    [[EXT:%.*]] = sext i32 [[COND]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.smin.i32(i32 [[B]], i32 [[A]])
+; CHECK-NEXT:    [[COND:%.*]] = sub nsw i32 [[B]], [[TMP1]]
+; CHECK-NEXT:    [[EXT:%.*]] = zext nneg i32 [[COND]] to i64
 ; CHECK-NEXT:    ret i64 [[EXT]]
 ;
   %cmp = icmp sgt i32 %b, %a

>From fa5323b3263697c37c5716beb99cd9f4f92ed790 Mon Sep 17 00:00:00 2001
From: abu <ayywarepremium at gmail.com>
Date: Tue, 3 Mar 2026 21:17:43 -0800
Subject: [PATCH 3/3] Removed canonicalization of select instruction to smin

---
 llvm/lib/Analysis/ValueTracking.cpp           | 48 +++++--------
 .../InstCombine/InstCombineSelect.cpp         | 15 ----
 .../Transforms/InstCombine/sext-nonneg-sub.ll | 70 -------------------
 3 files changed, 18 insertions(+), 115 deletions(-)

diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index a6aeeb91511d9..47b9d1c90be2b 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -483,36 +483,25 @@ static void computeKnownBitsFromLerpPattern(const Value *Op0, const Value *Op1,
   KnownOut.Zero.setHighBits(MinimumNumberOfLeadingZeros);
 }
 
-static bool isKnownNonNegativeICmpWithMinMax(bool Add, bool NSW, bool NUW,
-                                             const Value *Op0, const Value *Op1,
-                                             const Instruction *I,
-                                             KnownBits &KnownOut) {
-  if (Add)
-    return false;
-  // Match against A = smin(A , B) and A = smin(B, A)
-  {
-    Value *V;
-    if (NSW && match(Op1, m_SMin(m_Specific(Op0), m_Value(V))) ||
-        match(Op1, m_SMin(m_Value(V), m_Specific(Op0))))
-      return true;
-  }
+static bool isKnownNonNegativeFromMinOrGuardedSub(const Value *Op0,
+                                                  const Value *Op1) {
 
-  // Match against non-negative select
-  {
-    Value *A, *B;
-    if (match(I, m_Select(
-                     m_SpecificICmp(ICmpInst::ICMP_SLT, m_Value(A), m_Value(B)),
-                     m_Zero(), m_NSWSub(m_Deferred(A), m_Deferred(B)))) ||
-        match(I, m_Select(
-                     m_SpecificICmp(ICmpInst::ICMP_SGT, m_Value(A), m_Value(B)),
-                     m_Zero(), m_NSWSub(m_Deferred(B), m_Deferred(A)))) ||
-        match(I, m_Select(
-                     m_SpecificICmp(ICmpInst::ICMP_SGT, m_Value(A), m_Value(B)),
-                     m_NSWSub(m_Deferred(A), m_Deferred(B)), m_Zero()))) {
+  Value *V;
+
+  if (match(Op1, m_SMin(m_Specific(Op0), m_Value(V))) ||
+      match(Op1, m_SMin(m_Value(V), m_Specific(Op0))))
+    return true;
+
+  Value *A;
+  if (match(Op1,
+            m_Select(
+                m_SpecificICmp(ICmpInst::ICMP_SLT, m_Specific(Op0), m_Value(A)),
+                m_Zero(), m_NSWSub(m_Specific(Op0), m_Deferred(A)))) ||
+      match(Op1, m_Select(m_SpecificICmp(ICmpInst::ICMP_SGT, m_Specific(Op0),
+                                         m_Value(A)),
+                          m_NSWSub(m_Specific(Op0), m_Deferred(A)), m_Zero())))
+    return true;
 
-      return true;
-    }
-  }
   return false;
 }
 
@@ -534,8 +523,7 @@ static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
   if (!Add && NSW && !KnownOut.isNonNegative() &&
       (isImpliedByDomCondition(ICmpInst::ICMP_SLE, Op1, Op0, Q.CxtI, Q.DL)
            .value_or(false) ||
-       isKnownNonNegativeICmpWithMinMax(Add, NSW, NUW, Op0, Op1, Q.CxtI,
-                                        KnownOut)))
+       isKnownNonNegativeFromMinOrGuardedSub(Op0, Op1)))
     KnownOut.makeNonNegative();
 
   if (Add)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 3fc656d775dd0..e57be1dce7b46 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -647,21 +647,6 @@ static Value *foldSelectICmpMinMax(const ICmpInst *Cmp, Value *TVal,
   const Value *CmpRHS = Cmp->getOperand(1);
   ICmpInst::Predicate Pred = Cmp->getPredicate();
 
-  if (Pred == CmpInst::ICMP_SLT && match(TVal, m_Zero()) &&
-      match(FVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS)))) {
-    Value *LHS = const_cast<Value *>(CmpLHS);
-    Value *RHS = const_cast<Value *>(CmpRHS);
-    Value *SMin = Builder.CreateBinaryIntrinsic(Intrinsic::smin, LHS, RHS);
-    return Builder.CreateNSWSub(RHS, SMin);
-  }
-
-  if (Pred == CmpInst::ICMP_SGT && match(FVal, m_Zero()) &&
-      match(TVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS)))) {
-    Value *LHS = const_cast<Value *>(CmpLHS);
-    Value *RHS = const_cast<Value *>(CmpRHS);
-    Value *SMin = Builder.CreateBinaryIntrinsic(Intrinsic::smin, LHS, RHS);
-    return Builder.CreateNSWSub(LHS, SMin);
-  }
   // (X > Y) ? X : (Y - 1) ==> MIN(X, Y - 1)
   // (X < Y) ? X : (Y + 1) ==> MAX(X, Y + 1)
   // This transformation is valid when overflow corresponding to the sign of
diff --git a/llvm/test/Transforms/InstCombine/sext-nonneg-sub.ll b/llvm/test/Transforms/InstCombine/sext-nonneg-sub.ll
index a6a873ef7ae63..582ba5211b173 100644
--- a/llvm/test/Transforms/InstCombine/sext-nonneg-sub.ll
+++ b/llvm/test/Transforms/InstCombine/sext-nonneg-sub.ll
@@ -18,24 +18,6 @@ entry:
   ret i64 %conv
 }
 
-; Test that select (b < a), 0, (b - a) is recognized as non-negative
-define i64 @func3(i32 %a, i32 %b) {
-; CHECK-LABEL: define i64 @func3(
-; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
-; CHECK-NEXT:  [[ENTRY:.*:]]
-; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.smin.i32(i32 [[B]], i32 [[A]])
-; CHECK-NEXT:    [[COND:%.*]] = sub nsw i32 [[A]], [[TMP0]]
-; CHECK-NEXT:    [[CONV:%.*]] = zext nneg i32 [[COND]] to i64
-; CHECK-NEXT:    ret i64 [[CONV]]
-;
-entry:
-  %cmp = icmp slt i32 %b, %a
-  %sub = sub nsw i32 %b, %a
-  %cond = select i1 %cmp, i32 0, i32 %sub
-  %conv = sext i32 %cond to i64
-  ret i64 %conv
-}
-
 ; Test commutative smin pattern: a - smin(a, b) should also optimize
 define i64 @smin_commutative(i32 %a, i32 %b) {
 ; CHECK-LABEL: define i64 @smin_commutative(
@@ -52,22 +34,6 @@ define i64 @smin_commutative(i32 %a, i32 %b) {
 }
 
 
-; Test select with reversed operands: select (b > a), (b - a), 0
-define i64 @select_reversed(i32 %a, i32 %b) {
-; CHECK-LABEL: define i64 @select_reversed(
-; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.smin.i32(i32 [[B]], i32 [[A]])
-; CHECK-NEXT:    [[COND:%.*]] = sub nsw i32 [[B]], [[TMP1]]
-; CHECK-NEXT:    [[EXT:%.*]] = zext nneg i32 [[COND]] to i64
-; CHECK-NEXT:    ret i64 [[EXT]]
-;
-  %cmp = icmp sgt i32 %b, %a
-  %sub = sub nsw i32 %b, %a
-  %cond = select i1 %cmp, i32 %sub, i32 0
-  %ext = sext i32 %cond to i64
-  ret i64 %ext
-}
-
 ; NEGATIVE TEST: unguarded subtraction should NOT optimize
 define i64 @neg_unguarded_sub(i32 %a, i32 %b) {
 ; CHECK-LABEL: define i64 @neg_unguarded_sub(
@@ -81,41 +47,5 @@ define i64 @neg_unguarded_sub(i32 %a, i32 %b) {
   ret i64 %ext
 }
 
-; NEGATIVE TEST: wrong comparison operands in select
-define i64 @neg_wrong_cmp_select(i32 %a, i32 %b) {
-; CHECK-LABEL: define i64 @neg_wrong_cmp_select(
-; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[A]], [[B]]
-; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i32 [[B]], [[A]]
-; CHECK-NEXT:    [[COND:%.*]] = select i1 [[CMP]], i32 0, i32 [[SUB]]
-; CHECK-NEXT:    [[EXT:%.*]] = sext i32 [[COND]] to i64
-; CHECK-NEXT:    ret i64 [[EXT]]
-;
-  %cmp = icmp slt i32 %a, %b          ; compares a < b, but sub is b - a (mismatched)
-  %sub = sub nsw i32 %b, %a
-  %cond = select i1 %cmp, i32 0, i32 %sub
-  %ext = sext i32 %cond to i64
-  ret i64 %ext
-}
-
-
-
-; NEGATIVE TEST: select with non-zero constant
-define i64 @neg_select_nonzero(i32 %a, i32 %b) {
-; CHECK-LABEL: define i64 @neg_select_nonzero(
-; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[B]], [[A]]
-; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i32 [[B]], [[A]]
-; CHECK-NEXT:    [[COND:%.*]] = select i1 [[CMP]], i32 1, i32 [[SUB]]
-; CHECK-NEXT:    [[EXT:%.*]] = sext i32 [[COND]] to i64
-; CHECK-NEXT:    ret i64 [[EXT]]
-;
-  %cmp = icmp slt i32 %b, %a
-  %sub = sub nsw i32 %b, %a
-  %cond = select i1 %cmp, i32 1, i32 %sub  ; not zero!
-  %ext = sext i32 %cond to i64
-  ret i64 %ext
-}
 
 declare i32 @llvm.smin.i32(i32, i32)
-



More information about the llvm-commits mailing list