[llvm] [InstCombine] Fold max/min when incrementing/decrementing by 1 (PR #142466)

Alex MacLean via llvm-commits llvm-commits at lists.llvm.org
Thu Jun 5 09:54:33 PDT 2025


https://github.com/AlexMaclean updated https://github.com/llvm/llvm-project/pull/142466

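For readers skimming the series: the fold taught here rewrites a select whose
false arm is the compared value incremented or decremented by one into a
min/max intrinsic. A minimal before/after sketch, distilled from the
pre-commit tests in the first patch below:

  ; before
  %cmp = icmp slt i32 %x, %w
  %sub = add nsw i32 %w, -1
  %r = select i1 %cmp, i32 %x, i32 %sub

  ; after
  %sub = add nsw i32 %w, -1
  %r = call i32 @llvm.smin.i32(i32 %x, i32 %sub)

The key observation is that x <s w is equivalent to x <=s w - 1 whenever
w - 1 does not overflow, which the nsw flag guarantees; the unsigned and max
variants below follow the same shape.
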
From 1257dcaba700912f19135baa67e485d25c915c3b Mon Sep 17 00:00:00 2001
From: Alex Maclean <amaclean at nvidia.com>
Date: Sun, 1 Jun 2025 16:30:05 +0000
Subject: [PATCH 1/8] pre-commit tests

---
 .../Transforms/InstCombine/minmax-fold.ll     | 40 +++++++++++++++++++
 1 file changed, 40 insertions(+)

diff --git a/llvm/test/Transforms/InstCombine/minmax-fold.ll b/llvm/test/Transforms/InstCombine/minmax-fold.ll
index 3bb1fd60f3afe..153f7d5404d9e 100644
--- a/llvm/test/Transforms/InstCombine/minmax-fold.ll
+++ b/llvm/test/Transforms/InstCombine/minmax-fold.ll
@@ -1598,3 +1598,43 @@ define <2 x i32> @test_umax_smax_vec_neg(<2 x i32> %x) {
   %umax = call <2 x i32> @llvm.umax.v2i32(<2 x i32> %smax, <2 x i32> <i32 1, i32 10>)
   ret <2 x i32> %umax
 }
+
+define i32 @test_smin_sub1_nsw(i32 %x, i32 %w) {
+; CHECK-LABEL: @test_smin_sub1_nsw(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[X:%.*]], [[W:%.*]]
+; CHECK-NEXT:    [[SUB:%.*]] = add nsw i32 [[W]], -1
+; CHECK-NEXT:    [[R:%.*]] = select i1 [[CMP]], i32 [[X]], i32 [[SUB]]
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %cmp = icmp slt i32 %x, %w
+  %sub = add nsw i32 %w, -1
+  %r = select i1 %cmp, i32 %x, i32 %sub
+  ret i32 %r
+}
+
+define i32 @test_smax_add1_nsw(i32 %x, i32 %w) {
+; CHECK-LABEL: @test_smax_add1_nsw(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[X:%.*]], [[W:%.*]]
+; CHECK-NEXT:    [[X2:%.*]] = add nsw i32 [[W]], 1
+; CHECK-NEXT:    [[R:%.*]] = select i1 [[CMP]], i32 [[X]], i32 [[X2]]
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %cmp = icmp sgt i32 %x, %w
+  %add = add nsw i32 %w, 1
+  %r = select i1 %cmp, i32 %x, i32 %add
+  ret i32 %r
+}
+
+define i32 @test_umax_add1_nsw(i32 %x, i32 %w) {
+; CHECK-LABEL: @test_umax_add1_nsw(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[X:%.*]], [[W:%.*]]
+; CHECK-NEXT:    [[X2:%.*]] = add nuw i32 [[W]], 1
+; CHECK-NEXT:    [[R:%.*]] = select i1 [[CMP]], i32 [[X]], i32 [[X2]]
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %cmp = icmp ugt i32 %x, %w
+  %add = add nuw i32 %w, 1
+  %r = select i1 %cmp, i32 %x, i32 %add
+  ret i32 %r
+}
+

From def4aebdd30fdba545154079db75d132226de0d0 Mon Sep 17 00:00:00 2001
From: Alex Maclean <amaclean at nvidia.com>
Date: Sun, 1 Jun 2025 16:32:57 +0000
Subject: [PATCH 2/8] [ValueTracking] Fold max/min when
 incrementing/decrementing by 1

---
 llvm/lib/Analysis/ValueTracking.cpp            | 18 ++++++++++++++++++
 .../test/Transforms/InstCombine/minmax-fold.ll | 15 ++++++---------
 2 files changed, 24 insertions(+), 9 deletions(-)

diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 0a460786d00ea..4d9691a0ead51 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -8390,6 +8390,24 @@ static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
     }
   }
 
+  // (X > Y) ? X : (Y - 1) ==> MIN(X, Y - 1)
+  // (X < Y) ? X : (Y + 1) ==> MAX(X, Y + 1)
+  // When overflow corresponding to the sign of the comparison is poison.
+  // Note that the UMIN case is not possible as we canonicalize to addition.
+  if (CmpLHS == TrueVal) {
+    if (Pred == CmpInst::ICMP_SGT &&
+        match(FalseVal, m_NSWAddLike(m_Specific(CmpRHS), m_ConstantInt<1>())))
+      return {SPF_SMAX, SPNB_NA, false};
+
+    if (Pred == CmpInst::ICMP_SLT &&
+        match(FalseVal, m_NSWAddLike(m_Specific(CmpRHS), m_ConstantInt<-1>())))
+      return {SPF_SMIN, SPNB_NA, false};
+
+    if (Pred == CmpInst::ICMP_UGT &&
+        match(FalseVal, m_NUWAddLike(m_Specific(CmpRHS), m_ConstantInt<1>())))
+      return {SPF_UMAX, SPNB_NA, false};
+  }
+
   if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
     return {SPF_UNKNOWN, SPNB_NA, false};
 
diff --git a/llvm/test/Transforms/InstCombine/minmax-fold.ll b/llvm/test/Transforms/InstCombine/minmax-fold.ll
index 153f7d5404d9e..3f958bdf696cf 100644
--- a/llvm/test/Transforms/InstCombine/minmax-fold.ll
+++ b/llvm/test/Transforms/InstCombine/minmax-fold.ll
@@ -1601,9 +1601,8 @@ define <2 x i32> @test_umax_smax_vec_neg(<2 x i32> %x) {
 
 define i32 @test_smin_sub1_nsw(i32 %x, i32 %w) {
 ; CHECK-LABEL: @test_smin_sub1_nsw(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[X:%.*]], [[W:%.*]]
-; CHECK-NEXT:    [[SUB:%.*]] = add nsw i32 [[W]], -1
-; CHECK-NEXT:    [[R:%.*]] = select i1 [[CMP]], i32 [[X]], i32 [[SUB]]
+; CHECK-NEXT:    [[SUB:%.*]] = add nsw i32 [[W:%.*]], -1
+; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.smin.i32(i32 [[X:%.*]], i32 [[SUB]])
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %cmp = icmp slt i32 %x, %w
@@ -1614,9 +1613,8 @@ define i32 @test_smin_sub1_nsw(i32 %x, i32 %w) {
 
 define i32 @test_smax_add1_nsw(i32 %x, i32 %w) {
 ; CHECK-LABEL: @test_smax_add1_nsw(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[X:%.*]], [[W:%.*]]
-; CHECK-NEXT:    [[X2:%.*]] = add nsw i32 [[W]], 1
-; CHECK-NEXT:    [[R:%.*]] = select i1 [[CMP]], i32 [[X]], i32 [[X2]]
+; CHECK-NEXT:    [[X2:%.*]] = add nsw i32 [[W:%.*]], 1
+; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.smax.i32(i32 [[X:%.*]], i32 [[X2]])
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %cmp = icmp sgt i32 %x, %w
@@ -1627,9 +1625,8 @@ define i32 @test_smax_add1_nsw(i32 %x, i32 %w) {
 
 define i32 @test_umax_add1_nsw(i32 %x, i32 %w) {
 ; CHECK-LABEL: @test_umax_add1_nsw(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[X:%.*]], [[W:%.*]]
-; CHECK-NEXT:    [[X2:%.*]] = add nuw i32 [[W]], 1
-; CHECK-NEXT:    [[R:%.*]] = select i1 [[CMP]], i32 [[X]], i32 [[X2]]
+; CHECK-NEXT:    [[X2:%.*]] = add nuw i32 [[W:%.*]], 1
+; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.umax.i32(i32 [[X:%.*]], i32 [[X2]])
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %cmp = icmp ugt i32 %x, %w

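A quick sanity check on why the overflow flag is load-bearing (my own worked
counterexample, not part of the patch): without nsw, take %w = INT_MIN in the
smin case:

  %sub = add i32 %w, -1                   ; wraps to INT_MAX
  %cmp = icmp slt i32 %x, %w              ; always false, nothing is <s INT_MIN
  %r = select i1 %cmp, i32 %x, i32 %sub   ; always INT_MAX
  ; but smin(%x, INT_MAX) == %x, so the fold would miscompile

With nsw, that wrapping %sub is poison and the original select is free to
produce anything, so replacing it with the smin intrinsic is a legal
refinement.
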
From 675aaff24594aedfc64147b5cec959689c98c884 Mon Sep 17 00:00:00 2001
From: Alex Maclean <amaclean at nvidia.com>
Date: Tue, 3 Jun 2025 14:50:52 +0000
Subject: [PATCH 3/8] address comments

---
 llvm/lib/Analysis/ValueTracking.cpp           | 17 +++--
 .../Transforms/InstCombine/minmax-fold.ll     | 71 +++++++++++++++++++
 2 files changed, 83 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 4d9691a0ead51..c027d1067af56 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -8392,20 +8392,27 @@ static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
 
   // (X > Y) ? X : (Y - 1) ==> MIN(X, Y - 1)
   // (X < Y) ? X : (Y + 1) ==> MAX(X, Y + 1)
-  // When overflow corresponding to the sign of the comparison is poison.
-  // Note that the UMIN case is not possible as we canonicalize to addition.
+  // This transformation is valid when overflow corresponding to the sign of
+  // the comparison is poison and we must drop the non-matching overflow flag.
+  // Note: that the UMIN case is not possible as we canonicalize to addition.
   if (CmpLHS == TrueVal) {
     if (Pred == CmpInst::ICMP_SGT &&
-        match(FalseVal, m_NSWAddLike(m_Specific(CmpRHS), m_ConstantInt<1>())))
+        match(FalseVal, m_NSWAddLike(m_Specific(CmpRHS), m_One()))) {
+      cast<Instruction>(FalseVal)->setHasNoUnsignedWrap(false);
       return {SPF_SMAX, SPNB_NA, false};
+    }
 
     if (Pred == CmpInst::ICMP_SLT &&
-        match(FalseVal, m_NSWAddLike(m_Specific(CmpRHS), m_ConstantInt<-1>())))
+        match(FalseVal, m_NSWAddLike(m_Specific(CmpRHS), m_AllOnes()))) {
+      cast<Instruction>(FalseVal)->setHasNoUnsignedWrap(false);
       return {SPF_SMIN, SPNB_NA, false};
+    }
 
     if (Pred == CmpInst::ICMP_UGT &&
-        match(FalseVal, m_NUWAddLike(m_Specific(CmpRHS), m_ConstantInt<1>())))
+        match(FalseVal, m_NUWAddLike(m_Specific(CmpRHS), m_One()))) {
+      cast<Instruction>(FalseVal)->setHasNoSignedWrap(false);
       return {SPF_UMAX, SPNB_NA, false};
+    }
   }
 
   if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
diff --git a/llvm/test/Transforms/InstCombine/minmax-fold.ll b/llvm/test/Transforms/InstCombine/minmax-fold.ll
index 3f958bdf696cf..084ff8a22e335 100644
--- a/llvm/test/Transforms/InstCombine/minmax-fold.ll
+++ b/llvm/test/Transforms/InstCombine/minmax-fold.ll
@@ -1635,3 +1635,74 @@ define i32 @test_umax_add1_nsw(i32 %x, i32 %w) {
   ret i32 %r
 }
 
+define <2 x i16> @test_smin_sub1_nsw_vec(<2 x i16> %x, <2 x i16> %w) {
+; CHECK-LABEL: @test_smin_sub1_nsw_vec(
+; CHECK-NEXT:    [[SUB:%.*]] = add nsw <2 x i16> [[W:%.*]], splat (i16 -1)
+; CHECK-NEXT:    [[R:%.*]] = call <2 x i16> @llvm.smin.v2i16(<2 x i16> [[X:%.*]], <2 x i16> [[SUB]])
+; CHECK-NEXT:    ret <2 x i16> [[R]]
+;
+  %cmp = icmp slt <2 x i16> %x, %w
+  %sub = add nsw <2 x i16> %w, splat (i16 -1)
+  %r = select <2 x i1> %cmp, <2 x i16> %x, <2 x i16> %sub
+  ret <2 x i16> %r
+}
+
+define <2 x i16> @test_smax_add1_nsw_vec(<2 x i16> %x, <2 x i16> %w) {
+; CHECK-LABEL: @test_smax_add1_nsw_vec(
+; CHECK-NEXT:    [[ADD:%.*]] = add nsw <2 x i16> [[W:%.*]], splat (i16 1)
+; CHECK-NEXT:    [[R:%.*]] = call <2 x i16> @llvm.smax.v2i16(<2 x i16> [[X:%.*]], <2 x i16> [[ADD]])
+; CHECK-NEXT:    ret <2 x i16> [[R]]
+;
+  %cmp = icmp sgt <2 x i16> %x, %w
+  %add = add nsw <2 x i16> %w, splat (i16 1)
+  %r = select <2 x i1> %cmp, <2 x i16> %x, <2 x i16> %add
+  ret <2 x i16> %r
+}
+
+define <2 x i16> @test_umax_add1_nsw_vec(<2 x i16> %x, <2 x i16> %w) {
+; CHECK-LABEL: @test_umax_add1_nsw_vec(
+; CHECK-NEXT:    [[ADD:%.*]] = add nuw <2 x i16> [[W:%.*]], splat (i16 1)
+; CHECK-NEXT:    [[R:%.*]] = call <2 x i16> @llvm.umax.v2i16(<2 x i16> [[X:%.*]], <2 x i16> [[ADD]])
+; CHECK-NEXT:    ret <2 x i16> [[R]]
+;
+  %cmp = icmp ugt <2 x i16> %x, %w
+  %add = add nuw <2 x i16> %w, splat (i16 1)
+  %r = select <2 x i1> %cmp, <2 x i16> %x, <2 x i16> %add
+  ret <2 x i16> %r
+}
+
+define i32 @test_smin_sub1_nsw_drop_flags(i32 %x, i32 %w) {
+; CHECK-LABEL: @test_smin_sub1_nsw_drop_flags(
+; CHECK-NEXT:    [[SUB:%.*]] = add nsw i32 [[W:%.*]], -1
+; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.smin.i32(i32 [[X:%.*]], i32 [[SUB]])
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %cmp = icmp slt i32 %x, %w
+  %sub = add nsw nuw i32 %w, -1
+  %r = select i1 %cmp, i32 %x, i32 %sub
+  ret i32 %r
+}
+
+define i32 @test_smax_add1_nsw_drop_flags(i32 %x, i32 %w) {
+; CHECK-LABEL: @test_smax_add1_nsw_drop_flags(
+; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[W:%.*]], 1
+; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.smax.i32(i32 [[X:%.*]], i32 [[ADD]])
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %cmp = icmp sgt i32 %x, %w
+  %add = add nsw nuw i32 %w, 1
+  %r = select i1 %cmp, i32 %x, i32 %add
+  ret i32 %r
+}
+
+define i32 @test_umax_add1_nsw_drop_flags(i32 %x, i32 %w) {
+; CHECK-LABEL: @test_umax_add1_nsw_drop_flags(
+; CHECK-NEXT:    [[ADD:%.*]] = add nuw i32 [[W:%.*]], 1
+; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.umax.i32(i32 [[X:%.*]], i32 [[ADD]])
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %cmp = icmp ugt i32 %x, %w
+  %add = add nuw nsw i32 %w, 1
+  %r = select i1 %cmp, i32 %x, i32 %add
+  ret i32 %r
+}

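Why the non-matching flag has to be cleared (my gloss on the new drop-flags
tests): the add executes unconditionally either way, but before the fold the
select shields its poison whenever the true arm wins; after the fold the
intrinsic sees it on every path. E.g. in test_smin_sub1_nsw_drop_flags:

  %cmp = icmp slt i32 %x, %w
  %sub = add nsw nuw i32 %w, -1   ; nuw is violated for any %w >= 1,
                                  ; so %sub is usually poison
  %r = select i1 %cmp, i32 %x, i32 %sub   ; still a well-defined %x when %cmp is true

  ; smin(%x, %sub) would be poison whenever %sub is, so the nuw the fold
  ; never relied on is dropped first:
  %sub = add nsw i32 %w, -1
  %r = call i32 @llvm.smin.i32(i32 %x, i32 %sub)
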
From 7d41d85fa465dc091d05027e91883610a4ae8553 Mon Sep 17 00:00:00 2001
From: Alex Maclean <amaclean at nvidia.com>
Date: Tue, 3 Jun 2025 16:13:46 +0000
Subject: [PATCH 4/8] address more comments

---
 llvm/lib/Analysis/ValueTracking.cpp           | 25 ------------
 .../InstCombine/InstCombineSelect.cpp         | 40 +++++++++++++++++++
 .../Transforms/InstCombine/minmax-fold.ll     |  4 +-
 3 files changed, 42 insertions(+), 27 deletions(-)

diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index c027d1067af56..0a460786d00ea 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -8390,31 +8390,6 @@ static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
     }
   }
 
-  // (X > Y) ? X : (Y - 1) ==> MIN(X, Y - 1)
-  // (X < Y) ? X : (Y + 1) ==> MAX(X, Y + 1)
-  // This transformation is valid when overflow corresponding to the sign of
-  // the comparison is poison and we must drop the non-matching overflow flag.
-  // Note: that the UMIN case is not possible as we canonicalize to addition.
-  if (CmpLHS == TrueVal) {
-    if (Pred == CmpInst::ICMP_SGT &&
-        match(FalseVal, m_NSWAddLike(m_Specific(CmpRHS), m_One()))) {
-      cast<Instruction>(FalseVal)->setHasNoUnsignedWrap(false);
-      return {SPF_SMAX, SPNB_NA, false};
-    }
-
-    if (Pred == CmpInst::ICMP_SLT &&
-        match(FalseVal, m_NSWAddLike(m_Specific(CmpRHS), m_AllOnes()))) {
-      cast<Instruction>(FalseVal)->setHasNoUnsignedWrap(false);
-      return {SPF_SMIN, SPNB_NA, false};
-    }
-
-    if (Pred == CmpInst::ICMP_UGT &&
-        match(FalseVal, m_NUWAddLike(m_Specific(CmpRHS), m_One()))) {
-      cast<Instruction>(FalseVal)->setHasNoSignedWrap(false);
-      return {SPF_UMAX, SPNB_NA, false};
-    }
-  }
-
   if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
     return {SPF_UNKNOWN, SPNB_NA, false};
 
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index d7d0431a5b8d0..a6d4adf00c0c3 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -565,6 +565,43 @@ Instruction *InstCombinerImpl::foldSelectIntoOp(SelectInst &SI, Value *TrueVal,
   return nullptr;
 }
 
+/// Try to fold a select to a min/max intrinsic. Many cases are already handled
+/// by matchDecomposedSelectPattern but here we handle the cases where more
+/// exensive modification of the IR is required.
+static Value *foldSelectICmpMinMax(const ICmpInst *Cmp, Value *TVal,
+                                   Value *FVal,
+                                   InstCombiner::BuilderTy &Builder) {
+  const Value *CmpLHS = Cmp->getOperand(0);
+  const Value *CmpRHS = Cmp->getOperand(1);
+  const ICmpInst::Predicate Pred = Cmp->getPredicate();
+
+  // (X > Y) ? X : (Y - 1) ==> MIN(X, Y - 1)
+  // (X < Y) ? X : (Y + 1) ==> MAX(X, Y + 1)
+  // This transformation is valid when overflow corresponding to the sign of
+  // the comparison is poison and we must drop the non-matching overflow flag.
+  // Note: that the UMIN case is not possible as we canonicalize to addition.
+  if (CmpLHS == TVal) {
+    if (Pred == CmpInst::ICMP_SGT &&
+        match(FVal, m_NSWAddLike(m_Specific(CmpRHS), m_One()))) {
+      cast<Instruction>(FVal)->setHasNoUnsignedWrap(false);
+      return Builder.CreateBinaryIntrinsic(Intrinsic::smax, TVal, FVal);
+    }
+
+    if (Pred == CmpInst::ICMP_SLT &&
+        match(FVal, m_NSWAddLike(m_Specific(CmpRHS), m_AllOnes()))) {
+      cast<Instruction>(FVal)->setHasNoUnsignedWrap(false);
+      return Builder.CreateBinaryIntrinsic(Intrinsic::smin, TVal, FVal);
+    }
+
+    if (Pred == CmpInst::ICMP_UGT &&
+        match(FVal, m_NUWAddLike(m_Specific(CmpRHS), m_One()))) {
+      cast<Instruction>(FVal)->setHasNoSignedWrap(false);
+      return Builder.CreateBinaryIntrinsic(Intrinsic::umax, TVal, FVal);
+    }
+  }
+  return nullptr;
+}
+
 /// We want to turn:
 ///   (select (icmp eq (and X, Y), 0), (and (lshr X, Z), 1), 1)
 /// into:
@@ -1917,6 +1954,9 @@ Instruction *InstCombinerImpl::foldSelectInstWithICmp(SelectInst &SI,
     return &SI;
   }
 
+  if (Value *V = foldSelectICmpMinMax(ICI, TrueVal, FalseVal, Builder))
+    return replaceInstUsesWith(SI, V);
+
   if (Instruction *V =
           foldSelectICmpAndAnd(SI.getType(), ICI, TrueVal, FalseVal, Builder))
     return V;
diff --git a/llvm/test/Transforms/InstCombine/minmax-fold.ll b/llvm/test/Transforms/InstCombine/minmax-fold.ll
index 084ff8a22e335..95c28510bedff 100644
--- a/llvm/test/Transforms/InstCombine/minmax-fold.ll
+++ b/llvm/test/Transforms/InstCombine/minmax-fold.ll
@@ -1673,8 +1673,8 @@ define <2 x i16> @test_umax_add1_nsw_vec(<2 x i16> %x, <2 x i16> %w) {
 
 define i32 @test_smin_sub1_nsw_drop_flags(i32 %x, i32 %w) {
 ; CHECK-LABEL: @test_smin_sub1_nsw_drop_flags(
-; CHECK-NEXT:    [[SUB:%.*]] = add nsw i32 [[W:%.*]], -1
-; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.smin.i32(i32 [[X:%.*]], i32 [[SUB]])
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[X:%.*]], [[W:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = select i1 [[CMP]], i32 [[X]], i32 -1
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %cmp = icmp slt i32 %x, %w

From ffdcb9be5b9923bad81aa3197d45360a74914524 Mon Sep 17 00:00:00 2001
From: Alex Maclean <amaclean at nvidia.com>
Date: Tue, 3 Jun 2025 16:54:05 +0000
Subject: [PATCH 5/8] address more comments

---
 .../InstCombine/InstCombineSelect.cpp         | 15 ++++--
 .../Transforms/InstCombine/minmax-fold.ll     | 53 ++++++++++++++++---
 2 files changed, 57 insertions(+), 11 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index a6d4adf00c0c3..cf9033f8f4db3 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -570,7 +570,8 @@ Instruction *InstCombinerImpl::foldSelectIntoOp(SelectInst &SI, Value *TrueVal,
 /// exensive modification of the IR is required.
 static Value *foldSelectICmpMinMax(const ICmpInst *Cmp, Value *TVal,
                                    Value *FVal,
-                                   InstCombiner::BuilderTy &Builder) {
+                                   InstCombiner::BuilderTy &Builder,
+                                   const SimplifyQuery &SQ) {
   const Value *CmpLHS = Cmp->getOperand(0);
   const Value *CmpRHS = Cmp->getOperand(1);
   const ICmpInst::Predicate Pred = Cmp->getPredicate();
@@ -579,7 +580,6 @@ static Value *foldSelectICmpMinMax(const ICmpInst *Cmp, Value *TVal,
   // (X < Y) ? X : (Y + 1) ==> MAX(X, Y + 1)
   // This transformation is valid when overflow corresponding to the sign of
   // the comparison is poison and we must drop the non-matching overflow flag.
-  // Note: that the UMIN case is not possible as we canonicalize to addition.
   if (CmpLHS == TVal) {
     if (Pred == CmpInst::ICMP_SGT &&
         match(FVal, m_NSWAddLike(m_Specific(CmpRHS), m_One()))) {
@@ -598,6 +598,15 @@ static Value *foldSelectICmpMinMax(const ICmpInst *Cmp, Value *TVal,
       cast<Instruction>(FVal)->setHasNoSignedWrap(false);
       return Builder.CreateBinaryIntrinsic(Intrinsic::umax, TVal, FVal);
     }
+
+    // Note: We must use isKnownNonZero here because "sub nuw %x, 1" will be
+    // canonicalize to "add %x, -1" discarding the nuw flag.
+    if (Pred == CmpInst::ICMP_ULT &&
+        match(FVal, m_AddLike(m_Specific(CmpRHS), m_AllOnes())) &&
+        isKnownNonZero(CmpRHS, SQ)) {
+      cast<Instruction>(FVal)->setHasNoSignedWrap(false);
+      return Builder.CreateBinaryIntrinsic(Intrinsic::umin, TVal, FVal);
+    }
   }
   return nullptr;
 }
@@ -1954,7 +1963,7 @@ Instruction *InstCombinerImpl::foldSelectInstWithICmp(SelectInst &SI,
     return &SI;
   }
 
-  if (Value *V = foldSelectICmpMinMax(ICI, TrueVal, FalseVal, Builder))
+  if (Value *V = foldSelectICmpMinMax(ICI, TrueVal, FalseVal, Builder, SQ))
     return replaceInstUsesWith(SI, V);
 
   if (Instruction *V =
diff --git a/llvm/test/Transforms/InstCombine/minmax-fold.ll b/llvm/test/Transforms/InstCombine/minmax-fold.ll
index 95c28510bedff..e0ff3150dacf9 100644
--- a/llvm/test/Transforms/InstCombine/minmax-fold.ll
+++ b/llvm/test/Transforms/InstCombine/minmax-fold.ll
@@ -1623,10 +1623,10 @@ define i32 @test_smax_add1_nsw(i32 %x, i32 %w) {
   ret i32 %r
 }
 
-define i32 @test_umax_add1_nsw(i32 %x, i32 %w) {
-; CHECK-LABEL: @test_umax_add1_nsw(
-; CHECK-NEXT:    [[X2:%.*]] = add nuw i32 [[W:%.*]], 1
-; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.umax.i32(i32 [[X:%.*]], i32 [[X2]])
+define i32 @test_umax_add1_nuw(i32 %x, i32 %w) {
+; CHECK-LABEL: @test_umax_add1_nuw(
+; CHECK-NEXT:    [[ADD:%.*]] = add nuw i32 [[W:%.*]], 1
+; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.umax.i32(i32 [[X:%.*]], i32 [[ADD]])
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %cmp = icmp ugt i32 %x, %w
@@ -1635,6 +1635,18 @@ define i32 @test_umax_add1_nsw(i32 %x, i32 %w) {
   ret i32 %r
 }
 
+define i32 @test_umin_sub1_nuw(i32 %x, i32 range(i32 1, 0) %w) {
+; CHECK-LABEL: @test_umin_sub1_nuw(
+; CHECK-NEXT:    [[SUB:%.*]] = add i32 [[W:%.*]], -1
+; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.umin.i32(i32 [[X:%.*]], i32 [[SUB]])
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %cmp = icmp ult i32 %x, %w
+  %sub = add i32 %w, -1
+  %r = select i1 %cmp, i32 %x, i32 %sub
+  ret i32 %r
+}
+
 define <2 x i16> @test_smin_sub1_nsw_vec(<2 x i16> %x, <2 x i16> %w) {
 ; CHECK-LABEL: @test_smin_sub1_nsw_vec(
 ; CHECK-NEXT:    [[SUB:%.*]] = add nsw <2 x i16> [[W:%.*]], splat (i16 -1)
@@ -1659,8 +1671,8 @@ define <2 x i16> @test_smax_add1_nsw_vec(<2 x i16> %x, <2 x i16> %w) {
   ret <2 x i16> %r
 }
 
-define <2 x i16> @test_umax_add1_nsw_vec(<2 x i16> %x, <2 x i16> %w) {
-; CHECK-LABEL: @test_umax_add1_nsw_vec(
+define <2 x i16> @test_umax_add1_nuw_vec(<2 x i16> %x, <2 x i16> %w) {
+; CHECK-LABEL: @test_umax_add1_nuw_vec(
 ; CHECK-NEXT:    [[ADD:%.*]] = add nuw <2 x i16> [[W:%.*]], splat (i16 1)
 ; CHECK-NEXT:    [[R:%.*]] = call <2 x i16> @llvm.umax.v2i16(<2 x i16> [[X:%.*]], <2 x i16> [[ADD]])
 ; CHECK-NEXT:    ret <2 x i16> [[R]]
@@ -1671,6 +1683,19 @@ define <2 x i16> @test_umax_add1_nsw_vec(<2 x i16> %x, <2 x i16> %w) {
   ret <2 x i16> %r
 }
 
+define <2 x i16> @test_umin_sub1_nuw_vec(<2 x i16> %x, <2 x i16> range(i16 1, 0) %w) {
+; CHECK-LABEL: @test_umin_sub1_nuw_vec(
+; CHECK-NEXT:    [[SUB:%.*]] = add <2 x i16> [[W:%.*]], splat (i16 -1)
+; CHECK-NEXT:    [[R:%.*]] = call <2 x i16> @llvm.umin.v2i16(<2 x i16> [[X:%.*]], <2 x i16> [[SUB]])
+; CHECK-NEXT:    ret <2 x i16> [[R]]
+;
+  %cmp = icmp ult <2 x i16> %x, %w
+  %sub = add <2 x i16> %w, splat (i16 -1)
+  %r = select <2 x i1> %cmp, <2 x i16> %x, <2 x i16> %sub
+  ret <2 x i16> %r
+}
+
+
 define i32 @test_smin_sub1_nsw_drop_flags(i32 %x, i32 %w) {
 ; CHECK-LABEL: @test_smin_sub1_nsw_drop_flags(
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[X:%.*]], [[W:%.*]]
@@ -1695,8 +1720,8 @@ define i32 @test_smax_add1_nsw_drop_flags(i32 %x, i32 %w) {
   ret i32 %r
 }
 
-define i32 @test_umax_add1_nsw_drop_flags(i32 %x, i32 %w) {
-; CHECK-LABEL: @test_umax_add1_nsw_drop_flags(
+define i32 @test_umax_add1_nuw_drop_flags(i32 %x, i32 %w) {
+; CHECK-LABEL: @test_umax_add1_nuw_drop_flags(
 ; CHECK-NEXT:    [[ADD:%.*]] = add nuw i32 [[W:%.*]], 1
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.umax.i32(i32 [[X:%.*]], i32 [[ADD]])
 ; CHECK-NEXT:    ret i32 [[R]]
@@ -1706,3 +1731,15 @@ define i32 @test_umax_add1_nsw_drop_flags(i32 %x, i32 %w) {
   %r = select i1 %cmp, i32 %x, i32 %add
   ret i32 %r
 }
+
+define i32 @test_umin_sub1_nuw_drop_flags(i32 %x, i32 range(i32 1, 0) %w) {
+; CHECK-LABEL: @test_umin_sub1_nuw_drop_flags(
+; CHECK-NEXT:    [[SUB:%.*]] = add i32 [[W:%.*]], -1
+; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.umin.i32(i32 [[X:%.*]], i32 [[SUB]])
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %cmp = icmp ult i32 %x, %w
+  %sub = add nsw i32 %w, -1
+  %r = select i1 %cmp, i32 %x, i32 %sub
+  ret i32 %r
+}

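The new umin case deserves a note (my reading of the isKnownNonZero comment):
the nuw fact the fold needs no longer lives on the instruction, because
"sub nuw %w, 1" is canonicalized to "add %w, -1" and the flag is discarded in
the process. It is recovered from value analysis instead, here via the range
attribute in the test:

  define i32 @t(i32 %x, i32 range(i32 1, 0) %w) {   ; %w known nonzero
    %cmp = icmp ult i32 %x, %w
    %sub = add i32 %w, -1         ; cannot unsigned-wrap since %w != 0
    %r = select i1 %cmp, i32 %x, i32 %sub
    ret i32 %r
  }
  ; folds to %r = call i32 @llvm.umin.i32(i32 %x, i32 %sub)

Since %w != 0, x <u w is equivalent to x <=u w - 1, which is exactly the umin.
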
From 00d8f91b8ed1a9ee7d0b5b26531ce32336ea9459 Mon Sep 17 00:00:00 2001
From: Alex Maclean <amaclean at nvidia.com>
Date: Tue, 3 Jun 2025 17:23:57 +0000
Subject: [PATCH 6/8] address more comments

---
 .../InstCombine/InstCombineSelect.cpp         |  7 ++-
 .../Transforms/InstCombine/minmax-fold.ll     | 48 +++++++++++++++++++
 2 files changed, 54 insertions(+), 1 deletion(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index cf9033f8f4db3..858642906215d 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -574,12 +574,17 @@ static Value *foldSelectICmpMinMax(const ICmpInst *Cmp, Value *TVal,
                                    const SimplifyQuery &SQ) {
   const Value *CmpLHS = Cmp->getOperand(0);
   const Value *CmpRHS = Cmp->getOperand(1);
-  const ICmpInst::Predicate Pred = Cmp->getPredicate();
+  ICmpInst::Predicate Pred = Cmp->getPredicate();
 
   // (X > Y) ? X : (Y - 1) ==> MIN(X, Y - 1)
   // (X < Y) ? X : (Y + 1) ==> MAX(X, Y + 1)
   // This transformation is valid when overflow corresponding to the sign of
   // the comparison is poison and we must drop the non-matching overflow flag.
+  if (CmpRHS == TVal) {
+    std::swap(CmpLHS, CmpRHS);
+    Pred = CmpInst::getSwappedPredicate(Pred);
+  }
+
   if (CmpLHS == TVal) {
     if (Pred == CmpInst::ICMP_SGT &&
         match(FVal, m_NSWAddLike(m_Specific(CmpRHS), m_One()))) {
diff --git a/llvm/test/Transforms/InstCombine/minmax-fold.ll b/llvm/test/Transforms/InstCombine/minmax-fold.ll
index e0ff3150dacf9..c8c5b5a497d6d 100644
--- a/llvm/test/Transforms/InstCombine/minmax-fold.ll
+++ b/llvm/test/Transforms/InstCombine/minmax-fold.ll
@@ -1647,6 +1647,54 @@ define i32 @test_umin_sub1_nuw(i32 %x, i32 range(i32 1, 0) %w) {
   ret i32 %r
 }
 
+define i32 @test_smin_sub1_nsw_swapped(i32 %x, i32 %w) {
+; CHECK-LABEL: @test_smin_sub1_nsw_swapped(
+; CHECK-NEXT:    [[SUB:%.*]] = add nsw i32 [[W:%.*]], -1
+; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.smin.i32(i32 [[X:%.*]], i32 [[SUB]])
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %cmp = icmp sgt i32 %w, %x
+  %sub = add nsw i32 %w, -1
+  %r = select i1 %cmp, i32 %x, i32 %sub
+  ret i32 %r
+}
+
+define i32 @test_smax_add1_nsw_swapped(i32 %x, i32 %w) {
+; CHECK-LABEL: @test_smax_add1_nsw_swapped(
+; CHECK-NEXT:    [[X2:%.*]] = add nsw i32 [[W:%.*]], 1
+; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.smax.i32(i32 [[X:%.*]], i32 [[X2]])
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %cmp = icmp slt i32 %w, %x
+  %add = add nsw i32 %w, 1
+  %r = select i1 %cmp, i32 %x, i32 %add
+  ret i32 %r
+}
+
+define i32 @test_umax_add1_nuw_swapped(i32 %x, i32 %w) {
+; CHECK-LABEL: @test_umax_add1_nuw_swapped(
+; CHECK-NEXT:    [[ADD:%.*]] = add nuw i32 [[W:%.*]], 1
+; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.umax.i32(i32 [[X:%.*]], i32 [[ADD]])
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %cmp = icmp ult i32 %w, %x
+  %add = add nuw i32 %w, 1
+  %r = select i1 %cmp, i32 %x, i32 %add
+  ret i32 %r
+}
+
+define i32 @test_umin_sub1_nuw_swapped(i32 %x, i32 range(i32 1, 0) %w) {
+; CHECK-LABEL: @test_umin_sub1_nuw_swapped(
+; CHECK-NEXT:    [[SUB:%.*]] = add i32 [[W:%.*]], -1
+; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.umin.i32(i32 [[X:%.*]], i32 [[SUB]])
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %cmp = icmp ugt i32 %w, %x
+  %sub = add i32 %w, -1
+  %r = select i1 %cmp, i32 %x, i32 %sub
+  ret i32 %r
+}
+
 define <2 x i16> @test_smin_sub1_nsw_vec(<2 x i16> %x, <2 x i16> %w) {
 ; CHECK-LABEL: @test_smin_sub1_nsw_vec(
 ; CHECK-NEXT:    [[SUB:%.*]] = add nsw <2 x i16> [[W:%.*]], splat (i16 -1)

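The swapped-operand handling is mechanical but worth an illustration (a sketch
mirroring test_smin_sub1_nsw_swapped): the compared value may sit on either
side of the icmp, so operands and predicate are normalized first:

  %cmp = icmp sgt i32 %w, %x      ; same condition as icmp slt i32 %x, %w
  %sub = add nsw i32 %w, -1
  %r = select i1 %cmp, i32 %x, i32 %sub

  ; after getSwappedPredicate the existing slt pattern applies:
  %r = call i32 @llvm.smin.i32(i32 %x, i32 %sub)
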
From 5d6aba570c9dea00f1e2a6bb192d470da8ec3255 Mon Sep 17 00:00:00 2001
From: Alex Maclean <amaclean at nvidia.com>
Date: Wed, 4 Jun 2025 15:19:30 +0000
Subject: [PATCH 7/8] fix crash

---
 .../InstCombine/InstCombineSelect.cpp         | 12 +++--
 .../Transforms/InstCombine/minmax-fold.ll     | 51 +++++++++++++++++++
 2 files changed, 58 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 858642906215d..9069651fdf79f 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -585,21 +585,23 @@ static Value *foldSelectICmpMinMax(const ICmpInst *Cmp, Value *TVal,
     Pred = CmpInst::getSwappedPredicate(Pred);
   }
 
-  if (CmpLHS == TVal) {
+  // TODO: consider handeling 'or disjoint' as well, though these would need to
+  // be converted to 'add' instructions.
+  if (CmpLHS == TVal && isa<Instruction>(FVal)) {
     if (Pred == CmpInst::ICMP_SGT &&
-        match(FVal, m_NSWAddLike(m_Specific(CmpRHS), m_One()))) {
+        match(FVal, m_NSWAdd(m_Specific(CmpRHS), m_One()))) {
       cast<Instruction>(FVal)->setHasNoUnsignedWrap(false);
       return Builder.CreateBinaryIntrinsic(Intrinsic::smax, TVal, FVal);
     }
 
     if (Pred == CmpInst::ICMP_SLT &&
-        match(FVal, m_NSWAddLike(m_Specific(CmpRHS), m_AllOnes()))) {
+        match(FVal, m_NSWAdd(m_Specific(CmpRHS), m_AllOnes()))) {
       cast<Instruction>(FVal)->setHasNoUnsignedWrap(false);
       return Builder.CreateBinaryIntrinsic(Intrinsic::smin, TVal, FVal);
     }
 
     if (Pred == CmpInst::ICMP_UGT &&
-        match(FVal, m_NUWAddLike(m_Specific(CmpRHS), m_One()))) {
+        match(FVal, m_NUWAdd(m_Specific(CmpRHS), m_One()))) {
       cast<Instruction>(FVal)->setHasNoSignedWrap(false);
       return Builder.CreateBinaryIntrinsic(Intrinsic::umax, TVal, FVal);
     }
@@ -607,7 +609,7 @@ static Value *foldSelectICmpMinMax(const ICmpInst *Cmp, Value *TVal,
     // Note: We must use isKnownNonZero here because "sub nuw %x, 1" will be
     // canonicalize to "add %x, -1" discarding the nuw flag.
     if (Pred == CmpInst::ICMP_ULT &&
-        match(FVal, m_AddLike(m_Specific(CmpRHS), m_AllOnes())) &&
+        match(FVal, m_Add(m_Specific(CmpRHS), m_AllOnes())) &&
         isKnownNonZero(CmpRHS, SQ)) {
       cast<Instruction>(FVal)->setHasNoSignedWrap(false);
       return Builder.CreateBinaryIntrinsic(Intrinsic::umin, TVal, FVal);
diff --git a/llvm/test/Transforms/InstCombine/minmax-fold.ll b/llvm/test/Transforms/InstCombine/minmax-fold.ll
index c8c5b5a497d6d..a982225370620 100644
--- a/llvm/test/Transforms/InstCombine/minmax-fold.ll
+++ b/llvm/test/Transforms/InstCombine/minmax-fold.ll
@@ -1791,3 +1791,54 @@ define i32 @test_umin_sub1_nuw_drop_flags(i32 %x, i32 range(i32 1, 0) %w) {
   %r = select i1 %cmp, i32 %x, i32 %sub
   ret i32 %r
 }
+
+;; Confirm we don't crash on these cases.
+define i32 @test_smin_or_neg1_nsw(i32 %x, i32 %w) {
+; CHECK-LABEL: @test_smin_or_neg1_nsw(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[X:%.*]], [[W:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = select i1 [[CMP]], i32 [[X]], i32 -1
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %cmp = icmp slt i32 %x, %w
+  %sub = or disjoint i32 %w, -1
+  %r = select i1 %cmp, i32 %x, i32 %sub
+  ret i32 %r
+}
+
+define i32 @test_smax_or_1_nsw(i32 %x, i32 %w) {
+; CHECK-LABEL: @test_smax_or_1_nsw(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[X:%.*]], [[W:%.*]]
+; CHECK-NEXT:    [[ADD:%.*]] = or disjoint i32 [[W]], 1
+; CHECK-NEXT:    [[R:%.*]] = select i1 [[CMP]], i32 [[X]], i32 [[ADD]]
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %cmp = icmp sgt i32 %x, %w
+  %add = or disjoint i32 %w, 1
+  %r = select i1 %cmp, i32 %x, i32 %add
+  ret i32 %r
+}
+
+define i32 @test_umax_or_1_nuw(i32 %x, i32 %w) {
+; CHECK-LABEL: @test_umax_or_1_nuw(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[X:%.*]], [[W:%.*]]
+; CHECK-NEXT:    [[ADD:%.*]] = or disjoint i32 [[W]], 1
+; CHECK-NEXT:    [[R:%.*]] = select i1 [[CMP]], i32 [[X]], i32 [[ADD]]
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %cmp = icmp ugt i32 %x, %w
+  %add = or disjoint i32 %w, 1
+  %r = select i1 %cmp, i32 %x, i32 %add
+  ret i32 %r
+}
+
+define i32 @test_umin_or_neg1_nuw(i32 %x, i32 range(i32 1, 0) %w) {
+; CHECK-LABEL: @test_umin_or_neg1_nuw(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[X:%.*]], [[W:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = select i1 [[CMP]], i32 [[X]], i32 -1
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %cmp = icmp ult i32 %x, %w
+  %sub = or disjoint i32 %w, -1
+  %r = select i1 %cmp, i32 %x, i32 %sub
+  ret i32 %r
+}

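For context on the crash being fixed (my understanding from the diff): the
m_NSWAddLike/m_NUWAddLike matchers also accept 'or disjoint', and calling
setHasNoUnsignedWrap/setHasNoSignedWrap on an 'or' instruction trips an
assertion, since those flags only exist on overflowing binary operators. The
new tests pin that down, e.g.:

  %cmp = icmp slt i32 %x, %w
  %sub = or disjoint i32 %w, -1   ; add-like, but not an 'add'
  %r = select i1 %cmp, i32 %x, i32 %sub

Hence the switch to the stricter m_NSWAdd/m_NUWAdd/m_Add matchers plus the
isa<Instruction> guard, with the TODO noting that 'or disjoint' could be
supported later by first converting it to an add.
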
From aea0343ff5a0a50755f77ae83d8bccc8e75dad76 Mon Sep 17 00:00:00 2001
From: Alex Maclean <amaclean at nvidia.com>
Date: Thu, 5 Jun 2025 16:54:14 +0000
Subject: [PATCH 8/8] address comments

---
 .../InstCombine/InstCombineSelect.cpp         | 55 ++++++++++---------
 1 file changed, 29 insertions(+), 26 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 9069651fdf79f..ec9e8ce848c6a 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -567,7 +567,7 @@ Instruction *InstCombinerImpl::foldSelectIntoOp(SelectInst &SI, Value *TrueVal,
 
 /// Try to fold a select to a min/max intrinsic. Many cases are already handled
 /// by matchDecomposedSelectPattern but here we handle the cases where more
-/// exensive modification of the IR is required.
+/// extensive modification of the IR is required.
 static Value *foldSelectICmpMinMax(const ICmpInst *Cmp, Value *TVal,
                                    Value *FVal,
                                    InstCombiner::BuilderTy &Builder,
@@ -585,36 +585,39 @@ static Value *foldSelectICmpMinMax(const ICmpInst *Cmp, Value *TVal,
     Pred = CmpInst::getSwappedPredicate(Pred);
   }
 
-  // TODO: consider handeling 'or disjoint' as well, though these would need to
+  // TODO: consider handling 'or disjoint' as well, though these would need to
   // be converted to 'add' instructions.
-  if (CmpLHS == TVal && isa<Instruction>(FVal)) {
-    if (Pred == CmpInst::ICMP_SGT &&
-        match(FVal, m_NSWAdd(m_Specific(CmpRHS), m_One()))) {
-      cast<Instruction>(FVal)->setHasNoUnsignedWrap(false);
-      return Builder.CreateBinaryIntrinsic(Intrinsic::smax, TVal, FVal);
-    }
+  if (!(CmpLHS == TVal && isa<Instruction>(FVal)))
+    return nullptr;
 
-    if (Pred == CmpInst::ICMP_SLT &&
-        match(FVal, m_NSWAdd(m_Specific(CmpRHS), m_AllOnes()))) {
-      cast<Instruction>(FVal)->setHasNoUnsignedWrap(false);
-      return Builder.CreateBinaryIntrinsic(Intrinsic::smin, TVal, FVal);
-    }
+  if (Pred == CmpInst::ICMP_SGT &&
+      match(FVal, m_NSWAdd(m_Specific(CmpRHS), m_One()))) {
+    cast<Instruction>(FVal)->setHasNoUnsignedWrap(false);
+    return Builder.CreateBinaryIntrinsic(Intrinsic::smax, TVal, FVal);
+  }
 
-    if (Pred == CmpInst::ICMP_UGT &&
-        match(FVal, m_NUWAdd(m_Specific(CmpRHS), m_One()))) {
-      cast<Instruction>(FVal)->setHasNoSignedWrap(false);
-      return Builder.CreateBinaryIntrinsic(Intrinsic::umax, TVal, FVal);
-    }
+  if (Pred == CmpInst::ICMP_SLT &&
+      match(FVal, m_NSWAdd(m_Specific(CmpRHS), m_AllOnes()))) {
+    cast<Instruction>(FVal)->setHasNoUnsignedWrap(false);
+    return Builder.CreateBinaryIntrinsic(Intrinsic::smin, TVal, FVal);
+  }
 
-    // Note: We must use isKnownNonZero here because "sub nuw %x, 1" will be
-    // canonicalize to "add %x, -1" discarding the nuw flag.
-    if (Pred == CmpInst::ICMP_ULT &&
-        match(FVal, m_Add(m_Specific(CmpRHS), m_AllOnes())) &&
-        isKnownNonZero(CmpRHS, SQ)) {
-      cast<Instruction>(FVal)->setHasNoSignedWrap(false);
-      return Builder.CreateBinaryIntrinsic(Intrinsic::umin, TVal, FVal);
-    }
+  if (Pred == CmpInst::ICMP_UGT &&
+      match(FVal, m_NUWAdd(m_Specific(CmpRHS), m_One()))) {
+    cast<Instruction>(FVal)->setHasNoSignedWrap(false);
+    return Builder.CreateBinaryIntrinsic(Intrinsic::umax, TVal, FVal);
   }
+
+  // Note: We must use isKnownNonZero here because "sub nuw %x, 1" will be
+  // canonicalized to "add %x, -1" discarding the nuw flag.
+  if (Pred == CmpInst::ICMP_ULT &&
+      match(FVal, m_Add(m_Specific(CmpRHS), m_AllOnes())) &&
+      isKnownNonZero(CmpRHS, SQ)) {
+    cast<Instruction>(FVal)->setHasNoSignedWrap(false);
+    cast<Instruction>(FVal)->setHasNoUnsignedWrap(false);
+    return Builder.CreateBinaryIntrinsic(Intrinsic::umin, TVal, FVal);
+  }
+
   return nullptr;
 }
 

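One last detail from this final revision (my gloss, not stated in the patch):
in the unsigned-min path the matched add carries no required flag at all, so
whatever flags happen to be present are unverified and become live poison
sources once the select no longer shields them; both are now cleared, as
test_umin_sub1_nuw_drop_flags expects:

  %cmp = icmp ult i32 %x, %w      ; %w known nonzero via range(i32 1, 0)
  %sub = add nsw i32 %w, -1
  %r = select i1 %cmp, i32 %x, i32 %sub

  ; folds to, with the stale nsw stripped:
  %sub = add i32 %w, -1
  %r = call i32 @llvm.umin.i32(i32 %x, i32 %sub)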