[llvm] [ValueTracking] Improve KnownBits for signed min-max clamping (PR #120576)

via llvm-commits llvm-commits at lists.llvm.org
Wed Dec 25 04:16:52 PST 2024


https://github.com/adam-bzowski updated https://github.com/llvm/llvm-project/pull/120576

>From cb2efa67587df08eabfd5c21ac2098080403e317 Mon Sep 17 00:00:00 2001
From: "Bzowski, Adam" <adam.bzowski at intel.com>
Date: Thu, 19 Dec 2024 05:28:38 -0800
Subject: [PATCH 1/5] [ValueTracking] Improve KnownBits for signed min-max
 clamping

A signed min-max clamp is a sequence of smin and smax intrinsics that constrains a signed value into the range: lower_limit <= value <= upper_limit. The patch improves the calculation of KnownBits for a value subjected to such signed clamping.
---
 llvm/lib/Analysis/ValueTracking.cpp           | 107 ++++++++++--------
 .../knownbits-trunc-with-min-max-clamp.ll     | 100 ++++++++++++++++
 2 files changed, 158 insertions(+), 49 deletions(-)
 create mode 100644 llvm/test/Analysis/ValueTracking/knownbits-trunc-with-min-max-clamp.ll

diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 14d7c2da8a9f8e..dad1c2bb2347ef 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -1065,6 +1065,62 @@ void llvm::adjustKnownBitsForSelectArm(KnownBits &Known, Value *Cond,
   Known = CondRes;
 }
 
+// Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
+// Returns the input and lower/upper bounds.
+static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
+                                const APInt *&CLow, const APInt *&CHigh) {
+  assert(isa<Operator>(Select) &&
+         cast<Operator>(Select)->getOpcode() == Instruction::Select &&
+         "Input should be a Select!");
+
+  const Value *LHS = nullptr, *RHS = nullptr;
+  SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
+  if (SPF != SPF_SMAX && SPF != SPF_SMIN)
+    return false;
+
+  if (!match(RHS, m_APInt(CLow)))
+    return false;
+
+  const Value *LHS2 = nullptr, *RHS2 = nullptr;
+  SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
+  if (getInverseMinMaxFlavor(SPF) != SPF2)
+    return false;
+
+  if (!match(RHS2, m_APInt(CHigh)))
+    return false;
+
+  if (SPF == SPF_SMIN)
+    std::swap(CLow, CHigh);
+
+  In = LHS2;
+  return CLow->sle(*CHigh);
+}
+
+static bool isSignedMinMaxIntrinsicClamp(const IntrinsicInst *II,
+                                         const APInt *&CLow,
+                                         const APInt *&CHigh) {
+  assert((II->getIntrinsicID() == Intrinsic::smin ||
+          II->getIntrinsicID() == Intrinsic::smax) && "Must be smin/smax");
+
+  Intrinsic::ID InverseID = getInverseMinMaxIntrinsic(II->getIntrinsicID());
+  auto *InnerII = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
+  if (!InnerII || InnerII->getIntrinsicID() != InverseID ||
+      !match(II->getArgOperand(1), m_APInt(CLow)) ||
+      !match(InnerII->getArgOperand(1), m_APInt(CHigh)))
+    return false;
+
+  if (II->getIntrinsicID() == Intrinsic::smin)
+    std::swap(CLow, CHigh);
+  return CLow->sle(*CHigh);
+}
+
+static void unionWithMinMaxIntrinsicClamp(const IntrinsicInst *II,
+                                          KnownBits &Known) {
+  const APInt *CLow, *CHigh;
+  if (isSignedMinMaxIntrinsicClamp(II, CLow, CHigh))
+    Known = Known.unionWith(ConstantRange(*CLow, *CHigh).toKnownBits());
+}
+
 static void computeKnownBitsFromOperator(const Operator *I,
                                          const APInt &DemandedElts,
                                          KnownBits &Known, unsigned Depth,
@@ -1804,11 +1860,13 @@ static void computeKnownBitsFromOperator(const Operator *I,
         computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
         computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
         Known = KnownBits::smin(Known, Known2);
+        unionWithMinMaxIntrinsicClamp(II, Known);
         break;
       case Intrinsic::smax:
         computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
         computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
         Known = KnownBits::smax(Known, Known2);
+        unionWithMinMaxIntrinsicClamp(II, Known);
         break;
       case Intrinsic::ptrmask: {
         computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
@@ -3751,55 +3809,6 @@ static bool isKnownNonEqual(const Value *V1, const Value *V2,
   return false;
 }
 
-// Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
-// Returns the input and lower/upper bounds.
-static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
-                                const APInt *&CLow, const APInt *&CHigh) {
-  assert(isa<Operator>(Select) &&
-         cast<Operator>(Select)->getOpcode() == Instruction::Select &&
-         "Input should be a Select!");
-
-  const Value *LHS = nullptr, *RHS = nullptr;
-  SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
-  if (SPF != SPF_SMAX && SPF != SPF_SMIN)
-    return false;
-
-  if (!match(RHS, m_APInt(CLow)))
-    return false;
-
-  const Value *LHS2 = nullptr, *RHS2 = nullptr;
-  SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
-  if (getInverseMinMaxFlavor(SPF) != SPF2)
-    return false;
-
-  if (!match(RHS2, m_APInt(CHigh)))
-    return false;
-
-  if (SPF == SPF_SMIN)
-    std::swap(CLow, CHigh);
-
-  In = LHS2;
-  return CLow->sle(*CHigh);
-}
-
-static bool isSignedMinMaxIntrinsicClamp(const IntrinsicInst *II,
-                                         const APInt *&CLow,
-                                         const APInt *&CHigh) {
-  assert((II->getIntrinsicID() == Intrinsic::smin ||
-          II->getIntrinsicID() == Intrinsic::smax) && "Must be smin/smax");
-
-  Intrinsic::ID InverseID = getInverseMinMaxIntrinsic(II->getIntrinsicID());
-  auto *InnerII = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
-  if (!InnerII || InnerII->getIntrinsicID() != InverseID ||
-      !match(II->getArgOperand(1), m_APInt(CLow)) ||
-      !match(InnerII->getArgOperand(1), m_APInt(CHigh)))
-    return false;
-
-  if (II->getIntrinsicID() == Intrinsic::smin)
-    std::swap(CLow, CHigh);
-  return CLow->sle(*CHigh);
-}
-
 /// For vector constants, loop over the elements and find the constant with the
 /// minimum number of sign bits. Return 0 if the value is not a vector constant
 /// or if any element was not analyzed; otherwise, return the count for the
diff --git a/llvm/test/Analysis/ValueTracking/knownbits-trunc-with-min-max-clamp.ll b/llvm/test/Analysis/ValueTracking/knownbits-trunc-with-min-max-clamp.ll
new file mode 100644
index 00000000000000..aa01a0d3f3ab8e
--- /dev/null
+++ b/llvm/test/Analysis/ValueTracking/knownbits-trunc-with-min-max-clamp.ll
@@ -0,0 +1,100 @@
+; RUN: opt < %s -passes=aggressive-instcombine -mtriple=x86_64 -S | FileCheck %s
+
+; This LIT test checks if TruncInstCombine pass correctly recognizes the
+; constraints from a signed min-max clamp. The clamp is a sequence of smin and
+; smax instructions limiting a variable into a range, smin <= x <= smax.
+
+declare i16 @llvm.smin.i16(i16, i16)
+declare i16 @llvm.smax.i16(i16, i16)
+
+
+; CHECK-LABEL: @test_1
+; CHECK-NEXT: [[ONE:%.*]] = tail call i16 @llvm.smin.i16(i16 [[X:%.*]], i16 31)
+; CHECK-NEXT: [[TWO:%.*]] = tail call i16 @llvm.smax.i16(i16 [[ONE]], i16 0)
+; CHECK-NEXT: [[A:%.*]] = trunc i16 [[TWO]] to i8
+; CHECK-NEXT: [[B:%.*]] = lshr i8 [[A]], 2
+; CHECK-NEXT: ret i8 [[B]]
+
+define i8 @test_1(i16 %x) {
+  %1 = tail call i16 @llvm.smin.i16(i16 %x, i16 31)
+  %2 = tail call i16 @llvm.smax.i16(i16 %1, i16 0)
+  %a = sext i16 %2 to i32
+  %b = lshr i32 %a, 2
+  %b.trunc = trunc i32 %b to i8
+  ret i8 %b.trunc
+}
+
+
+; CHECK-LABEL: @test_1a
+; CHECK-NEXT: [[ONE:%.*]] = tail call i16 @llvm.smin.i16(i16 [[X:%.*]], i16 31)
+; CHECK-NEXT: [[TWO:%.*]] = tail call i16 @llvm.smax.i16(i16 [[ONE]], i16 0)
+; CHECK-NEXT: [[A:%.*]] = trunc i16 [[TWO]] to i8
+; CHECK-NEXT: [[B:%.*]] = add i8 [[A]], 2
+; CHECK-NEXT: ret i8 [[B]]
+
+define i8 @test_1a(i16 %x) {
+  %1 = tail call i16 @llvm.smin.i16(i16 %x, i16 31)
+  %2 = tail call i16 @llvm.smax.i16(i16 %1, i16 0)
+  %a = sext i16 %2 to i32
+  %b = add i32 %a, 2
+  %b.trunc = trunc i32 %b to i8
+  ret i8 %b.trunc
+}
+
+
+; CHECK-LABEL: @test_2
+; CHECK-NEXT: [[ONE:%.*]] = tail call i16 @llvm.smin.i16(i16 [[X:%.*]], i16 -1)
+; CHECK-NEXT: [[TWO:%.*]] = tail call i16 @llvm.smax.i16(i16 [[ONE]], i16 -31)
+; CHECK-NEXT: [[A:%.*]] = trunc i16 [[TWO]] to i8
+; CHECK-NEXT: [[B:%.*]] = add i8 [[A]], 2
+; CHECK-NEXT: ret i8 [[B]]
+
+define i8 @test_2(i16 %x) {
+  %1 = tail call i16 @llvm.smin.i16(i16 %x, i16 -1)
+  %2 = tail call i16 @llvm.smax.i16(i16 %1, i16 -31)
+  %a = sext i16 %2 to i32
+  %b = add i32 %a, 2
+  %b.trunc = trunc i32 %b to i8
+  ret i8 %b.trunc
+}
+
+
+; CHECK-LABEL: @test_3
+; CHECK-NEXT: [[ONE:%.*]] = tail call i16 @llvm.smin.i16(i16 [[X:%.*]], i16 31)
+; CHECK-NEXT: [[TWO:%.*]] = tail call i16 @llvm.smax.i16(i16 [[ONE]], i16 -31)
+; CHECK-NEXT: [[A:%.*]] = trunc i16 [[TWO]] to i8
+; CHECK-NEXT: [[B:%.*]] = add i8 [[A]], 2
+; CHECK-NEXT: ret i8 [[B]]
+
+define i8 @test_3(i16 %x) {
+  %1 = tail call i16 @llvm.smin.i16(i16 %x, i16 31)
+  %2 = tail call i16 @llvm.smax.i16(i16 %1, i16 -31)
+  %a = sext i16 %2 to i32
+  %b = add i32 %a, 2
+  %b.trunc = trunc i32 %b to i8
+  ret i8 %b.trunc
+}
+
+
+; CHECK-LABEL: @test_4
+; CHECK-NEXT: [[ONE:%.*]] = tail call i16 @llvm.smin.i16(i16 [[X:%.*]], i16 127)
+; CHECK-NEXT: [[TWO:%.*]] = tail call i16 @llvm.smax.i16(i16 [[ONE]], i16 0)
+; CHECK-NEXT: [[THREE:%.*]] = tail call i16 @llvm.smin.i16(i16 [[Y:%.*]], i16 127)
+; CHECK-NEXT: [[FOUR:%.*]] = tail call i16 @llvm.smax.i16(i16 [[THREE]], i16 0)
+; CHECK-NEXT: [[A:%.*]] = mul i16 [[TWO]], [[FOUR]]
+; CHECK-NEXT: [[B:%.*]] = lshr i16 [[A]], 7
+; CHECK-NEXT: [[C:%.*]] = trunc i16 [[B]] to i8
+; CHECK-NEXT: ret i8 [[C]]
+
+define i8 @test_4(i16 %x, i16 %y) {
+  %1 = tail call i16 @llvm.smin.i16(i16 %x, i16 127)
+  %2 = tail call i16 @llvm.smax.i16(i16 %1, i16 0)
+  %x.clamp = zext nneg i16 %2 to i32
+  %3 = tail call i16 @llvm.smin.i16(i16 %y, i16 127)
+  %4 = tail call i16 @llvm.smax.i16(i16 %3, i16 0)
+  %y.clamp = zext nneg i16 %4 to i32
+  %mul = mul nuw nsw i32 %x.clamp, %y.clamp
+  %shr = lshr i32 %mul, 7
+  %trunc= trunc nuw nsw i32 %shr to i8
+  ret i8 %trunc
+}

>From 54b6643377009de4f401be9b8e42e77926fdc7fb Mon Sep 17 00:00:00 2001
From: "Bzowski, Adam" <adam.bzowski at intel.com>
Date: Thu, 19 Dec 2024 06:07:26 -0800
Subject: [PATCH 2/5] [ValueTracking] Improve KnownBits for signed min-max
 clamping

A signed min-max clamp is a sequence of smin and smax intrinsics that constrains a signed value into the range: lower_limit <= value <= upper_limit. The patch improves the calculation of KnownBits for a value subjected to such signed clamping.
---
 .../knownbits-trunc-with-min-max-clamp.ll     | 94 +++++++++++++++++--
 1 file changed, 84 insertions(+), 10 deletions(-)

diff --git a/llvm/test/Analysis/ValueTracking/knownbits-trunc-with-min-max-clamp.ll b/llvm/test/Analysis/ValueTracking/knownbits-trunc-with-min-max-clamp.ll
index aa01a0d3f3ab8e..fc5d06105975be 100644
--- a/llvm/test/Analysis/ValueTracking/knownbits-trunc-with-min-max-clamp.ll
+++ b/llvm/test/Analysis/ValueTracking/knownbits-trunc-with-min-max-clamp.ll
@@ -7,15 +7,21 @@
 declare i16 @llvm.smin.i16(i16, i16)
 declare i16 @llvm.smax.i16(i16, i16)
 
+; Each LIT test (except the last one) has two versions depending on the order
+; of smin and smax:
+; a) y = smax(smin(x, upper_limit), lower_limit)
+; b) y = smin(smax(x, lower_limit), upper_limit)
 
-; CHECK-LABEL: @test_1
+
+
+; CHECK-LABEL: @test_0a
 ; CHECK-NEXT: [[ONE:%.*]] = tail call i16 @llvm.smin.i16(i16 [[X:%.*]], i16 31)
 ; CHECK-NEXT: [[TWO:%.*]] = tail call i16 @llvm.smax.i16(i16 [[ONE]], i16 0)
 ; CHECK-NEXT: [[A:%.*]] = trunc i16 [[TWO]] to i8
 ; CHECK-NEXT: [[B:%.*]] = lshr i8 [[A]], 2
 ; CHECK-NEXT: ret i8 [[B]]
 
-define i8 @test_1(i16 %x) {
+define i8 @test_0a(i16 %x) {
   %1 = tail call i16 @llvm.smin.i16(i16 %x, i16 31)
   %2 = tail call i16 @llvm.smax.i16(i16 %1, i16 0)
   %a = sext i16 %2 to i32
@@ -25,6 +31,23 @@ define i8 @test_1(i16 %x) {
 }
 
 
+; CHECK-LABEL: @test_0b
+; CHECK-NEXT: [[ONE:%.*]] = tail call i16 @llvm.smax.i16(i16 [[X:%.*]], i16 0)
+; CHECK-NEXT: [[TWO:%.*]] = tail call i16 @llvm.smin.i16(i16 [[ONE]], i16 31)
+; CHECK-NEXT: [[A:%.*]] = trunc i16 [[TWO]] to i8
+; CHECK-NEXT: [[B:%.*]] = lshr i8 [[A]], 2
+; CHECK-NEXT: ret i8 [[B]]
+
+define i8 @test_0b(i16 %x) {
+  %1 = tail call i16 @llvm.smax.i16(i16 %x, i16 0)
+  %2 = tail call i16 @llvm.smin.i16(i16 %1, i16 31)
+  %a = sext i16 %2 to i32
+  %b = lshr i32 %a, 2
+  %b.trunc = trunc i32 %b to i8
+  ret i8 %b.trunc
+}
+
+
 ; CHECK-LABEL: @test_1a
 ; CHECK-NEXT: [[ONE:%.*]] = tail call i16 @llvm.smin.i16(i16 [[X:%.*]], i16 31)
 ; CHECK-NEXT: [[TWO:%.*]] = tail call i16 @llvm.smax.i16(i16 [[ONE]], i16 0)
@@ -42,14 +65,31 @@ define i8 @test_1a(i16 %x) {
 }
 
 
-; CHECK-LABEL: @test_2
+; CHECK-LABEL: @test_1b
+; CHECK-NEXT: [[ONE:%.*]] = tail call i16 @llvm.smax.i16(i16 [[X:%.*]], i16 0)
+; CHECK-NEXT: [[TWO:%.*]] = tail call i16 @llvm.smin.i16(i16 [[ONE]], i16 31)
+; CHECK-NEXT: [[A:%.*]] = trunc i16 [[TWO]] to i8
+; CHECK-NEXT: [[B:%.*]] = add i8 [[A]], 2
+; CHECK-NEXT: ret i8 [[B]]
+
+define i8 @test_1b(i16 %x) {
+  %1 = tail call i16 @llvm.smax.i16(i16 %x, i16 0)
+  %2 = tail call i16 @llvm.smin.i16(i16 %1, i16 31)
+  %a = sext i16 %2 to i32
+  %b = add i32 %a, 2
+  %b.trunc = trunc i32 %b to i8
+  ret i8 %b.trunc
+}
+
+
+; CHECK-LABEL: @test_2a
 ; CHECK-NEXT: [[ONE:%.*]] = tail call i16 @llvm.smin.i16(i16 [[X:%.*]], i16 -1)
 ; CHECK-NEXT: [[TWO:%.*]] = tail call i16 @llvm.smax.i16(i16 [[ONE]], i16 -31)
 ; CHECK-NEXT: [[A:%.*]] = trunc i16 [[TWO]] to i8
 ; CHECK-NEXT: [[B:%.*]] = add i8 [[A]], 2
 ; CHECK-NEXT: ret i8 [[B]]
 
-define i8 @test_2(i16 %x) {
+define i8 @test_2a(i16 %x) {
   %1 = tail call i16 @llvm.smin.i16(i16 %x, i16 -1)
   %2 = tail call i16 @llvm.smax.i16(i16 %1, i16 -31)
   %a = sext i16 %2 to i32
@@ -59,14 +99,31 @@ define i8 @test_2(i16 %x) {
 }
 
 
-; CHECK-LABEL: @test_3
+; CHECK-LABEL: @test_2b
+; CHECK-NEXT: [[ONE:%.*]] = tail call i16 @llvm.smax.i16(i16 [[X:%.*]], i16 -31)
+; CHECK-NEXT: [[TWO:%.*]] = tail call i16 @llvm.smin.i16(i16 [[ONE]], i16 -1)
+; CHECK-NEXT: [[A:%.*]] = trunc i16 [[TWO]] to i8
+; CHECK-NEXT: [[B:%.*]] = add i8 [[A]], 2
+; CHECK-NEXT: ret i8 [[B]]
+
+define i8 @test_2b(i16 %x) {
+  %1 = tail call i16 @llvm.smax.i16(i16 %x, i16 -31)
+  %2 = tail call i16 @llvm.smin.i16(i16 %1, i16 -1)
+  %a = sext i16 %2 to i32
+  %b = add i32 %a, 2
+  %b.trunc = trunc i32 %b to i8
+  ret i8 %b.trunc
+}
+
+
+; CHECK-LABEL: @test_3a
 ; CHECK-NEXT: [[ONE:%.*]] = tail call i16 @llvm.smin.i16(i16 [[X:%.*]], i16 31)
 ; CHECK-NEXT: [[TWO:%.*]] = tail call i16 @llvm.smax.i16(i16 [[ONE]], i16 -31)
 ; CHECK-NEXT: [[A:%.*]] = trunc i16 [[TWO]] to i8
 ; CHECK-NEXT: [[B:%.*]] = add i8 [[A]], 2
 ; CHECK-NEXT: ret i8 [[B]]
 
-define i8 @test_3(i16 %x) {
+define i8 @test_3a(i16 %x) {
   %1 = tail call i16 @llvm.smin.i16(i16 %x, i16 31)
   %2 = tail call i16 @llvm.smax.i16(i16 %1, i16 -31)
   %a = sext i16 %2 to i32
@@ -76,11 +133,28 @@ define i8 @test_3(i16 %x) {
 }
 
 
+; CHECK-LABEL: @test_3b
+; CHECK-NEXT: [[ONE:%.*]] = tail call i16 @llvm.smax.i16(i16 [[X:%.*]], i16 -31)
+; CHECK-NEXT: [[TWO:%.*]] = tail call i16 @llvm.smin.i16(i16 [[ONE]], i16 31)
+; CHECK-NEXT: [[A:%.*]] = trunc i16 [[TWO]] to i8
+; CHECK-NEXT: [[B:%.*]] = add i8 [[A]], 2
+; CHECK-NEXT: ret i8 [[B]]
+
+define i8 @test_3b(i16 %x) {
+  %1 = tail call i16 @llvm.smax.i16(i16 %x, i16 -31)
+  %2 = tail call i16 @llvm.smin.i16(i16 %1, i16 31)
+  %a = sext i16 %2 to i32
+  %b = add i32 %a, 2
+  %b.trunc = trunc i32 %b to i8
+  ret i8 %b.trunc
+}
+
+
 ; CHECK-LABEL: @test_4
 ; CHECK-NEXT: [[ONE:%.*]] = tail call i16 @llvm.smin.i16(i16 [[X:%.*]], i16 127)
 ; CHECK-NEXT: [[TWO:%.*]] = tail call i16 @llvm.smax.i16(i16 [[ONE]], i16 0)
-; CHECK-NEXT: [[THREE:%.*]] = tail call i16 @llvm.smin.i16(i16 [[Y:%.*]], i16 127)
-; CHECK-NEXT: [[FOUR:%.*]] = tail call i16 @llvm.smax.i16(i16 [[THREE]], i16 0)
+; CHECK-NEXT: [[THREE:%.*]] = tail call i16 @llvm.smax.i16(i16 [[Y:%.*]], i16 0)
+; CHECK-NEXT: [[FOUR:%.*]] = tail call i16 @llvm.smin.i16(i16 [[THREE]], i16 127)
 ; CHECK-NEXT: [[A:%.*]] = mul i16 [[TWO]], [[FOUR]]
 ; CHECK-NEXT: [[B:%.*]] = lshr i16 [[A]], 7
 ; CHECK-NEXT: [[C:%.*]] = trunc i16 [[B]] to i8
@@ -90,8 +164,8 @@ define i8 @test_4(i16 %x, i16 %y) {
   %1 = tail call i16 @llvm.smin.i16(i16 %x, i16 127)
   %2 = tail call i16 @llvm.smax.i16(i16 %1, i16 0)
   %x.clamp = zext nneg i16 %2 to i32
-  %3 = tail call i16 @llvm.smin.i16(i16 %y, i16 127)
-  %4 = tail call i16 @llvm.smax.i16(i16 %3, i16 0)
+  %3 = tail call i16 @llvm.smax.i16(i16 %y, i16 0)
+  %4 = tail call i16 @llvm.smin.i16(i16 %3, i16 127)
   %y.clamp = zext nneg i16 %4 to i32
   %mul = mul nuw nsw i32 %x.clamp, %y.clamp
   %shr = lshr i32 %mul, 7

>From a72e960fbacfd6aa3ce1b68520eb224ba8fca0de Mon Sep 17 00:00:00 2001
From: "Bzowski, Adam" <adam.bzowski at intel.com>
Date: Thu, 19 Dec 2024 08:13:59 -0800
Subject: [PATCH 3/5] [ValueTracking] Improve KnownBits for signed min-max
 clamping

A signed min-max clamp is a sequence of smin and smax intrinsics that constrains a signed value into the range: lower_limit <= value <= upper_limit. The patch improves the calculation of KnownBits for a value subjected to such signed clamping.
---
 llvm/lib/Analysis/ValueTracking.cpp           |   3 +-
 .../knownbits-trunc-with-min-max-clamp.ll     | 189 ++++++++++--------
 2 files changed, 112 insertions(+), 80 deletions(-)

diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index dad1c2bb2347ef..d54ea644bfcfc9 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -1100,7 +1100,8 @@ static bool isSignedMinMaxIntrinsicClamp(const IntrinsicInst *II,
                                          const APInt *&CLow,
                                          const APInt *&CHigh) {
   assert((II->getIntrinsicID() == Intrinsic::smin ||
-          II->getIntrinsicID() == Intrinsic::smax) && "Must be smin/smax");
+          II->getIntrinsicID() == Intrinsic::smax) &&
+         "Must be smin/smax");
 
   Intrinsic::ID InverseID = getInverseMinMaxIntrinsic(II->getIntrinsicID());
   auto *InnerII = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
diff --git a/llvm/test/Analysis/ValueTracking/knownbits-trunc-with-min-max-clamp.ll b/llvm/test/Analysis/ValueTracking/knownbits-trunc-with-min-max-clamp.ll
index fc5d06105975be..2a0096a94e375d 100644
--- a/llvm/test/Analysis/ValueTracking/knownbits-trunc-with-min-max-clamp.ll
+++ b/llvm/test/Analysis/ValueTracking/knownbits-trunc-with-min-max-clamp.ll
@@ -1,27 +1,24 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
 ; RUN: opt < %s -passes=aggressive-instcombine -mtriple=x86_64 -S | FileCheck %s
 
 ; This LIT test checks if TruncInstCombine pass correctly recognizes the
 ; constraints from a signed min-max clamp. The clamp is a sequence of smin and
 ; smax instructions limiting a variable into a range, smin <= x <= smax.
-
-declare i16 @llvm.smin.i16(i16, i16)
-declare i16 @llvm.smax.i16(i16, i16)
-
+;
 ; Each LIT test (except the last one) has two versions depending on the order
 ; of smin and smax:
 ; a) y = smax(smin(x, upper_limit), lower_limit)
 ; b) y = smin(smax(x, lower_limit), upper_limit)
 
-
-
-; CHECK-LABEL: @test_0a
-; CHECK-NEXT: [[ONE:%.*]] = tail call i16 @llvm.smin.i16(i16 [[X:%.*]], i16 31)
-; CHECK-NEXT: [[TWO:%.*]] = tail call i16 @llvm.smax.i16(i16 [[ONE]], i16 0)
-; CHECK-NEXT: [[A:%.*]] = trunc i16 [[TWO]] to i8
-; CHECK-NEXT: [[B:%.*]] = lshr i8 [[A]], 2
-; CHECK-NEXT: ret i8 [[B]]
-
 define i8 @test_0a(i16 %x) {
+; CHECK-LABEL: define i8 @test_0a(
+; CHECK-SAME: i16 [[X:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.smin.i16(i16 [[X]], i16 31)
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call i16 @llvm.smax.i16(i16 [[TMP1]], i16 0)
+; CHECK-NEXT:    [[A:%.*]] = trunc i16 [[TMP2]] to i8
+; CHECK-NEXT:    [[B:%.*]] = lshr i8 [[A]], 2
+; CHECK-NEXT:    ret i8 [[B]]
+;
   %1 = tail call i16 @llvm.smin.i16(i16 %x, i16 31)
   %2 = tail call i16 @llvm.smax.i16(i16 %1, i16 0)
   %a = sext i16 %2 to i32
@@ -30,15 +27,15 @@ define i8 @test_0a(i16 %x) {
   ret i8 %b.trunc
 }
 
-
-; CHECK-LABEL: @test_0b
-; CHECK-NEXT: [[ONE:%.*]] = tail call i16 @llvm.smax.i16(i16 [[X:%.*]], i16 0)
-; CHECK-NEXT: [[TWO:%.*]] = tail call i16 @llvm.smin.i16(i16 [[ONE]], i16 31)
-; CHECK-NEXT: [[A:%.*]] = trunc i16 [[TWO]] to i8
-; CHECK-NEXT: [[B:%.*]] = lshr i8 [[A]], 2
-; CHECK-NEXT: ret i8 [[B]]
-
 define i8 @test_0b(i16 %x) {
+; CHECK-LABEL: define i8 @test_0b(
+; CHECK-SAME: i16 [[X:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.smax.i16(i16 [[X]], i16 0)
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call i16 @llvm.smin.i16(i16 [[TMP1]], i16 31)
+; CHECK-NEXT:    [[A:%.*]] = trunc i16 [[TMP2]] to i8
+; CHECK-NEXT:    [[B:%.*]] = lshr i8 [[A]], 2
+; CHECK-NEXT:    ret i8 [[B]]
+;
   %1 = tail call i16 @llvm.smax.i16(i16 %x, i16 0)
   %2 = tail call i16 @llvm.smin.i16(i16 %1, i16 31)
   %a = sext i16 %2 to i32
@@ -47,15 +44,15 @@ define i8 @test_0b(i16 %x) {
   ret i8 %b.trunc
 }
 
-
-; CHECK-LABEL: @test_1a
-; CHECK-NEXT: [[ONE:%.*]] = tail call i16 @llvm.smin.i16(i16 [[X:%.*]], i16 31)
-; CHECK-NEXT: [[TWO:%.*]] = tail call i16 @llvm.smax.i16(i16 [[ONE]], i16 0)
-; CHECK-NEXT: [[A:%.*]] = trunc i16 [[TWO]] to i8
-; CHECK-NEXT: [[B:%.*]] = add i8 [[A]], 2
-; CHECK-NEXT: ret i8 [[B]]
-
 define i8 @test_1a(i16 %x) {
+; CHECK-LABEL: define i8 @test_1a(
+; CHECK-SAME: i16 [[X:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.smin.i16(i16 [[X]], i16 31)
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call i16 @llvm.smax.i16(i16 [[TMP1]], i16 0)
+; CHECK-NEXT:    [[A:%.*]] = trunc i16 [[TMP2]] to i8
+; CHECK-NEXT:    [[B:%.*]] = add i8 [[A]], 2
+; CHECK-NEXT:    ret i8 [[B]]
+;
   %1 = tail call i16 @llvm.smin.i16(i16 %x, i16 31)
   %2 = tail call i16 @llvm.smax.i16(i16 %1, i16 0)
   %a = sext i16 %2 to i32
@@ -64,15 +61,15 @@ define i8 @test_1a(i16 %x) {
   ret i8 %b.trunc
 }
 
-
-; CHECK-LABEL: @test_1b
-; CHECK-NEXT: [[ONE:%.*]] = tail call i16 @llvm.smax.i16(i16 [[X:%.*]], i16 0)
-; CHECK-NEXT: [[TWO:%.*]] = tail call i16 @llvm.smin.i16(i16 [[ONE]], i16 31)
-; CHECK-NEXT: [[A:%.*]] = trunc i16 [[TWO]] to i8
-; CHECK-NEXT: [[B:%.*]] = add i8 [[A]], 2
-; CHECK-NEXT: ret i8 [[B]]
-
 define i8 @test_1b(i16 %x) {
+; CHECK-LABEL: define i8 @test_1b(
+; CHECK-SAME: i16 [[X:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.smax.i16(i16 [[X]], i16 0)
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call i16 @llvm.smin.i16(i16 [[TMP1]], i16 31)
+; CHECK-NEXT:    [[A:%.*]] = trunc i16 [[TMP2]] to i8
+; CHECK-NEXT:    [[B:%.*]] = add i8 [[A]], 2
+; CHECK-NEXT:    ret i8 [[B]]
+;
   %1 = tail call i16 @llvm.smax.i16(i16 %x, i16 0)
   %2 = tail call i16 @llvm.smin.i16(i16 %1, i16 31)
   %a = sext i16 %2 to i32
@@ -81,15 +78,15 @@ define i8 @test_1b(i16 %x) {
   ret i8 %b.trunc
 }
 
-
-; CHECK-LABEL: @test_2a
-; CHECK-NEXT: [[ONE:%.*]] = tail call i16 @llvm.smin.i16(i16 [[X:%.*]], i16 -1)
-; CHECK-NEXT: [[TWO:%.*]] = tail call i16 @llvm.smax.i16(i16 [[ONE]], i16 -31)
-; CHECK-NEXT: [[A:%.*]] = trunc i16 [[TWO]] to i8
-; CHECK-NEXT: [[B:%.*]] = add i8 [[A]], 2
-; CHECK-NEXT: ret i8 [[B]]
-
 define i8 @test_2a(i16 %x) {
+; CHECK-LABEL: define i8 @test_2a(
+; CHECK-SAME: i16 [[X:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.smin.i16(i16 [[X]], i16 -1)
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call i16 @llvm.smax.i16(i16 [[TMP1]], i16 -31)
+; CHECK-NEXT:    [[A:%.*]] = trunc i16 [[TMP2]] to i8
+; CHECK-NEXT:    [[B:%.*]] = add i8 [[A]], 2
+; CHECK-NEXT:    ret i8 [[B]]
+;
   %1 = tail call i16 @llvm.smin.i16(i16 %x, i16 -1)
   %2 = tail call i16 @llvm.smax.i16(i16 %1, i16 -31)
   %a = sext i16 %2 to i32
@@ -98,15 +95,15 @@ define i8 @test_2a(i16 %x) {
   ret i8 %b.trunc
 }
 
-
-; CHECK-LABEL: @test_2b
-; CHECK-NEXT: [[ONE:%.*]] = tail call i16 @llvm.smax.i16(i16 [[X:%.*]], i16 -31)
-; CHECK-NEXT: [[TWO:%.*]] = tail call i16 @llvm.smin.i16(i16 [[ONE]], i16 -1)
-; CHECK-NEXT: [[A:%.*]] = trunc i16 [[TWO]] to i8
-; CHECK-NEXT: [[B:%.*]] = add i8 [[A]], 2
-; CHECK-NEXT: ret i8 [[B]]
-
 define i8 @test_2b(i16 %x) {
+; CHECK-LABEL: define i8 @test_2b(
+; CHECK-SAME: i16 [[X:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.smax.i16(i16 [[X]], i16 -31)
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call i16 @llvm.smin.i16(i16 [[TMP1]], i16 -1)
+; CHECK-NEXT:    [[A:%.*]] = trunc i16 [[TMP2]] to i8
+; CHECK-NEXT:    [[B:%.*]] = add i8 [[A]], 2
+; CHECK-NEXT:    ret i8 [[B]]
+;
   %1 = tail call i16 @llvm.smax.i16(i16 %x, i16 -31)
   %2 = tail call i16 @llvm.smin.i16(i16 %1, i16 -1)
   %a = sext i16 %2 to i32
@@ -115,15 +112,15 @@ define i8 @test_2b(i16 %x) {
   ret i8 %b.trunc
 }
 
-
-; CHECK-LABEL: @test_3a
-; CHECK-NEXT: [[ONE:%.*]] = tail call i16 @llvm.smin.i16(i16 [[X:%.*]], i16 31)
-; CHECK-NEXT: [[TWO:%.*]] = tail call i16 @llvm.smax.i16(i16 [[ONE]], i16 -31)
-; CHECK-NEXT: [[A:%.*]] = trunc i16 [[TWO]] to i8
-; CHECK-NEXT: [[B:%.*]] = add i8 [[A]], 2
-; CHECK-NEXT: ret i8 [[B]]
-
 define i8 @test_3a(i16 %x) {
+; CHECK-LABEL: define i8 @test_3a(
+; CHECK-SAME: i16 [[X:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.smin.i16(i16 [[X]], i16 31)
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call i16 @llvm.smax.i16(i16 [[TMP1]], i16 -31)
+; CHECK-NEXT:    [[A:%.*]] = trunc i16 [[TMP2]] to i8
+; CHECK-NEXT:    [[B:%.*]] = add i8 [[A]], 2
+; CHECK-NEXT:    ret i8 [[B]]
+;
   %1 = tail call i16 @llvm.smin.i16(i16 %x, i16 31)
   %2 = tail call i16 @llvm.smax.i16(i16 %1, i16 -31)
   %a = sext i16 %2 to i32
@@ -132,15 +129,15 @@ define i8 @test_3a(i16 %x) {
   ret i8 %b.trunc
 }
 
-
-; CHECK-LABEL: @test_3b
-; CHECK-NEXT: [[ONE:%.*]] = tail call i16 @llvm.smax.i16(i16 [[X:%.*]], i16 -31)
-; CHECK-NEXT: [[TWO:%.*]] = tail call i16 @llvm.smin.i16(i16 [[ONE]], i16 31)
-; CHECK-NEXT: [[A:%.*]] = trunc i16 [[TWO]] to i8
-; CHECK-NEXT: [[B:%.*]] = add i8 [[A]], 2
-; CHECK-NEXT: ret i8 [[B]]
-
 define i8 @test_3b(i16 %x) {
+; CHECK-LABEL: define i8 @test_3b(
+; CHECK-SAME: i16 [[X:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.smax.i16(i16 [[X]], i16 -31)
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call i16 @llvm.smin.i16(i16 [[TMP1]], i16 31)
+; CHECK-NEXT:    [[A:%.*]] = trunc i16 [[TMP2]] to i8
+; CHECK-NEXT:    [[B:%.*]] = add i8 [[A]], 2
+; CHECK-NEXT:    ret i8 [[B]]
+;
   %1 = tail call i16 @llvm.smax.i16(i16 %x, i16 -31)
   %2 = tail call i16 @llvm.smin.i16(i16 %1, i16 31)
   %a = sext i16 %2 to i32
@@ -149,18 +146,52 @@ define i8 @test_3b(i16 %x) {
   ret i8 %b.trunc
 }
 
+define <16 x i8> @test_vec_1a(<16 x i16> %x) {
+; CHECK-LABEL: define <16 x i8> @test_vec_1a(
+; CHECK-SAME: <16 x i16> [[X:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i16> @llvm.smin.v16i16(<16 x i16> [[X]], <16 x i16> splat (i16 127))
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i16> @llvm.smax.v16i16(<16 x i16> [[TMP1]], <16 x i16> zeroinitializer)
+; CHECK-NEXT:    [[A:%.*]] = trunc <16 x i16> [[TMP2]] to <16 x i8>
+; CHECK-NEXT:    [[B:%.*]] = add <16 x i8> [[A]], splat (i8 2)
+; CHECK-NEXT:    ret <16 x i8> [[B]]
+;
+  %1 = tail call <16 x i16> @llvm.smin.v16i16(<16 x i16> %x, <16 x i16> splat (i16 127))
+  %2 = tail call <16 x i16> @llvm.smax.v16i16(<16 x i16> %1, <16 x i16> zeroinitializer)
+  %a = sext <16 x i16> %2 to <16 x i32>
+  %b = add <16 x i32> %a, splat (i32 2)
+  %b.trunc = trunc <16 x i32> %b to <16 x i8>
+  ret <16 x i8> %b.trunc
+}
 
-; CHECK-LABEL: @test_4
-; CHECK-NEXT: [[ONE:%.*]] = tail call i16 @llvm.smin.i16(i16 [[X:%.*]], i16 127)
-; CHECK-NEXT: [[TWO:%.*]] = tail call i16 @llvm.smax.i16(i16 [[ONE]], i16 0)
-; CHECK-NEXT: [[THREE:%.*]] = tail call i16 @llvm.smax.i16(i16 [[Y:%.*]], i16 0)
-; CHECK-NEXT: [[FOUR:%.*]] = tail call i16 @llvm.smin.i16(i16 [[THREE]], i16 127)
-; CHECK-NEXT: [[A:%.*]] = mul i16 [[TWO]], [[FOUR]]
-; CHECK-NEXT: [[B:%.*]] = lshr i16 [[A]], 7
-; CHECK-NEXT: [[C:%.*]] = trunc i16 [[B]] to i8
-; CHECK-NEXT: ret i8 [[C]]
+define <16 x i8> @test_vec_1b(<16 x i16> %x) {
+; CHECK-LABEL: define <16 x i8> @test_vec_1b(
+; CHECK-SAME: <16 x i16> [[X:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call <16 x i16> @llvm.smax.v16i16(<16 x i16> [[X]], <16 x i16> zeroinitializer)
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i16> @llvm.smin.v16i16(<16 x i16> [[TMP1]], <16 x i16> splat (i16 127))
+; CHECK-NEXT:    [[A:%.*]] = trunc <16 x i16> [[TMP2]] to <16 x i8>
+; CHECK-NEXT:    [[B:%.*]] = add <16 x i8> [[A]], splat (i8 2)
+; CHECK-NEXT:    ret <16 x i8> [[B]]
+;
+  %1 = tail call <16 x i16> @llvm.smax.v16i16(<16 x i16> %x, <16 x i16> zeroinitializer)
+  %2 = tail call <16 x i16> @llvm.smin.v16i16(<16 x i16> %1, <16 x i16> splat (i16 127))
+  %a = sext <16 x i16> %2 to <16 x i32>
+  %b = add <16 x i32> %a, splat (i32 2)
+  %b.trunc = trunc <16 x i32> %b to <16 x i8>
+  ret <16 x i8> %b.trunc
+}
 
-define i8 @test_4(i16 %x, i16 %y) {
+define i8 @test_final(i16 %x, i16 %y) {
+; CHECK-LABEL: define i8 @test_final(
+; CHECK-SAME: i16 [[X:%.*]], i16 [[Y:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.smin.i16(i16 [[X]], i16 127)
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call i16 @llvm.smax.i16(i16 [[TMP1]], i16 0)
+; CHECK-NEXT:    [[TMP3:%.*]] = tail call i16 @llvm.smax.i16(i16 [[Y]], i16 0)
+; CHECK-NEXT:    [[TMP4:%.*]] = tail call i16 @llvm.smin.i16(i16 [[TMP3]], i16 127)
+; CHECK-NEXT:    [[MUL:%.*]] = mul i16 [[TMP2]], [[TMP4]]
+; CHECK-NEXT:    [[SHR:%.*]] = lshr i16 [[MUL]], 7
+; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i16 [[SHR]] to i8
+; CHECK-NEXT:    ret i8 [[TRUNC]]
+;
   %1 = tail call i16 @llvm.smin.i16(i16 %x, i16 127)
   %2 = tail call i16 @llvm.smax.i16(i16 %1, i16 0)
   %x.clamp = zext nneg i16 %2 to i32

>From 6636dc61e9f37cc0c42702a0ce5255d35c155ce2 Mon Sep 17 00:00:00 2001
From: "Bzowski, Adam" <adam.bzowski at intel.com>
Date: Fri, 20 Dec 2024 08:23:03 -0800
Subject: [PATCH 4/5] [ValueTracking] Improve KnownBits for signed min-max
 clamping

A signed min-max clamp is a sequence of smin and smax intrinsics that constrains a signed value into the range: lower_limit <= value <= upper_limit. The patch improves the calculation of KnownBits for a value subjected to such signed clamping.
---
 llvm/lib/Analysis/ValueTracking.cpp           |  2 +-
 .../knownbits-trunc-with-min-max-clamp.ll     | 60 ++++++++++++++++++-
 2 files changed, 60 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index d54ea644bfcfc9..78fec25a6e502d 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -1119,7 +1119,7 @@ static void unionWithMinMaxIntrinsicClamp(const IntrinsicInst *II,
                                           KnownBits &Known) {
   const APInt *CLow, *CHigh;
   if (isSignedMinMaxIntrinsicClamp(II, CLow, CHigh))
-    Known = Known.unionWith(ConstantRange(*CLow, *CHigh).toKnownBits());
+    Known = Known.unionWith(ConstantRange(*CLow, *CHigh + 1).toKnownBits());
 }
 
 static void computeKnownBitsFromOperator(const Operator *I,
diff --git a/llvm/test/Analysis/ValueTracking/knownbits-trunc-with-min-max-clamp.ll b/llvm/test/Analysis/ValueTracking/knownbits-trunc-with-min-max-clamp.ll
index 2a0096a94e375d..6d48918f0e3540 100644
--- a/llvm/test/Analysis/ValueTracking/knownbits-trunc-with-min-max-clamp.ll
+++ b/llvm/test/Analysis/ValueTracking/knownbits-trunc-with-min-max-clamp.ll
@@ -5,7 +5,7 @@
 ; constraints from a signed min-max clamp. The clamp is a sequence of smin and
 ; smax instructions limiting a variable into a range, smin <= x <= smax.
 ;
-; Each LIT test (except the last one) has two versions depending on the order
+; Each LIT test (except the last ones) has two versions depending on the order
 ; of smin and smax:
 ; a) y = smax(smin(x, upper_limit), lower_limit)
 ; b) y = smin(smax(x, lower_limit), upper_limit)
@@ -180,6 +180,7 @@ define <16 x i8> @test_vec_1b(<16 x i16> %x) {
   ret <16 x i8> %b.trunc
 }
 
+; A longer test that was the original motivation for the smin-smax clamping.
 define i8 @test_final(i16 %x, i16 %y) {
 ; CHECK-LABEL: define i8 @test_final(
 ; CHECK-SAME: i16 [[X:%.*]], i16 [[Y:%.*]]) {
@@ -203,3 +204,60 @@ define i8 @test_final(i16 %x, i16 %y) {
   %trunc= trunc nuw nsw i32 %shr to i8
   ret i8 %trunc
 }
+
+; Range tests below check if the bounds are dealt with correctly.
+
+; This gets optimized.
+define i8 @test_bounds_1(i16 %x) {
+; CHECK-LABEL: define i8 @test_bounds_1(
+; CHECK-SAME: i16 [[X:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.smin.i16(i16 [[X]], i16 127)
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call i16 @llvm.smax.i16(i16 [[TMP1]], i16 0)
+; CHECK-NEXT:    [[A:%.*]] = trunc i16 [[TMP2]] to i8
+; CHECK-NEXT:    [[SHR:%.*]] = ashr i8 [[A]], 7
+; CHECK-NEXT:    ret i8 [[SHR]]
+;
+  %1 = tail call i16 @llvm.smin.i16(i16 %x, i16 127)
+  %2 = tail call i16 @llvm.smax.i16(i16 %1, i16 0)
+  %a = sext i16 %2 to i32
+  %shr = ashr i32 %a, 7
+  %b.trunc = trunc i32 %shr to i8
+  ret i8 %b.trunc
+}
+
+; While this does not.
+define i8 @test_bounds_2(i16 %x) {
+; CHECK-LABEL: define i8 @test_bounds_2(
+; CHECK-SAME: i16 [[X:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.smin.i16(i16 [[X]], i16 128)
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call i16 @llvm.smax.i16(i16 [[TMP1]], i16 0)
+; CHECK-NEXT:    [[SHR:%.*]] = ashr i16 [[TMP2]], 7
+; CHECK-NEXT:    [[B_TRUNC:%.*]] = trunc i16 [[SHR]] to i8
+; CHECK-NEXT:    ret i8 [[B_TRUNC]]
+;
+  %1 = tail call i16 @llvm.smin.i16(i16 %x, i16 128)
+  %2 = tail call i16 @llvm.smax.i16(i16 %1, i16 0)
+  %a = sext i16 %2 to i32
+  %shr = ashr i32 %a, 7
+  %b.trunc = trunc i32 %shr to i8
+  ret i8 %b.trunc
+}
+
+; This should get optimized. We test here whether the optimization works
+; correctly when the upper limit is the signed max int.
+define i8 @test_bounds_3(i16 %x) {
+; CHECK-LABEL: define i8 @test_bounds_3(
+; CHECK-SAME: i16 [[X:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.smin.i16(i16 [[X]], i16 32767)
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call i16 @llvm.smax.i16(i16 [[TMP1]], i16 32752)
+; CHECK-NEXT:    [[A:%.*]] = trunc i16 [[TMP2]] to i8
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[A]], -1
+; CHECK-NEXT:    ret i8 [[AND]]
+;
+  %1 = tail call i16 @llvm.smin.i16(i16 %x, i16 32767)
+  %2 = tail call i16 @llvm.smax.i16(i16 %1, i16 32752)
+  %a = sext i16 %2 to i32
+  %and = and i32 %a, 255
+  %b.trunc = trunc i32 %and to i8
+  ret i8 %b.trunc
+}

>From 5422719147d1ab5b1de987f5e703d06469ab383c Mon Sep 17 00:00:00 2001
From: "Bzowski, Adam" <adam.bzowski at intel.com>
Date: Wed, 25 Dec 2024 04:16:32 -0800
Subject: [PATCH 5/5] [ValueTracking] Improve KnownBits for signed min-max
 clamping

A signed min-max clamp is a sequence of smin and smax intrinsics that constrains a signed value into the range: lower_limit <= value <= upper_limit. The patch improves the calculation of KnownBits for a value subjected to such signed clamping.
---
 .../ValueTracking/knownbits-trunc-with-min-max-clamp.ll      | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/llvm/test/Analysis/ValueTracking/knownbits-trunc-with-min-max-clamp.ll b/llvm/test/Analysis/ValueTracking/knownbits-trunc-with-min-max-clamp.ll
index 6d48918f0e3540..1ff8a41b3459bc 100644
--- a/llvm/test/Analysis/ValueTracking/knownbits-trunc-with-min-max-clamp.ll
+++ b/llvm/test/Analysis/ValueTracking/knownbits-trunc-with-min-max-clamp.ll
@@ -1,5 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; RUN: opt < %s -passes=aggressive-instcombine -mtriple=x86_64 -S | FileCheck %s
+; RUN: opt < %s -passes=aggressive-instcombine -S | FileCheck %s
+
+; The LIT tests rely on i32, i16 and i8 being valid machine types.
+target datalayout = "n8:16:32"
 
 ; This LIT test checks if TruncInstCombine pass correctly recognizes the
 ; constraints from a signed min-max clamp. The clamp is a sequence of smin and



More information about the llvm-commits mailing list