[llvm] a616f57 - [CorrelatedValuePropagation] Fold calls to UCMP/SCMP when we know that ranges of operands do not overlap (#97235)

via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 2 03:18:55 PDT 2024


Author: Poseydon42
Date: 2024-07-02T12:18:52+02:00
New Revision: a616f57c1f5f64ee6013c01b6cfe8c587b4cd1f8

URL: https://github.com/llvm/llvm-project/commit/a616f57c1f5f64ee6013c01b6cfe8c587b4cd1f8
DIFF: https://github.com/llvm/llvm-project/commit/a616f57c1f5f64ee6013c01b6cfe8c587b4cd1f8.diff

LOG: [CorrelatedValuePropagation] Fold calls to UCMP/SCMP when we know that ranges of operands do not overlap (#97235)

This patch folds calls to the `ucmp`/`scmp` intrinsics when we can
establish that the range of the first operand lies strictly below or
strictly above the range of the second operand (folding the call to -1
or 1, respectively), or that both operands are known to be the same
constant (folding the call to 0).
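
As a quick illustration (this example is not from the patch's test file; the
function name and constants are hypothetical), the fold relies on
LazyValueInfo's flow-sensitive ranges, so it can also fire when an operand's
range comes from a dominating branch rather than from llvm.assume. Under
-passes=correlated-propagation, a sketch like the following would be expected
to fold:

define i8 @cmp_in_guarded_block(i32 %x) {
entry:
  %is_small = icmp ult i32 %x, 10
  br i1 %is_small, label %then, label %else

then:
  ; Here %x is known to be in [0, 10), which lies strictly below 20,
  ; so the ucmp call should fold to -1.
  %r = call i8 @llvm.ucmp(i32 %x, i32 20)
  ret i8 %r

else:
  ret i8 0
}

The committed tests below exercise the same logic via llvm.assume; scmp_6
covers the case where the range is inferred from the location of the use.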

Added: 
    llvm/test/Transforms/CorrelatedValuePropagation/uscmp.ll

Modified: 
    llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
index 88adeb597e755..875d3ea78fae5 100644
--- a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
+++ b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -85,6 +85,7 @@ STATISTIC(NumOverflows, "Number of overflow checks removed");
 STATISTIC(NumSaturating,
     "Number of saturating arithmetics converted to normal arithmetics");
 STATISTIC(NumNonNull, "Number of function pointer arguments marked non-null");
+STATISTIC(NumCmpIntr, "Number of llvm.[us]cmp intrinsics removed");
 STATISTIC(NumMinMax, "Number of llvm.[us]{min,max} intrinsics removed");
 STATISTIC(NumSMinMax,
           "Number of llvm.s{min,max} intrinsics simplified to unsigned");
@@ -548,6 +549,35 @@ static bool processAbsIntrinsic(IntrinsicInst *II, LazyValueInfo *LVI) {
   return false;
 }
 
+static bool processCmpIntrinsic(IntrinsicInst *II, LazyValueInfo *LVI) {
+  bool IsSigned = II->getIntrinsicID() == Intrinsic::scmp;
+  ConstantRange LHS_CR = LVI->getConstantRangeAtUse(II->getOperandUse(0),
+                                                    /*UndefAllowed*/ false);
+  ConstantRange RHS_CR = LVI->getConstantRangeAtUse(II->getOperandUse(1),
+                                                    /*UndefAllowed*/ false);
+
+  if (LHS_CR.icmp(IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT, RHS_CR)) {
+    ++NumCmpIntr;
+    II->replaceAllUsesWith(ConstantInt::get(II->getType(), 1));
+    II->eraseFromParent();
+    return true;
+  }
+  if (LHS_CR.icmp(IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT, RHS_CR)) {
+    ++NumCmpIntr;
+    II->replaceAllUsesWith(ConstantInt::getSigned(II->getType(), -1));
+    II->eraseFromParent();
+    return true;
+  }
+  if (LHS_CR.icmp(ICmpInst::ICMP_EQ, RHS_CR)) {
+    ++NumCmpIntr;
+    II->replaceAllUsesWith(ConstantInt::get(II->getType(), 0));
+    II->eraseFromParent();
+    return true;
+  }
+
+  return false;
+}
+
 // See if this min/max intrinsic always picks its one specific operand.
 // If not, check whether we can canonicalize a signed minmax into an unsigned version
 static bool processMinMaxIntrinsic(MinMaxIntrinsic *MM, LazyValueInfo *LVI) {
@@ -639,6 +669,11 @@ static bool processCallSite(CallBase &CB, LazyValueInfo *LVI) {
     return processAbsIntrinsic(&cast<IntrinsicInst>(CB), LVI);
   }
 
+  if (CB.getIntrinsicID() == Intrinsic::scmp ||
+      CB.getIntrinsicID() == Intrinsic::ucmp) {
+    return processCmpIntrinsic(&cast<IntrinsicInst>(CB), LVI);
+  }
+
   if (auto *MM = dyn_cast<MinMaxIntrinsic>(&CB)) {
     return processMinMaxIntrinsic(MM, LVI);
   }

diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/uscmp.ll b/llvm/test/Transforms/CorrelatedValuePropagation/uscmp.ll
new file mode 100644
index 0000000000000..efe4235b344a6
--- /dev/null
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/uscmp.ll
@@ -0,0 +1,258 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -passes=correlated-propagation -S | FileCheck %s
+
+; If nothing is known, we can't change anything
+define i8 @ucmp_0(i32 %x, i32 %y) {
+; CHECK-LABEL: @ucmp_0(
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.ucmp.i8.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
+; CHECK-NEXT:    ret i8 [[TMP1]]
+;
+  %1 = call i8 @llvm.ucmp(i32 %x, i32 %y)
+  ret i8 %1
+}
+
+define i8 @scmp_0(i32 %x, i32 %y) {
+; CHECK-LABEL: @scmp_0(
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.scmp.i8.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
+; CHECK-NEXT:    ret i8 [[TMP1]]
+;
+  %1 = call i8 @llvm.scmp(i32 %x, i32 %y)
+  ret i8 %1
+}
+
+; If we know that the range of LHS is strictly below the range of RHS, return -1
+define i8 @ucmp_1(i32 %x, i32 %y) {
+  ; X is within [4, 8)
+; CHECK-LABEL: @ucmp_1(
+; CHECK-NEXT:    [[COND1:%.*]] = icmp uge i32 [[X:%.*]], 4
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND1]])
+; CHECK-NEXT:    [[COND2:%.*]] = icmp ult i32 [[X]], 8
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND2]])
+; CHECK-NEXT:    [[COND3:%.*]] = icmp uge i32 [[Y:%.*]], 8
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND3]])
+; CHECK-NEXT:    ret i8 -1
+;
+  %cond1 = icmp uge i32 %x, 4
+  call void @llvm.assume(i1 %cond1)
+  %cond2 = icmp ult i32 %x, 8
+  call void @llvm.assume(i1 %cond2)
+  ; Y is within [8, UNSIGNED_MAX)
+  %cond3 = icmp uge i32 %y, 8
+  call void @llvm.assume(i1 %cond3)
+
+  %1 = call i8 @llvm.ucmp(i32 %x, i32 %y)
+  ret i8 %1
+}
+
+define i8 @scmp_1(i32 %x, i32 %y) {
+  ; X is within [-5, 3)
+; CHECK-LABEL: @scmp_1(
+; CHECK-NEXT:    [[COND1:%.*]] = icmp sge i32 [[X:%.*]], -5
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND1]])
+; CHECK-NEXT:    [[COND2:%.*]] = icmp slt i32 [[X]], 3
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND2]])
+; CHECK-NEXT:    [[COND3:%.*]] = icmp sge i32 [[Y:%.*]], 3
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND3]])
+; CHECK-NEXT:    ret i8 -1
+;
+  %cond1 = icmp sge i32 %x, -5
+  call void @llvm.assume(i1 %cond1)
+  %cond2 = icmp slt i32 %x, 3
+  call void @llvm.assume(i1 %cond2)
+  ; Y is within [3, SIGNED_MAX)
+  %cond3 = icmp sge i32 %y, 3
+  call void @llvm.assume(i1 %cond3)
+
+  %1 = call i8 @llvm.scmp(i32 %x, i32 %y)
+  ret i8 %1
+}
+
+; If we know that the range of LHS is strictly above the range of RHS, return 1
+define i8 @ucmp_2(i32 %x, i32 %y) {
+  ; X is within [4, UNSIGNED_MAX)
+; CHECK-LABEL: @ucmp_2(
+; CHECK-NEXT:    [[COND1:%.*]] = icmp uge i32 [[X:%.*]], 4
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND1]])
+; CHECK-NEXT:    [[COND2:%.*]] = icmp ult i32 [[Y:%.*]], 4
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND2]])
+; CHECK-NEXT:    ret i8 1
+;
+  %cond1 = icmp uge i32 %x, 4
+  call void @llvm.assume(i1 %cond1)
+  ; Y is within [0, 4)
+  %cond2 = icmp ult i32 %y, 4
+  call void @llvm.assume(i1 %cond2)
+
+  %1 = call i8 @llvm.ucmp(i32 %x, i32 %y)
+  ret i8 %1
+}
+
+define i8 @scmp_2(i32 %x, i32 %y) {
+  ; X is within [4, SIGNED_MAX)
+; CHECK-LABEL: @scmp_2(
+; CHECK-NEXT:    [[COND1:%.*]] = icmp sge i32 [[X:%.*]], 4
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND1]])
+; CHECK-NEXT:    [[COND2:%.*]] = icmp slt i32 [[Y:%.*]], 4
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND2]])
+; CHECK-NEXT:    ret i8 1
+;
+  %cond1 = icmp sge i32 %x, 4
+  call void @llvm.assume(i1 %cond1)
+  ; Y is within [SIGNED_MIN, 4)
+  %cond2 = icmp slt i32 %y, 4
+  call void @llvm.assume(i1 %cond2)
+
+  %1 = call i8 @llvm.scmp(i32 %x, i32 %y)
+  ret i8 %1
+}
+
+; If we know that LHS and RHS are both equal to the same constant, return 0
+define i8 @ucmp_5(i32 %x, i32 %y) {
+; CHECK-LABEL: @ucmp_5(
+; CHECK-NEXT:    [[COND1:%.*]] = icmp eq i32 [[X:%.*]], 4
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND1]])
+; CHECK-NEXT:    [[COND2:%.*]] = icmp eq i32 [[Y:%.*]], 4
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND2]])
+; CHECK-NEXT:    ret i8 0
+;
+  %cond1 = icmp eq i32 %x, 4
+  call void @llvm.assume(i1 %cond1)
+  %cond2 = icmp eq i32 %y, 4
+  call void @llvm.assume(i1 %cond2)
+
+  %1 = call i8 @llvm.ucmp(i32 %x, i32 %y)
+  ret i8 %1
+}
+
+define i8 @scmp_5(i32 %x, i32 %y) {
+; CHECK-LABEL: @scmp_5(
+; CHECK-NEXT:    [[COND1:%.*]] = icmp eq i32 [[X:%.*]], -5
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND1]])
+; CHECK-NEXT:    [[COND2:%.*]] = icmp eq i32 [[Y:%.*]], -5
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND2]])
+; CHECK-NEXT:    ret i8 0
+;
+  %cond1 = icmp eq i32 %x, -5
+  call void @llvm.assume(i1 %cond1)
+  %cond2 = icmp eq i32 %y, -5
+  call void @llvm.assume(i1 %cond2)
+
+  %1 = call i8 @llvm.scmp(i32 %x, i32 %y)
+  ret i8 %1
+}
+
+; We can infer ranges based on the location where a UCMP/SCMP result is used
+define i8 @scmp_6(i32 noundef %x) {
+; CHECK-LABEL: @scmp_6(
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt i32 [[X:%.*]], 10
+; CHECK-NEXT:    [[TMP2:%.*]] = select i1 [[TMP1]], i8 -1, i8 5
+; CHECK-NEXT:    ret i8 [[TMP2]]
+;
+  %1 = icmp slt i32 %x, 10
+  %2 = call i8 @llvm.scmp(i32 %x, i32 10)
+  %3 = select i1 %1, i8 %2, i8 5
+  ret i8 %3
+}
+
+; Negative test: ranges overlap
+define i8 @ucmp_3(i32 %x, i32 %y) {
+  ; X is within [4, UNSIGNED_MAX)
+; CHECK-LABEL: @ucmp_3(
+; CHECK-NEXT:    [[COND1:%.*]] = icmp uge i32 [[X:%.*]], 4
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND1]])
+; CHECK-NEXT:    [[COND2:%.*]] = icmp ult i32 [[Y:%.*]], 6
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND2]])
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.ucmp.i8.i32(i32 [[X]], i32 [[Y]])
+; CHECK-NEXT:    ret i8 [[TMP1]]
+;
+  %cond1 = icmp uge i32 %x, 4
+  call void @llvm.assume(i1 %cond1)
+  ; Y is within [0, 6)
+  %cond2 = icmp ult i32 %y, 6
+  call void @llvm.assume(i1 %cond2)
+
+  %1 = call i8 @llvm.ucmp(i32 %x, i32 %y)
+  ret i8 %1
+}
+
+define i8 @scmp_3(i32 %x, i32 %y) {
+  ; X is within [2, SIGNED_MAX)
+; CHECK-LABEL: @scmp_3(
+; CHECK-NEXT:    [[COND1:%.*]] = icmp sge i32 [[X:%.*]], 2
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND1]])
+; CHECK-NEXT:    [[COND2:%.*]] = icmp slt i32 [[Y:%.*]], 4
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND2]])
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.scmp.i8.i32(i32 [[X]], i32 [[Y]])
+; CHECK-NEXT:    ret i8 [[TMP1]]
+;
+  %cond1 = icmp sge i32 %x, 2
+  call void @llvm.assume(i1 %cond1)
+  ; Y is within [SIGNED_MIN, 4)
+  %cond2 = icmp slt i32 %y, 4
+  call void @llvm.assume(i1 %cond2)
+
+  %1 = call i8 @llvm.scmp(i32 %x, i32 %y)
+  ret i8 %1
+}
+
+; Negative test: mismatched signedness of range-establishing comparisons and
+;                of the intrinsic
+define i8 @ucmp_4(i32 %x, i32 %y) {
+  ; X is within [4, SIGNED_MAX)
+; CHECK-LABEL: @ucmp_4(
+; CHECK-NEXT:    [[COND1:%.*]] = icmp sge i32 [[X:%.*]], 4
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND1]])
+; CHECK-NEXT:    [[COND2:%.*]] = icmp slt i32 [[Y:%.*]], 4
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND2]])
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.ucmp.i8.i32(i32 [[X]], i32 [[Y]])
+; CHECK-NEXT:    ret i8 [[TMP1]]
+;
+  %cond1 = icmp sge i32 %x, 4
+  call void @llvm.assume(i1 %cond1)
+  ; Y is within [0, 4)
+  %cond2 = icmp slt i32 %y, 4
+  call void @llvm.assume(i1 %cond2)
+
+  %1 = call i8 @llvm.ucmp(i32 %x, i32 %y)
+  ret i8 %1
+}
+
+define i8 @scmp_4(i32 %x, i32 %y) {
+  ; X is within [4, UNSIGNED_MAX)
+; CHECK-LABEL: @scmp_4(
+; CHECK-NEXT:    [[COND1:%.*]] = icmp uge i32 [[X:%.*]], 4
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND1]])
+; CHECK-NEXT:    [[COND2:%.*]] = icmp ult i32 [[Y:%.*]], 4
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND2]])
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.scmp.i8.i32(i32 [[X]], i32 [[Y]])
+; CHECK-NEXT:    ret i8 [[TMP1]]
+;
+  %cond1 = icmp uge i32 %x, 4
+  call void @llvm.assume(i1 %cond1)
+  ; Y is within [0, 4)
+  %cond2 = icmp ult i32 %y, 4
+  call void @llvm.assume(i1 %cond2)
+
+  %1 = call i8 @llvm.scmp(i32 %x, i32 %y)
+  ret i8 %1
+}
+
+; Negative test: ranges are the same, but we can't be sure the values are equal
+define i8 @ucmp_6(i32 %x, i32 %y) {
+  ; Both X and Y are within [0, 10]
+; CHECK-LABEL: @ucmp_6(
+; CHECK-NEXT:    [[COND1:%.*]] = icmp ule i32 [[X:%.*]], 10
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND1]])
+; CHECK-NEXT:    [[COND2:%.*]] = icmp ule i32 [[Y:%.*]], 10
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND2]])
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.ucmp.i8.i32(i32 [[X]], i32 [[Y]])
+; CHECK-NEXT:    ret i8 [[TMP1]]
+;
+  %cond1 = icmp ule i32 %x, 10
+  call void @llvm.assume(i1 %cond1)
+  %cond2 = icmp ule i32 %y, 10
+  call void @llvm.assume(i1 %cond2)
+
+  %1 = call i8 @llvm.ucmp(i32 %x, i32 %y)
+  ret i8 %1
+}


        

