[llvm] 462e102 - [InstCombine] Fold (X / C) < X and (X >> C) < X into X > 0 (#85555)

via llvm-commits llvm-commits at lists.llvm.org
Thu Apr 11 02:40:55 PDT 2024


Author: Poseydon42
Date: 2024-04-11T18:40:52+09:00
New Revision: 462e1023838703f1d3e763869afdd72ec5342a33

URL: https://github.com/llvm/llvm-project/commit/462e1023838703f1d3e763869afdd72ec5342a33
DIFF: https://github.com/llvm/llvm-project/commit/462e1023838703f1d3e763869afdd72ec5342a33.diff

LOG: [InstCombine] Fold (X / C) < X and (X >> C) < X into X > 0 (#85555)

Proofs: https://alive2.llvm.org/ce/z/52droC

This resolves #85313.
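
For readers who want a quick sanity check outside of Alive2, the unsigned identities are easy to brute-force on a narrow integer type. The following standalone C++ sketch (illustrative only, not part of the patch) exhaustively verifies that (X u/ C) u< X is equivalent to X u> 0 for divisors C > 1, and that (X u>> C) u< X is equivalent to X u> 0 for shift amounts C != 0, over all 8-bit values:

#include <cstdio>

int main() {
  bool ok = true;
  // (X u/ C) u< X  <=>  X u> 0, for any constant divisor C > 1.
  for (unsigned c = 2; c < 256; ++c)
    for (unsigned x = 0; x < 256; ++x)
      ok &= (((x / c) < x) == (x > 0));
  // (X u>> C) u< X  <=>  X u> 0, for any constant shift amount C != 0.
  for (unsigned c = 1; c < 8; ++c)
    for (unsigned x = 0; x < 256; ++x)
      ok &= (((x >> c) < x) == (x > 0));
  std::printf(ok ? "all 8-bit cases hold\n" : "mismatch found\n");
  return ok ? 0 : 1;
}

The eq/ne and other swapped-predicate forms follow the same pattern; the Alive2 link above covers the general proofs.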

Added: 
    

Modified: 
    llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
    llvm/test/Transforms/InstCombine/icmp-div-constant.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 9ff1e3aa5502e6..7292bb62702aaa 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -7163,6 +7163,40 @@ Instruction *InstCombinerImpl::foldICmpCommutative(ICmpInst::Predicate Pred,
   if (Value *V = foldICmpWithLowBitMaskedVal(Pred, Op0, Op1, Q, *this))
     return replaceInstUsesWith(CxtI, V);
 
+  // Folding (X / Y) pred X => X swap(pred) 0 for constant Y other than 0 or 1
+  {
+    const APInt *Divisor;
+    if (match(Op0, m_UDiv(m_Specific(Op1), m_APInt(Divisor))) &&
+        Divisor->ugt(1)) {
+      return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), Op1,
+                          Constant::getNullValue(Op1->getType()));
+    }
+
+    if (!ICmpInst::isUnsigned(Pred) &&
+        match(Op0, m_SDiv(m_Specific(Op1), m_APInt(Divisor))) &&
+        Divisor->ugt(1)) {
+      return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), Op1,
+                          Constant::getNullValue(Op1->getType()));
+    }
+  }
+
+  // Another case of this fold is (X >> Y) pred X => X swap(pred) 0 if Y != 0
+  {
+    const APInt *Shift;
+    if (match(Op0, m_LShr(m_Specific(Op1), m_APInt(Shift))) &&
+        !Shift->isZero()) {
+      return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), Op1,
+                          Constant::getNullValue(Op1->getType()));
+    }
+
+    if ((Pred == CmpInst::ICMP_SLT || Pred == CmpInst::ICMP_SGE) &&
+        match(Op0, m_AShr(m_Specific(Op1), m_APInt(Shift))) &&
+        !Shift->isZero()) {
+      return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), Op1,
+                          Constant::getNullValue(Op1->getType()));
+    }
+  }
+
   return nullptr;
 }
 

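The signed variants handled above can be spot-checked the same way. Below is a minimal sketch (again not part of the patch; the ashr_i8 helper is just a portable stand-in for LLVM's ashr on 8-bit values, and the check is limited to positive divisors) verifying the sdiv fold for a signed predicate and the ashr fold for the sge predicate:

#include <cstdio>

// Portable arithmetic shift right (stand-in for LLVM's ashr) for the check below.
static int ashr_i8(int x, int s) { return x < 0 ? ~(~x >> s) : x >> s; }

int main() {
  bool ok = true;
  // (X s/ C) slt X  <=>  X sgt 0, for positive constant divisors C > 1.
  for (int c = 2; c < 128; ++c)
    for (int x = -128; x < 128; ++x)
      ok &= (((x / c) < x) == (x > 0));
  // (X ashr C) sge X  <=>  X slt 1, for constant shift amounts C != 0.
  for (int s = 1; s < 8; ++s)
    for (int x = -128; x < 128; ++x)
      ok &= ((ashr_i8(x, s) >= x) == (x < 1));
  std::printf(ok ? "all 8-bit cases hold\n" : "mismatch found\n");
  return ok ? 0 : 1;
}

The remaining forms follow from the same swapped-predicate reasoning; for example, (X s/ C) eq X becomes X eq 0, since eq is its own swapped predicate.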
diff --git a/llvm/test/Transforms/InstCombine/icmp-div-constant.ll b/llvm/test/Transforms/InstCombine/icmp-div-constant.ll
index 8dcb96284685ff..b047715432d779 100644
--- a/llvm/test/Transforms/InstCombine/icmp-div-constant.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-div-constant.ll
@@ -375,3 +375,144 @@ define i1 @sdiv_eq_smin_use(i32 %x, i32 %y) {
   %r = icmp eq i32 %d, -2147483648
   ret i1 %r
 }
+
+; Fold (X / C) cmp X into X ~cmp 0 (~cmp is the inverse predicate of cmp), for some C != 1
+; Alternative form of this fold is when division is replaced with logic right shift
+
+define i1 @sdiv_x_by_const_cmp_x(i32 %x) {
+; CHECK-LABEL: @sdiv_x_by_const_cmp_x(
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i32 [[X:%.*]], 0
+; CHECK-NEXT:    ret i1 [[TMP1]]
+;
+  %v = sdiv i32 %x, 13
+  %r = icmp eq i32 %v, %x
+  ret i1 %r
+}
+
+define i1 @udiv_x_by_const_cmp_x(i32 %x) {
+; CHECK-LABEL: @udiv_x_by_const_cmp_x(
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i32 [[X:%.*]], 0
+; CHECK-NEXT:    ret i1 [[TMP1]]
+;
+  %1 = udiv i32 %x, 123
+  %2 = icmp slt i32 %1, %x
+  ret i1 %2
+}
+
+; Same as above but with right shift instead of division (C != 0)
+
+define i1 @lshr_x_by_const_cmp_x(i32 %x) {
+; CHECK-LABEL: @lshr_x_by_const_cmp_x(
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i32 [[X:%.*]], 0
+; CHECK-NEXT:    ret i1 [[TMP1]]
+;
+  %v = lshr i32 %x, 1
+  %r = icmp eq i32 %v, %x
+  ret i1 %r
+}
+
+define <4 x i1> @lshr_by_const_cmp_sle_value(<4 x i32> %x) {
+; CHECK-LABEL: @lshr_by_const_cmp_sle_value(
+; CHECK-NEXT:    [[R:%.*]] = icmp sgt <4 x i32> [[X:%.*]], <i32 -1, i32 -1, i32 -1, i32 -1>
+; CHECK-NEXT:    ret <4 x i1> [[R]]
+;
+  %v = lshr <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
+  %r = icmp sle <4 x i32> %v, %x
+  ret <4 x i1> %r
+}
+
+define i1 @lshr_by_const_cmp_sge_value(i32 %x) {
+; CHECK-LABEL: @lshr_by_const_cmp_sge_value(
+; CHECK-NEXT:    [[R:%.*]] = icmp slt i32 [[X:%.*]], 1
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %v = lshr i32 %x, 3
+  %r = icmp sge i32 %v, %x
+  ret i1 %r
+}
+
+define i1 @ashr_x_by_const_cmp_sge_x(i32 %x) {
+; CHECK-LABEL: @ashr_x_by_const_cmp_sge_x(
+; CHECK-NEXT:    [[R:%.*]] = icmp slt i32 [[X:%.*]], 1
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %v = ashr i32 %x, 5
+  %r = icmp sge i32 %v, %x
+  ret i1 %r
+}
+
+; Negative test - constant is 1
+
+define <2 x i1> @udiv_x_by_const_cmp_eq_value_neg(<2 x i32> %x) {
+; CHECK-LABEL: @udiv_x_by_const_cmp_eq_value_neg(
+; CHECK-NEXT:    [[V:%.*]] = udiv <2 x i32> [[X:%.*]], <i32 1, i32 3>
+; CHECK-NEXT:    [[R:%.*]] = icmp eq <2 x i32> [[V]], [[X]]
+; CHECK-NEXT:    ret <2 x i1> [[R]]
+;
+  %v = udiv <2 x i32> %x, <i32 1, i32 3>
+  %r = icmp eq <2 x i32> %v, %x
+  ret <2 x i1> %r
+}
+
+define <2 x i1> @sdiv_x_by_const_cmp_eq_value_neg(<2 x i32> %x) {
+; CHECK-LABEL: @sdiv_x_by_const_cmp_eq_value_neg(
+; CHECK-NEXT:    [[V:%.*]] = sdiv <2 x i32> [[X:%.*]], <i32 1, i32 3>
+; CHECK-NEXT:    [[R:%.*]] = icmp eq <2 x i32> [[V]], [[X]]
+; CHECK-NEXT:    ret <2 x i1> [[R]]
+;
+  %v = sdiv <2 x i32> %x, <i32 1, i32 3>
+  %r = icmp eq <2 x i32> %v, %x
+  ret <2 x i1> %r
+}
+
+; Negative test - constant is 0
+
+define <2 x i1> @lshr_x_by_const_cmp_slt_value_neg(<2 x i32> %x) {
+; CHECK-LABEL: @lshr_x_by_const_cmp_slt_value_neg(
+; CHECK-NEXT:    [[V:%.*]] = lshr <2 x i32> [[X:%.*]], <i32 0, i32 2>
+; CHECK-NEXT:    [[R:%.*]] = icmp slt <2 x i32> [[V]], [[X]]
+; CHECK-NEXT:    ret <2 x i1> [[R]]
+;
+  %v = lshr <2 x i32> %x, <i32 0, i32 2>
+  %r = icmp slt <2 x i32> %v, %x
+  ret <2 x i1> %r
+}
+
+; Negative test - unsigned predicate with sdiv
+
+define i1 @sdiv_x_by_const_cmp_ult_value_neg(i32 %x) {
+; CHECK-LABEL: @sdiv_x_by_const_cmp_ult_value_neg(
+; CHECK-NEXT:    [[V:%.*]] = sdiv i32 [[X:%.*]], 3
+; CHECK-NEXT:    [[R:%.*]] = icmp ult i32 [[V]], [[X]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %v = sdiv i32 %x, 3
+  %r = icmp ult i32 %v, %x
+  ret i1 %r
+}
+
+; Negative case - one of the components of a vector is 1
+
+define <4 x i1> @sdiv_x_by_const_cmp_sgt_value_neg(<4 x i32> %x) {
+; CHECK-LABEL: @sdiv_x_by_const_cmp_sgt_value_neg(
+; CHECK-NEXT:    [[V:%.*]] = sdiv <4 x i32> [[X:%.*]], <i32 1, i32 2, i32 3, i32 4>
+; CHECK-NEXT:    [[R:%.*]] = icmp sgt <4 x i32> [[V]], [[X]]
+; CHECK-NEXT:    ret <4 x i1> [[R]]
+;
+  %v = sdiv <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>
+  %r = icmp sgt <4 x i32> %v, %x
+  ret <4 x i1> %r
+}
+
+; Negative case - ashr only allows sge/slt predicates
+
+define i1 @ashr_x_by_const_cmp_sle_value_neg(i32 %x) {
+; CHECK-LABEL: @ashr_x_by_const_cmp_sle_value_neg(
+; CHECK-NEXT:    [[V:%.*]] = ashr i32 [[X:%.*]], 3
+; CHECK-NEXT:    [[R:%.*]] = icmp sle i32 [[V]], [[X]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %v = ashr i32 %x, 3
+  %r = icmp sle i32 %v, %x
+  ret i1 %r
+}

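To reproduce the updated CHECK lines locally, the modified test can be piped through InstCombine in the usual way, e.g. (assuming a built opt and FileCheck; the authoritative RUN line sits at the top of the test file, outside this diff):

opt -passes=instcombine -S llvm/test/Transforms/InstCombine/icmp-div-constant.ll | FileCheck llvm/test/Transforms/InstCombine/icmp-div-constant.ll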
