[llvm] 51d7df3 - [InstructionSimplify] icmp (X+Y), (X+Z) simplification

Sjoerd Meijer via llvm-commits llvm-commits at lists.llvm.org
Thu Oct 22 01:05:48 PDT 2020


Author: Sjoerd Meijer
Date: 2020-10-22T08:55:52+01:00
New Revision: 51d7df3fa1c3ebd65f72c021074b7aeb43dac8c3

URL: https://github.com/llvm/llvm-project/commit/51d7df3fa1c3ebd65f72c021074b7aeb43dac8c3
DIFF: https://github.com/llvm/llvm-project/commit/51d7df3fa1c3ebd65f72c021074b7aeb43dac8c3.diff

LOG: [InstructionSimplify] icmp (X+Y), (X+Z) simplification

This improves simplifications for pattern `icmp (X+Y), (X+Z)` -> `icmp Y,Z`
if only one of the operands has NSW set, e.g.:

    icmp slt (x + 0), (x +nsw 1)

We can still safely rewrite this to:

    icmp slt 0, 1

because we know that the LHS (x + C1) can't overflow when the RHS (x + C2) has
NSW set and either C1 < C2 with C1 >= 0, or C2 < C1 with C1 <= 0.

This simplification is useful because ScalarEvolutionExpander, which is used to
generate code for SCEVs in different loop optimisers, is not always able to put
back NSW flags across control-flow, thus inhibiting CFG simplifications.

Differential Revision: https://reviews.llvm.org/D89317

Added: 
    

Modified: 
    llvm/lib/Analysis/InstructionSimplify.cpp
    llvm/test/Transforms/InstSimplify/compare.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index b730eb33960a..afa4af977c95 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -2849,6 +2849,42 @@ static Value *simplifyICmpWithBinOpOnLHS(
   return nullptr;
 }
 
+
+// If only one of the icmp's operands has NSW flags, try to prove that:
+//
+//   icmp slt (x + C1), (x +nsw C2)
+//
+// is equivalent to:
+//
+//   icmp slt C1, C2
+//
+// which is true if x + C2 has the NSW flags set and:
+// *) C1 < C2 && C1 >= 0, or
+// *) C2 < C1 && C1 <= 0.
+//
+static bool trySimplifyICmpWithAdds(CmpInst::Predicate Pred, Value *LHS,
+                                    Value *RHS) {
+  // TODO: only support icmp slt for now.
+  if (Pred != CmpInst::ICMP_SLT)
+    return false;
+
+  // Canonicalize nsw add as RHS.
+  if (!match(RHS, m_NSWAdd(m_Value(), m_Value())))
+    std::swap(LHS, RHS);
+  if (!match(RHS, m_NSWAdd(m_Value(), m_Value())))
+    return false;
+
+  Value *X;
+  const APInt *C1, *C2;
+  if (!match(LHS, m_c_Add(m_Value(X), m_APInt(C1))) ||
+      !match(RHS, m_c_Add(m_Specific(X), m_APInt(C2))))
+    return false;
+
+  return (C1->slt(*C2) && C1->isNonNegative()) ||
+         (C2->slt(*C1) && C1->isNonPositive());
+}
+
+
 /// TODO: A large part of this logic is duplicated in InstCombine's
 /// foldICmpBinOp(). We should be able to share that and avoid the code
 /// duplication.
@@ -2898,8 +2934,9 @@ static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
         return V;
 
     // icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow.
-    if (A && C && (A == C || A == D || B == C || B == D) && NoLHSWrapProblem &&
-        NoRHSWrapProblem) {
+    bool CanSimplify = (NoLHSWrapProblem && NoRHSWrapProblem) ||
+                       trySimplifyICmpWithAdds(Pred, LHS, RHS);
+    if (A && C && (A == C || A == D || B == C || B == D) && CanSimplify) {
       // Determine Y and Z in the form icmp (X+Y), (X+Z).
       Value *Y, *Z;
       if (A == C) {

diff  --git a/llvm/test/Transforms/InstSimplify/compare.ll b/llvm/test/Transforms/InstSimplify/compare.ll
index dab1937adc14..d0e5f3a73529 100644
--- a/llvm/test/Transforms/InstSimplify/compare.ll
+++ b/llvm/test/Transforms/InstSimplify/compare.ll
@@ -1769,12 +1769,7 @@ define i1 @cmp_through_addrspacecast(i32 addrspace(1)* %p1) {
 
 define i1 @icmp_nsw_1(i32 %V) {
 ; CHECK-LABEL: @icmp_nsw_1(
-; CHECK-NEXT:    [[ADD5:%.*]] = add i32 [[V:%.*]], 5
-; CHECK-NEXT:    [[ADD6:%.*]] = add nsw i32 [[V]], 6
-; CHECK-NEXT:    [[S1:%.*]] = sext i32 [[ADD5]] to i64
-; CHECK-NEXT:    [[S2:%.*]] = sext i32 [[ADD6]] to i64
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i64 [[S1]], [[S2]]
-; CHECK-NEXT:    ret i1 [[CMP]]
+; CHECK-NEXT:    ret i1 true
 ;
   %add5 = add i32 %V, 5
   %add6 = add nsw i32 %V, 6
@@ -1786,10 +1781,7 @@ define i1 @icmp_nsw_1(i32 %V) {
 
 define i1 @icmp_nsw_2(i32 %V) {
 ; CHECK-LABEL: @icmp_nsw_2(
-; CHECK-NEXT:    [[ADD5:%.*]] = add i32 [[V:%.*]], 5
-; CHECK-NEXT:    [[ADD6:%.*]] = add nsw i32 [[V]], 6
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[ADD5]], [[ADD6]]
-; CHECK-NEXT:    ret i1 [[CMP]]
+; CHECK-NEXT:    ret i1 true
 ;
   %add5 = add i32 %V, 5
   %add6 = add nsw i32 %V, 6
@@ -1799,10 +1791,7 @@ define i1 @icmp_nsw_2(i32 %V) {
 
 define i1 @icmp_nsw_commute(i32 %V) {
 ; CHECK-LABEL: @icmp_nsw_commute(
-; CHECK-NEXT:    [[ADD5:%.*]] = add i32 5, [[V:%.*]]
-; CHECK-NEXT:    [[ADD6:%.*]] = add nsw i32 [[V]], 6
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[ADD5]], [[ADD6]]
-; CHECK-NEXT:    ret i1 [[CMP]]
+; CHECK-NEXT:    ret i1 true
 ;
   %add5 = add i32 5, %V
   %add6 = add nsw i32 %V, 6
@@ -1812,10 +1801,7 @@ define i1 @icmp_nsw_commute(i32 %V) {
 
 define i1 @icmp_nsw_commute2(i32 %V) {
 ; CHECK-LABEL: @icmp_nsw_commute2(
-; CHECK-NEXT:    [[ADD5:%.*]] = add i32 [[V:%.*]], 5
-; CHECK-NEXT:    [[ADD6:%.*]] = add nsw i32 6, [[V]]
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[ADD5]], [[ADD6]]
-; CHECK-NEXT:    ret i1 [[CMP]]
+; CHECK-NEXT:    ret i1 true
 ;
   %add5 = add i32 %V, 5
   %add6 = add nsw i32 6, %V
@@ -1825,10 +1811,7 @@ define i1 @icmp_nsw_commute2(i32 %V) {
 
 define i1 @icmp_nsw_commute3(i32 %V) {
 ; CHECK-LABEL: @icmp_nsw_commute3(
-; CHECK-NEXT:    [[ADD5:%.*]] = add i32 5, [[V:%.*]]
-; CHECK-NEXT:    [[ADD6:%.*]] = add nsw i32 6, [[V]]
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[ADD5]], [[ADD6]]
-; CHECK-NEXT:    ret i1 [[CMP]]
+; CHECK-NEXT:    ret i1 true
 ;
   %add5 = add i32 5, %V
   %add6 = add nsw i32 6, %V
@@ -1861,10 +1844,7 @@ define i1 @icmp_nsw_23(i32 %V) {
 
 define i1 @icmp_nsw_false(i32 %V) {
 ; CHECK-LABEL: @icmp_nsw_false(
-; CHECK-NEXT:    [[ADD5:%.*]] = add nsw i32 [[V:%.*]], 6
-; CHECK-NEXT:    [[ADD6:%.*]] = add i32 [[V]], 5
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[ADD5]], [[ADD6]]
-; CHECK-NEXT:    ret i1 [[CMP]]
+; CHECK-NEXT:    ret i1 false
 ;
   %add5 = add nsw i32 %V, 6
   %add6 = add i32 %V, 5
@@ -1908,12 +1888,22 @@ define i1 @icmp_nsw_false_4(i32 %V) {
   ret i1 %cmp
 }
 
+define i1 @icmp_nsw_false_5(i8 %V) {
+; CHECK-LABEL: @icmp_nsw_false_5(
+; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[V:%.*]], 121
+; CHECK-NEXT:    [[ADDNSW:%.*]] = add nsw i8 [[V]], -104
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[ADD]], [[ADDNSW]]
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %add = add i8 %V, 121
+  %addnsw = add nsw i8 %V, -104
+  %cmp = icmp slt i8 %add, %addnsw
+  ret i1 %cmp
+}
+
 define i1 @icmp_nsw_i8(i8 %V) {
 ; CHECK-LABEL: @icmp_nsw_i8(
-; CHECK-NEXT:    [[ADD5:%.*]] = add i8 [[V:%.*]], 5
-; CHECK-NEXT:    [[ADD6:%.*]] = add nsw i8 [[V]], 6
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[ADD5]], [[ADD6]]
-; CHECK-NEXT:    ret i1 [[CMP]]
+; CHECK-NEXT:    ret i1 true
 ;
   %add5 = add i8 %V, 5
   %add6 = add nsw i8 %V, 6
@@ -1923,23 +1913,17 @@ define i1 @icmp_nsw_i8(i8 %V) {
 
 define i1 @icmp_nsw_i16(i16 %V) {
 ; CHECK-LABEL: @icmp_nsw_i16(
-; CHECK-NEXT:    [[ADD5:%.*]] = add i16 [[V:%.*]], 5
-; CHECK-NEXT:    [[ADD6:%.*]] = add nsw i16 [[V]], 6
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i16 [[ADD5]], [[ADD6]]
-; CHECK-NEXT:    ret i1 [[CMP]]
+; CHECK-NEXT:    ret i1 true
 ;
-  %add5 = add i16 %V, 5
-  %add6 = add nsw i16 %V, 6
+  %add5 = add i16 %V, 0
+  %add6 = add nsw i16 %V, 1
   %cmp = icmp slt i16 %add5, %add6
   ret i1 %cmp
 }
 
 define i1 @icmp_nsw_i64(i64 %V) {
 ; CHECK-LABEL: @icmp_nsw_i64(
-; CHECK-NEXT:    [[ADD5:%.*]] = add i64 [[V:%.*]], 5
-; CHECK-NEXT:    [[ADD6:%.*]] = add nsw i64 [[V]], 6
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i64 [[ADD5]], [[ADD6]]
-; CHECK-NEXT:    ret i1 [[CMP]]
+; CHECK-NEXT:    ret i1 true
 ;
   %add5 = add i64 %V, 5
   %add6 = add nsw i64 %V, 6
@@ -1949,10 +1933,7 @@ define i1 @icmp_nsw_i64(i64 %V) {
 
 define <4 x i1> @icmp_nsw_vec(<4 x i32> %V) {
 ; CHECK-LABEL: @icmp_nsw_vec(
-; CHECK-NEXT:    [[ADD5:%.*]] = add <4 x i32> [[V:%.*]], <i32 5, i32 5, i32 5, i32 5>
-; CHECK-NEXT:    [[ADD6:%.*]] = add nsw <4 x i32> [[V]], <i32 6, i32 6, i32 6, i32 6>
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt <4 x i32> [[ADD5]], [[ADD6]]
-; CHECK-NEXT:    ret <4 x i1> [[CMP]]
+; CHECK-NEXT:    ret <4 x i1> <i1 true, i1 true, i1 true, i1 true>
 ;
   %add5 = add <4 x i32> %V, <i32 5, i32 5, i32 5, i32 5>
   %add6 = add nsw <4 x i32> %V, <i32 6, i32 6, i32 6, i32 6>
@@ -2064,4 +2045,26 @@ define i1 @icmp_nsw_11(i32 %V) {
   ret i1 %cmp
 }
 
+define i1 @icmp_nsw_nonpos(i32 %V) {
+; CHECK-LABEL: @icmp_nsw_nonpos(
+; CHECK-NEXT:    ret i1 false
+;
+  %add5 = add i32 %V, 0
+  %add6 = add nsw i32 %V, -1
+  %cmp = icmp slt i32 %add5, %add6
+  ret i1 %cmp
+}
+
+define i1 @icmp_nsw_nonpos2(i32 %V) {
+; CHECK-LABEL: @icmp_nsw_nonpos2(
+; CHECK-NEXT:    [[ADD5:%.*]] = add i32 [[V:%.*]], 1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[ADD5]], [[V]]
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %add5 = add i32 %V, 1
+  %add6 = add nsw i32 %V, 0
+  %cmp = icmp slt i32 %add5, %add6
+  ret i1 %cmp
+}
+
 attributes #0 = { null_pointer_is_valid }


        


More information about the llvm-commits mailing list