[llvm] r372768 - [InstCombine] (a+b) < a && (a+b) != 0 -> (0-b) < a iff a/b != 0 (PR43259)

Roman Lebedev via llvm-commits llvm-commits at lists.llvm.org
Tue Sep 24 09:10:50 PDT 2019


Author: lebedevri
Date: Tue Sep 24 09:10:50 2019
New Revision: 372768

URL: http://llvm.org/viewvc/llvm-project?rev=372768&view=rev
Log:
[InstCombine] (a+b) < a && (a+b) != 0 -> (0-b) < a iff a/b != 0 (PR43259)

Summary:
This is again motivated by the D67122 sanitizer check enhancement.
That patch seemingly worsens `-fsanitize=pointer-overflow`
overhead from 25% to 50%, which strongly implies missing folds.

For
```
#include <cassert>
char* test(char& base, signed long offset) {
  __builtin_assume(offset < 0);
  return &base + offset;
}
```
we produce the IR shown at

https://godbolt.org/z/r40U47

and again those two icmps can be merged:
```
Name: 0
Pre: C != 0
  %adjusted = add i8 %base, C
  %not_null = icmp ne i8 %adjusted, 0
  %no_underflow = icmp ult i8 %adjusted, %base
  %r = and i1 %not_null, %no_underflow
=>
  %neg_offset = sub i8 0, C
  %r = icmp ugt i8 %base, %neg_offset
```
https://rise4fun.com/Alive/ALap
https://rise4fun.com/Alive/slnN
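
Beyond the Alive proofs, the fold is small enough to check exhaustively at i8 width. A minimal standalone C++ sketch (illustrative only, not part of this patch):
```
#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  // Check all i8 (base, offset) pairs satisfying the precondition
  // that the offset is known non-zero (Pre: C != 0 above).
  for (unsigned Base = 0; Base != 256; ++Base) {
    for (unsigned Offset = 1; Offset != 256; ++Offset) {
      uint8_t Adjusted = uint8_t(Base + Offset);
      // Original pattern: (base + offset) != 0 && (base + offset) u< base.
      bool Before = Adjusted != 0 && Adjusted < uint8_t(Base);
      // Folded pattern: (0 - offset) u< base.
      bool After = uint8_t(0u - Offset) < uint8_t(Base);
      assert(Before == After);
    }
  }
  puts("exhaustive i8 check passed");
  return 0;
}
```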

There are 3 other variants of this pattern;
I believe they will all go into InstSimplify.

https://bugs.llvm.org/show_bug.cgi?id=43259

Reviewers: spatel, xbolva00, nikic

Reviewed By: spatel

Subscribers: efriedma, hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D67849

Modified:
    llvm/trunk/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
    llvm/trunk/test/Transforms/InstCombine/result-of-add-of-negative-is-non-zero-and-no-underflow.ll

Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp?rev=372768&r1=372767&r2=372768&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp Tue Sep 24 09:10:50 2019
@@ -1061,6 +1061,10 @@ static Value *foldUnsignedUnderflowCheck
       !ICmpInst::isEquality(EqPred))
     return nullptr;
 
+  auto IsKnownNonZero = [&](Value *V) {
+    return isKnownNonZero(V, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
+  };
+
   ICmpInst::Predicate UnsignedPred;
 
   Value *A, *B;
@@ -1071,15 +1075,32 @@ static Value *foldUnsignedUnderflowCheck
     if (UnsignedICmp->getOperand(0) != ZeroCmpOp)
       UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
 
+    auto GetKnownNonZeroAndOther = [&](Value *&NonZero, Value *&Other) {
+      if (!IsKnownNonZero(NonZero))
+        std::swap(NonZero, Other);
+      return IsKnownNonZero(NonZero);
+    };
+
     // Given  ZeroCmpOp = (A + B)
     //   ZeroCmpOp <= A && ZeroCmpOp != 0  -->  (0-B) <  A
     //   ZeroCmpOp >  A || ZeroCmpOp == 0  -->  (0-B) >= A
+    //
+    //   ZeroCmpOp <  A && ZeroCmpOp != 0  -->  (0-X) <  Y  iff
+    //   ZeroCmpOp >= A || ZeroCmpOp == 0  -->  (0-X) >= Y  iff
+    //     with X being the value (A/B) that is known to be non-zero,
+    //     and Y being the remaining value.
     if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
         IsAnd)
       return Builder.CreateICmpULT(Builder.CreateNeg(B), A);
+    if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE &&
+        IsAnd && GetKnownNonZeroAndOther(B, A))
+      return Builder.CreateICmpULT(Builder.CreateNeg(B), A);
     if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
         !IsAnd)
       return Builder.CreateICmpUGE(Builder.CreateNeg(B), A);
+    if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ &&
+        !IsAnd && GetKnownNonZeroAndOther(B, A))
+      return Builder.CreateICmpUGE(Builder.CreateNeg(B), A);
   }
 
   Value *Base, *Offset;
@@ -1094,12 +1115,10 @@ static Value *foldUnsignedUnderflowCheck
       UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
 
     if (UnsignedPred == ICmpInst::ICMP_ULT && IsAnd &&
-        EqPred == ICmpInst::ICMP_NE &&
-        isKnownNonZero(Offset, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
+        EqPred == ICmpInst::ICMP_NE && IsKnownNonZero(Offset))
       return Builder.CreateICmpUGT(Base, Offset);
     if (UnsignedPred == ICmpInst::ICMP_UGE && !IsAnd &&
-        EqPred == ICmpInst::ICMP_EQ &&
-        isKnownNonZero(Offset, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
+        EqPred == ICmpInst::ICMP_EQ && IsKnownNonZero(Offset))
       return Builder.CreateICmpULE(Base, Offset);
   }
 

Modified: llvm/trunk/test/Transforms/InstCombine/result-of-add-of-negative-is-non-zero-and-no-underflow.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/result-of-add-of-negative-is-non-zero-and-no-underflow.ll?rev=372768&r1=372767&r2=372768&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/result-of-add-of-negative-is-non-zero-and-no-underflow.ll (original)
+++ llvm/trunk/test/Transforms/InstCombine/result-of-add-of-negative-is-non-zero-and-no-underflow.ll Tue Sep 24 09:10:50 2019
@@ -31,10 +31,9 @@ define i1 @t1(i8 %base, i8 %offset) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[BASE]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %cmp = icmp slt i8 %base, 0
   call void @llvm.assume(i1 %cmp)
@@ -54,10 +53,9 @@ define i1 @t2(i8 %base, i8 %offset) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %cmp = icmp slt i8 %offset, 0
   call void @llvm.assume(i1 %cmp)
@@ -79,9 +77,9 @@ define i1 @t3_oneuse0(i8 %base, i8 %offs
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
 ; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
 ; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[BASE]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %cmp = icmp slt i8 %base, 0
   call void @llvm.assume(i1 %cmp)
@@ -100,11 +98,11 @@ define i1 @t4_oneuse1(i8 %base, i8 %offs
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
 ; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
 ; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[BASE]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %cmp = icmp slt i8 %base, 0
   call void @llvm.assume(i1 %cmp)
@@ -149,10 +147,9 @@ define i1 @t6_commutativity0(i8 %base, i
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NO_UNDERFLOW]], [[NOT_NULL]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[BASE]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %cmp = icmp slt i8 %base, 0
   call void @llvm.assume(i1 %cmp)
@@ -170,10 +167,9 @@ define i1 @t7_commutativity1(i8 %base, i
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[BASE]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %cmp = icmp slt i8 %base, 0
   call void @llvm.assume(i1 %cmp)
@@ -191,10 +187,9 @@ define i1 @t7_commutativity3(i8 %base, i
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NO_UNDERFLOW]], [[NOT_NULL]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[BASE]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %cmp = icmp slt i8 %base, 0
   call void @llvm.assume(i1 %cmp)
@@ -214,10 +209,9 @@ define i1 @t8(i8 %base, i8 %offset) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp eq i8 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp uge i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT:    [[R:%.*]] = or i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[BASE]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp uge i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %cmp = icmp slt i8 %base, 0
   call void @llvm.assume(i1 %cmp)
@@ -237,10 +231,9 @@ define i1 @t9(i8 %base, i8 %offset) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[OFFSET]]
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[BASE]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %cmp = icmp slt i8 %base, 0
   call void @llvm.assume(i1 %cmp)

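(For reference: the CHECK lines above follow the autogenerated style of utils/update_test_checks.py, and the usual way to exercise the file is to run `opt -instcombine -S` over the test and pipe the output through FileCheck.)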