[llvm] r372912 - [InstCombine] Fold (A - B) u>=/u< A --> B u>/u<= A iff B != 0

Roman Lebedev via llvm-commits llvm-commits at lists.llvm.org
Wed Sep 25 12:06:40 PDT 2019


Author: lebedevri
Date: Wed Sep 25 12:06:40 2019
New Revision: 372912

URL: http://llvm.org/viewvc/llvm-project?rev=372912&view=rev
Log:
[InstCombine] Fold (A - B) u>=/u< A --> B u>/u<= A iff B != 0

https://rise4fun.com/Alive/KtL

This also shows that the fold added in D67412 / r372257
was too specific: the new fold handles those test cases
more generically, so I delete the now-dead code.

This is yet again motivated by
D67122 "[UBSan][clang][compiler-rt] Applying non-zero offset to nullptr is undefined behaviour"

Modified:
    llvm/trunk/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
    llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp
    llvm/trunk/lib/Transforms/InstCombine/InstCombineInternal.h
    llvm/trunk/test/Transforms/InstCombine/result-of-usub-is-non-zero-and-no-overflow.ll
    llvm/trunk/test/Transforms/InstCombine/strict-sub-underflow-check-to-comparison-of-sub-operands.ll

Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp?rev=372912&r1=372911&r2=372912&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp Wed Sep 25 12:06:40 2019
@@ -1107,21 +1107,6 @@ static Value *foldUnsignedUnderflowCheck
   if (!match(ZeroCmpOp, m_Sub(m_Value(Base), m_Value(Offset))))
     return nullptr;
 
-  // ZeroCmpOp <  Base && ZeroCmpOp != 0  --> Base >  Offset  iff Offset != 0
-  // ZeroCmpOp >= Base || ZeroCmpOp == 0  --> Base <= Offset  iff Offset != 0
-  if (match(UnsignedICmp,
-            m_c_ICmp(UnsignedPred, m_Specific(ZeroCmpOp), m_Specific(Base)))) {
-    if (UnsignedICmp->getOperand(0) != ZeroCmpOp)
-      UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
-
-    if (UnsignedPred == ICmpInst::ICMP_ULT && IsAnd &&
-        EqPred == ICmpInst::ICMP_NE && IsKnownNonZero(Offset))
-      return Builder.CreateICmpUGT(Base, Offset);
-    if (UnsignedPred == ICmpInst::ICMP_UGE && !IsAnd &&
-        EqPred == ICmpInst::ICMP_EQ && IsKnownNonZero(Offset))
-      return Builder.CreateICmpULE(Base, Offset);
-  }
-
   if (!match(UnsignedICmp,
              m_c_ICmp(UnsignedPred, m_Specific(Base), m_Specific(Offset))) ||
       !ICmpInst::isUnsigned(UnsignedPred))

Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp?rev=372912&r1=372911&r2=372912&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp Wed Sep 25 12:06:40 2019
@@ -3638,7 +3638,8 @@ Value *InstCombiner::foldUnsignedMultipl
 /// TODO: A large part of this logic is duplicated in InstSimplify's
 /// simplifyICmpWithBinOp(). We should be able to share that and avoid the code
 /// duplication.
-Instruction *InstCombiner::foldICmpBinOp(ICmpInst &I) {
+Instruction *InstCombiner::foldICmpBinOp(ICmpInst &I, const SimplifyQuery &SQ) {
+  const SimplifyQuery Q = SQ.getWithInstruction(&I);
   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
 
   // Special logic for binary operators.
@@ -3840,6 +3841,14 @@ Instruction *InstCombiner::foldICmpBinOp
   // C u</u>= (C - D) --> C u</u>= D
   if (C == Op0 && (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE))
     return new ICmpInst(Pred, C, D);
+  // (A - B) u>=/u< A --> B u>/u<= A  iff B != 0
+  if (A == Op1 && (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_ULT) &&
+      isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
+    return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), B, A);
+  // C u<=/u> (C - D) --> C u</u>= D  iff D != 0
+  if (C == Op0 && (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT) &&
+      isKnownNonZero(D, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
+    return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), C, D);
 
   // icmp (A-B), (C-B) -> icmp A, C for equalities or if there is no overflow.
   if (B && D && B == D && NoOp0WrapProblem && NoOp1WrapProblem)
@@ -5346,6 +5355,7 @@ static Instruction *foldVectorCmp(CmpIns
 
 Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
   bool Changed = false;
+  const SimplifyQuery Q = SQ.getWithInstruction(&I);
   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
   unsigned Op0Cplxity = getComplexity(Op0);
   unsigned Op1Cplxity = getComplexity(Op1);
@@ -5360,8 +5370,7 @@ Instruction *InstCombiner::visitICmpInst
     Changed = true;
   }
 
-  if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1,
-                                  SQ.getWithInstruction(&I)))
+  if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, Q))
     return replaceInstUsesWith(I, V);
 
   // Comparing -val or val with non-zero is the same as just comparing val
@@ -5394,7 +5403,7 @@ Instruction *InstCombiner::visitICmpInst
   if (Instruction *Res = foldICmpWithDominatingICmp(I))
     return Res;
 
-  if (Instruction *Res = foldICmpBinOp(I))
+  if (Instruction *Res = foldICmpBinOp(I, Q))
     return Res;
 
   if (Instruction *Res = foldICmpUsingKnownBits(I))

Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineInternal.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineInternal.h?rev=372912&r1=372911&r2=372912&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineInternal.h (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineInternal.h Wed Sep 25 12:06:40 2019
@@ -868,7 +868,7 @@ private:
   Instruction *foldICmpWithConstant(ICmpInst &Cmp);
   Instruction *foldICmpInstWithConstant(ICmpInst &Cmp);
   Instruction *foldICmpInstWithConstantNotInt(ICmpInst &Cmp);
-  Instruction *foldICmpBinOp(ICmpInst &Cmp);
+  Instruction *foldICmpBinOp(ICmpInst &Cmp, const SimplifyQuery &SQ);
   Instruction *foldICmpEquality(ICmpInst &Cmp);
   Instruction *foldIRemByPowerOfTwoToBitTest(ICmpInst &I);
   Instruction *foldICmpWithZero(ICmpInst &Cmp);

Modified: llvm/trunk/test/Transforms/InstCombine/result-of-usub-is-non-zero-and-no-overflow.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/result-of-usub-is-non-zero-and-no-overflow.ll?rev=372912&r1=372911&r2=372912&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/result-of-usub-is-non-zero-and-no-overflow.ll (original)
+++ llvm/trunk/test/Transforms/InstCombine/result-of-usub-is-non-zero-and-no-overflow.ll Wed Sep 25 12:06:40 2019
@@ -292,7 +292,7 @@ define i1 @t10(i64 %base, i64* nonnull %
 ; CHECK-NEXT:    [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
 ; CHECK-NEXT:    call void @use64(i64 [[ADJUSTED]])
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i64 [[ADJUSTED]], [[BASE]]
+; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ule i64 [[OFFSET]], [[BASE]]
 ; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
 ; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i64 [[ADJUSTED]], 0
 ; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
@@ -315,7 +315,7 @@ define i1 @t11_commutative(i64 %base, i6
 ; CHECK-NEXT:    [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
 ; CHECK-NEXT:    call void @use64(i64 [[ADJUSTED]])
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i64 [[ADJUSTED]], [[BASE]]
+; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ule i64 [[OFFSET]], [[BASE]]
 ; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
 ; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i64 [[ADJUSTED]], 0
 ; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
@@ -339,7 +339,7 @@ define i1 @t12(i64 %base, i64* nonnull %
 ; CHECK-NEXT:    [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
 ; CHECK-NEXT:    call void @use64(i64 [[ADJUSTED]])
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp uge i64 [[ADJUSTED]], [[BASE]]
+; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ugt i64 [[OFFSET]], [[BASE]]
 ; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
 ; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp eq i64 [[ADJUSTED]], 0
 ; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
@@ -362,7 +362,7 @@ define i1 @t13(i64 %base, i64* nonnull %
 ; CHECK-NEXT:    [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
 ; CHECK-NEXT:    call void @use64(i64 [[ADJUSTED]])
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp uge i64 [[ADJUSTED]], [[BASE]]
+; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ugt i64 [[OFFSET]], [[BASE]]
 ; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
 ; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp eq i64 [[ADJUSTED]], 0
 ; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])

Modified: llvm/trunk/test/Transforms/InstCombine/strict-sub-underflow-check-to-comparison-of-sub-operands.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/strict-sub-underflow-check-to-comparison-of-sub-operands.ll?rev=372912&r1=372911&r2=372912&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/strict-sub-underflow-check-to-comparison-of-sub-operands.ll (original)
+++ llvm/trunk/test/Transforms/InstCombine/strict-sub-underflow-check-to-comparison-of-sub-operands.ll Wed Sep 25 12:06:40 2019
@@ -11,7 +11,7 @@ define i1 @t0(i8 %base, i8 %offset) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[RES:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
+; CHECK-NEXT:    [[RES:%.*]] = icmp uge i8 [[BASE]], [[OFFSET]]
 ; CHECK-NEXT:    ret i1 [[RES]]
 ;
   %cmp = icmp slt i8 %offset, 0
@@ -28,7 +28,7 @@ define i1 @t1(i8 %base, i8 %offset) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[RES:%.*]] = icmp uge i8 [[ADJUSTED]], [[BASE]]
+; CHECK-NEXT:    [[RES:%.*]] = icmp ult i8 [[BASE]], [[OFFSET]]
 ; CHECK-NEXT:    ret i1 [[RES]]
 ;
   %cmp = icmp slt i8 %offset, 0
@@ -46,7 +46,7 @@ define i1 @t2(i8 %offset) {
 ; CHECK-NEXT:    [[BASE:%.*]] = call i8 @gen8()
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE]], [[OFFSET]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[RES:%.*]] = icmp ugt i8 [[BASE]], [[ADJUSTED]]
+; CHECK-NEXT:    [[RES:%.*]] = icmp uge i8 [[BASE]], [[OFFSET]]
 ; CHECK-NEXT:    ret i1 [[RES]]
 ;
   %cmp = icmp slt i8 %offset, 0
@@ -65,7 +65,7 @@ define i1 @t3(i8 %offset) {
 ; CHECK-NEXT:    [[BASE:%.*]] = call i8 @gen8()
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE]], [[OFFSET]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[RES:%.*]] = icmp ule i8 [[BASE]], [[ADJUSTED]]
+; CHECK-NEXT:    [[RES:%.*]] = icmp ult i8 [[BASE]], [[OFFSET]]
 ; CHECK-NEXT:    ret i1 [[RES]]
 ;
   %cmp = icmp slt i8 %offset, 0



