[llvm-branch-commits] [llvm] be3a066 - [SCEV] Improve handling of pointer compares involving subtractions.

Florian Hahn via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Sat Feb 20 08:25:32 PST 2021


Author: Florian Hahn
Date: 2021-02-20T16:25:18Z
New Revision: be3a066ebfe86371a159267e1c7287a86614e041

URL: https://github.com/llvm/llvm-project/commit/be3a066ebfe86371a159267e1c7287a86614e041
DIFF: https://github.com/llvm/llvm-project/commit/be3a066ebfe86371a159267e1c7287a86614e041.diff

LOG: [SCEV] Improve handling of pointer compares involving subtractions.

This patch improves handling of pointer comparisons involving
subtractions by using the knowledge that the involved variables are != 0
and signed positive.

It uses applyLoopGuards to add != 0 information to the involved
expressions (by rewriting them as umax expressions). It also adds a new
fold to move zext into umax expressions and to move a udiv expression
inside a subtraction, if the operands guarantee a positive result.
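
The != 0 rewrite relies on a simple fact about unsigned values: if X is
known to be non-zero, then X == umax(X, 1), so the rewrite records X u>= 1
without changing the value. A minimal standalone C++ sketch (plain integers
stand in for SCEV expressions; the loop bound is arbitrary):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    // For any non-zero unsigned X, umax(X, 1) == X, so a "X != 0" loop
    // guard lets X be rewritten as a umax without losing information.
    int main() {
      for (uint64_t X = 1; X <= 4096; ++X)
        assert(std::max<uint64_t>(X, 1) == X);
      return 0;
    }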

Proof for zext/umax fold: https://alive2.llvm.org/ce/z/t75T53
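
The fold distributes zero-extension over umax; since zext preserves
unsigned order, both sides agree. A minimal standalone C++ check on
concrete i32 -> i64 values (the sample values are arbitrary):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    // zext (A umax B) == (zext A) umax (zext B), checked for i32 -> i64.
    int main() {
      uint32_t Samples[] = {0u, 1u, 7u, 0x7fffffffu, 0x80000000u, 0xffffffffu};
      for (uint32_t A : Samples)
        for (uint32_t B : Samples) {
          uint64_t Folded = static_cast<uint64_t>(std::max(A, B));
          uint64_t Split =
              std::max(static_cast<uint64_t>(A), static_cast<uint64_t>(B));
          assert(Folded == Split);
        }
      return 0;
    }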

Proof for getUDivExpr extension: https://alive2.llvm.org/ce/z/H_G2Q0
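
The getUDivExpr extension uses the arithmetic fact that
(-C1 + X) /u C2 == (X /u C2) - (C1 /u C2) when C1 s>= 0, X s>= C1, and
both C1 and X are multiples of C2. A minimal standalone C++ check (C2 = 4
stands in for an element width; the ranges are arbitrary samples):

    #include <cassert>
    #include <cstdint>

    // (X - C1) /u C2 == (X /u C2) - (C1 /u C2), given C1 s>= 0, X s>= C1,
    // and C1 % C2 == 0, X % C2 == 0.
    int main() {
      const uint64_t C2 = 4;
      for (uint64_t C1 = 0; C1 <= 64; C1 += C2)
        for (uint64_t X = C1; X <= 256; X += C2)
          assert((X - C1) / C2 == X / C2 - C1 / C2);
      return 0;
    }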

Proof for isKnownPredicateViaSubIdiom: https://alive2.llvm.org/ce/z/Gfe8mS
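
The new predicate helper boils down to: (Y - X) u<= Y whenever X s>= 0 and
Y u>= X, because the subtraction then stays in [0, Y] and cannot wrap. A
minimal standalone C++ check on concrete values (sample values arbitrary):

    #include <cassert>
    #include <cstdint>

    // (Y - X) u<= Y when X s>= 0 and Y u>= X: the result is in [0, Y].
    int main() {
      uint32_t Ys[] = {0u, 1u, 100u, 0x7fffffffu, 0xffffffffu};
      for (uint32_t Y : Ys)
        for (uint32_t X = 0; X <= Y && X <= 1000; ++X)
          assert(Y - X <= Y);
      return 0;
    }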

Added: 
    

Modified: 
    llvm/include/llvm/Analysis/ScalarEvolution.h
    llvm/lib/Analysis/ScalarEvolution.cpp
    llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
    llvm/test/Transforms/IndVarSimplify/simplify-pointer-arithmetic.ll

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/Analysis/ScalarEvolution.h b/llvm/include/llvm/Analysis/ScalarEvolution.h
index c35c1db7dfe0..1c9f9b36c94c 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -1876,6 +1876,11 @@ class ScalarEvolution {
   bool isKnownPredicateViaConstantRanges(ICmpInst::Predicate Pred,
                                          const SCEV *LHS, const SCEV *RHS);
 
+  /// Test if the condition described by Pred, LHS, RHS is known to hold by
+  /// decomposing a subtraction on the left-hand side.
+  bool isKnownPredicateViaSubIdiom(ICmpInst::Predicate Pred, const SCEV *LHS,
+                                   const SCEV *RHS);
+
   /// Try to prove the condition described by "LHS Pred RHS" by ruling out
   /// integer overflow.
   ///

diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index b8d55e6eb68a..6c82c5eece12 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -1833,6 +1833,12 @@ ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
           }
   }
 
+  // zext (A umax B) --> (zext A) umax (zext B)
+  if (auto *SM = dyn_cast<SCEVUMaxExpr>(Op)) {
+    return getUMaxExpr(getZeroExtendExpr(SM->getOperand(0), Ty),
+                       getZeroExtendExpr(SM->getOperand(1), Ty));
+  }
+
   // The cast wasn't folded; create an explicit cast node.
   // Recompute the insert position, as it may have been invalidated.
   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
@@ -3104,6 +3110,34 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
          getEffectiveSCEVType(RHS->getType()) &&
          "SCEVUDivExpr operand types don't match!");
 
+  // Try to shift udiv across an addition even when no wrapping info is
+  // present, using the fact that (B - A) will be in [0, B+1), if
+  // B s>= A and A s>= 0.
+  //
+  // (-C1 + X) /u C2 can be transformed to (X /u C2) - (C1 /u C2), if
+  //   * C1 % C2 == 0
+  //   * X % C2 == 0
+  //   * X s>= C1
+  //   * C1 s>= 0
+  //
+  // If C1 and C2 are constants, at least one of the udiv expressions can
+  // be eliminated. This pattern is commonly created for trip counts
+  // involving pointer IVs, where a multiple of the element width
+  // is subtracted.
+  const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(LHS);
+  const SCEVConstant *C = dyn_cast<SCEVConstant>(RHS);
+  if (Add && C && Add->getNumOperands() == 2) {
+    unsigned MultTrailing = C->getAPInt().countTrailingZeros();
+    auto *NegOp0 = getNegativeSCEV(Add->getOperand(0));
+    if (GetMinTrailingZeros(Add->getOperand(0)) >= MultTrailing &&
+        GetMinTrailingZeros(Add->getOperand(1)) >= MultTrailing &&
+        isKnownNonNegative(NegOp0) &&
+        isKnownPredicate(CmpInst::ICMP_SGE, Add->getOperand(1), NegOp0)) {
+      return getMinusSCEV(getUDivExactExpr(Add->getOperand(1), RHS),
+                          getUDivExactExpr(NegOp0, RHS));
+    }
+  }
+
   FoldingSetNodeID ID;
   ID.AddInteger(scUDivExpr);
   ID.AddPointer(LHS);
@@ -9113,7 +9147,6 @@ ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
   // First compute the unsigned distance from zero in the direction of Step.
   bool CountDown = StepC->getAPInt().isNegative();
   const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);
-
   // Handle unitary steps, which cannot wraparound.
   // 1*N = -Start; -1*N = Start (mod 2^BW), so:
   //   N = Distance (as unsigned)
@@ -10095,7 +10128,10 @@ bool ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
          "LHS is not available at Loop Entry");
   assert(isAvailableAtLoopEntry(RHS, L) &&
          "RHS is not available at Loop Entry");
-  return isBasicBlockEntryGuardedByCond(L->getHeader(), Pred, LHS, RHS);
+  if (isBasicBlockEntryGuardedByCond(L->getHeader(), Pred, LHS, RHS))
+    return true;
+  return isBasicBlockEntryGuardedByCond(
+      L->getHeader(), Pred, applyLoopGuards(LHS, L), applyLoopGuards(RHS, L));
 }
 
 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
@@ -10934,10 +10970,28 @@ static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred,
   return false;
 }
 
+bool ScalarEvolution::isKnownPredicateViaSubIdiom(ICmpInst::Predicate Pred,
+                                                  const SCEV *LHS,
+                                                  const SCEV *RHS) {
+  // Handle Y - X u<= Y, if X s>= 0 and Y u>= X. In that case, (Y - X) will be
+  // in [0, Y+1) and the expression won't wrap in the unsigned sense.
+  auto *Add = dyn_cast<SCEVAddExpr>(LHS);
+  if (Add && Pred == CmpInst::ICMP_ULE) {
+    auto *X = Add->getOperand(0);
+    auto *Y = Add->getOperand(1);
+    if (Y == RHS && isKnownNonPositive(X) && isKnownNonNegative(Y) &&
+        isKnownPredicateViaConstantRanges(CmpInst::ICMP_UGE, Y,
+                                          getNegativeSCEV(X)))
+      return true;
+  }
+  return false;
+}
+
 bool
 ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
                                            const SCEV *LHS, const SCEV *RHS) {
   return isKnownPredicateExtendIdiom(Pred, LHS, RHS) ||
+         isKnownPredicateViaSubIdiom(Pred, LHS, RHS) ||
          isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
          IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
          IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
@@ -11223,12 +11277,13 @@ ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
     return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount,
                      false /*MaxOrZero*/, Predicates);
   }
+
   // If the backedge is taken at least once, then it will be taken
   // (End-Start)/Stride times (rounded up to a multiple of Stride), where Start
   // is the LHS value of the less-than comparison the first time it is evaluated
   // and End is the RHS.
   const SCEV *BECountIfBackedgeTaken =
-    computeBECount(getMinusSCEV(End, Start), Stride, false);
+      computeBECount(getMinusSCEV(End, Start), Stride, false);
   // If the loop entry is guarded by the result of the backedge test of the
   // first loop iteration, then we know the backedge will be taken at least
   // once and so the backedge taken count is as above. If not then we use the

diff --git a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
index ae1fff0fa844..643605ff352c 100644
--- a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -1521,8 +1521,8 @@ bool IndVarSimplify::optimizeLoopExits(Loop *L, SCEVExpander &Rewriter) {
 
     // Can we prove that some other exit must be taken strictly before this
     // one?
-    if (SE->isLoopEntryGuardedByCond(L, CmpInst::ICMP_ULT,
-                                     MaxExitCount, ExitCount)) {
+    if (SE->isLoopEntryGuardedByCond(L, CmpInst::ICMP_ULT, MaxExitCount,
+                                     ExitCount)) {
       foldExit(L, ExitingBB, false, DeadInsts);
       Changed = true;
       continue;

diff --git a/llvm/test/Transforms/IndVarSimplify/simplify-pointer-arithmetic.ll b/llvm/test/Transforms/IndVarSimplify/simplify-pointer-arithmetic.ll
index 6ca5f43bb7d7..6560ca655ad7 100644
--- a/llvm/test/Transforms/IndVarSimplify/simplify-pointer-arithmetic.ll
+++ b/llvm/test/Transforms/IndVarSimplify/simplify-pointer-arithmetic.ll
@@ -20,10 +20,7 @@ define i1 @can_simplify_ult_i32_ptr_len_zext(i32* %p.base, i32 %len) {
 ; CHECK-NEXT:    ret i1 false
 ; CHECK:       header:
 ; CHECK-NEXT:    [[P:%.*]] = phi i32* [ [[P_INC:%.*]], [[LATCH:%.*]] ], [ [[P_BASE]], [[HEADER_PREHEADER]] ]
-; CHECK-NEXT:    [[I:%.*]] = phi i64 [ [[I_INC:%.*]], [[LATCH]] ], [ 0, [[HEADER_PREHEADER]] ]
-; CHECK-NEXT:    [[I_INC]] = add nuw nsw i64 [[I]], 1
-; CHECK-NEXT:    [[I_ULT_EXT:%.*]] = icmp ult i64 [[I]], [[EXT]]
-; CHECK-NEXT:    br i1 [[I_ULT_EXT]], label [[LATCH]], label [[TRAP_LOOPEXIT:%.*]]
+; CHECK-NEXT:    br i1 true, label [[LATCH]], label [[TRAP_LOOPEXIT:%.*]]
 ; CHECK:       latch:
 ; CHECK-NEXT:    [[P_INC]] = getelementptr inbounds i32, i32* [[P]], i64 1
 ; CHECK-NEXT:    [[C:%.*]] = icmp ne i32* [[P_INC]], [[P_END]]
@@ -179,10 +176,7 @@ define i1 @can_simplify_ule_i32_ptr_len_zext(i32* %p.base, i32 %len) {
 ; CHECK-NEXT:    ret i1 false
 ; CHECK:       header:
 ; CHECK-NEXT:    [[P:%.*]] = phi i32* [ [[P_INC:%.*]], [[LATCH:%.*]] ], [ [[P_BASE]], [[HEADER_PREHEADER]] ]
-; CHECK-NEXT:    [[I:%.*]] = phi i64 [ [[I_INC:%.*]], [[LATCH]] ], [ 1, [[HEADER_PREHEADER]] ]
-; CHECK-NEXT:    [[I_INC]] = add nuw nsw i64 [[I]], 1
-; CHECK-NEXT:    [[I_ULT_EXT:%.*]] = icmp ule i64 [[I]], [[EXT]]
-; CHECK-NEXT:    br i1 [[I_ULT_EXT]], label [[LATCH]], label [[TRAP_LOOPEXIT:%.*]]
+; CHECK-NEXT:    br i1 true, label [[LATCH]], label [[TRAP_LOOPEXIT:%.*]]
 ; CHECK:       latch:
 ; CHECK-NEXT:    [[P_INC]] = getelementptr inbounds i32, i32* [[P]], i64 1
 ; CHECK-NEXT:    [[C:%.*]] = icmp ne i32* [[P_INC]], [[P_END]]
@@ -234,10 +228,7 @@ define i1 @can_simplify_uge_i32_ptr_len_zext(i32* %p.base, i32 %len) {
 ; CHECK-NEXT:    ret i1 false
 ; CHECK:       header:
 ; CHECK-NEXT:    [[P:%.*]] = phi i32* [ [[P_INC:%.*]], [[LATCH:%.*]] ], [ [[P_BASE]], [[HEADER_PREHEADER]] ]
-; CHECK-NEXT:    [[I:%.*]] = phi i64 [ [[I_INC:%.*]], [[LATCH]] ], [ 0, [[HEADER_PREHEADER]] ]
-; CHECK-NEXT:    [[I_INC]] = add nuw nsw i64 [[I]], 1
-; CHECK-NEXT:    [[I_UGE_EXT:%.*]] = icmp uge i64 [[I]], [[EXT]]
-; CHECK-NEXT:    br i1 [[I_UGE_EXT]], label [[TRAP_LOOPEXIT:%.*]], label [[LATCH]]
+; CHECK-NEXT:    br i1 false, label [[TRAP_LOOPEXIT:%.*]], label [[LATCH]]
 ; CHECK:       latch:
 ; CHECK-NEXT:    [[P_INC]] = getelementptr inbounds i32, i32* [[P]], i64 1
 ; CHECK-NEXT:    [[C:%.*]] = icmp ne i32* [[P_INC]], [[P_END]]


        

