[llvm] [SCEV] Introduce SCEVUse, use it instead of const SCEV * (NFCI) (WIP). (PR #91961)
Florian Hahn via llvm-commits
llvm-commits at lists.llvm.org
Wed May 22 07:31:05 PDT 2024
https://github.com/fhahn updated https://github.com/llvm/llvm-project/pull/91961
From 0cb398f53ad2931c0a0c8df03371ac4e2670cd8e Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Mon, 13 May 2024 09:49:39 +0100
Subject: [PATCH 1/4] [SCEV] Use const SCEV * explicitly in more places.
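For example, call sites that previously relied on `const auto *` now spell
out the SCEV type explicitly (an illustrative pattern, matching the hunks
below, e.g. in LoopAccessAnalysis.cpp):

  -    auto *Expr = PSE.getSCEV(Ptr);
  +    const SCEV *Expr = PSE.getSCEV(Ptr);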
---
llvm/lib/Analysis/Delinearization.cpp | 2 +-
llvm/lib/Analysis/IVUsers.cpp | 4 +-
llvm/lib/Analysis/LoopAccessAnalysis.cpp | 8 +-
llvm/lib/Target/ARM/MVETailPredication.cpp | 16 ++-
llvm/lib/Transforms/Scalar/IndVarSimplify.cpp | 10 +-
llvm/lib/Transforms/Scalar/LoopDeletion.cpp | 4 +-
.../lib/Transforms/Scalar/LoopPredication.cpp | 4 +-
.../Transforms/Utils/LowerMemIntrinsics.cpp | 4 +-
llvm/lib/Transforms/Utils/SimplifyIndVar.cpp | 10 +-
.../Vectorize/LoopVectorizationLegality.cpp | 9 +-
.../Transforms/Vectorize/SLPVectorizer.cpp | 4 +-
.../Analysis/ScalarEvolutionTest.cpp | 122 +++++++++---------
.../Transforms/Utils/LoopUtilsTest.cpp | 4 +-
.../Utils/ScalarEvolutionExpanderTest.cpp | 10 +-
14 files changed, 109 insertions(+), 102 deletions(-)
diff --git a/llvm/lib/Analysis/Delinearization.cpp b/llvm/lib/Analysis/Delinearization.cpp
index a45d8815e54ce..857778e3e75df 100644
--- a/llvm/lib/Analysis/Delinearization.cpp
+++ b/llvm/lib/Analysis/Delinearization.cpp
@@ -131,7 +131,7 @@ struct SCEVCollectAddRecMultiplies {
if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
bool HasAddRec = false;
SmallVector<const SCEV *, 0> Operands;
- for (const auto *Op : Mul->operands()) {
+ for (const SCEV *Op : Mul->operands()) {
const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
if (Unknown && !isa<CallInst>(Unknown->getValue())) {
Operands.push_back(Op);
diff --git a/llvm/lib/Analysis/IVUsers.cpp b/llvm/lib/Analysis/IVUsers.cpp
index 5c7883fb3b37c..681d0ebf53f27 100644
--- a/llvm/lib/Analysis/IVUsers.cpp
+++ b/llvm/lib/Analysis/IVUsers.cpp
@@ -73,7 +73,7 @@ static bool isInteresting(const SCEV *S, const Instruction *I, const Loop *L,
// An add is interesting if exactly one of its operands is interesting.
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
bool AnyInterestingYet = false;
- for (const auto *Op : Add->operands())
+ for (const SCEV *Op : Add->operands())
if (isInteresting(Op, I, L, SE, LI)) {
if (AnyInterestingYet)
return false;
@@ -346,7 +346,7 @@ static const SCEVAddRecExpr *findAddRecForLoop(const SCEV *S, const Loop *L) {
}
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
- for (const auto *Op : Add->operands())
+ for (const SCEV *Op : Add->operands())
if (const SCEVAddRecExpr *AR = findAddRecForLoop(Op, L))
return AR;
return nullptr;
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index 2a967f570c4aa..9405625c7d074 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -171,9 +171,9 @@ const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
assert(isa<SCEVUnknown>(StrideSCEV) && "shouldn't be in map");
ScalarEvolution *SE = PSE.getSE();
- const auto *CT = SE->getOne(StrideSCEV->getType());
+ const SCEV *CT = SE->getOne(StrideSCEV->getType());
PSE.addPredicate(*SE->getEqualPredicate(StrideSCEV, CT));
- auto *Expr = PSE.getSCEV(Ptr);
+ const SCEV *Expr = PSE.getSCEV(Ptr);
LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
<< " by: " << *Expr << "\n");
@@ -1089,7 +1089,7 @@ bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
return false;
if (!isNoWrap(PSE, StridesMap, Ptr, AccessTy, TheLoop)) {
- auto *Expr = PSE.getSCEV(Ptr);
+ const SCEV *Expr = PSE.getSCEV(Ptr);
if (!Assume || !isa<SCEVAddRecExpr>(Expr))
return false;
PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
@@ -1448,7 +1448,7 @@ static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
// Assume constant for other the operand so that the AddRec can be
// easily found.
isa<ConstantInt>(OBO->getOperand(1))) {
- auto *OpScev = PSE.getSCEV(OBO->getOperand(0));
+ const SCEV *OpScev = PSE.getSCEV(OBO->getOperand(0));
if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
diff --git a/llvm/lib/Target/ARM/MVETailPredication.cpp b/llvm/lib/Target/ARM/MVETailPredication.cpp
index fe97d4f758997..58ab5195efa88 100644
--- a/llvm/lib/Target/ARM/MVETailPredication.cpp
+++ b/llvm/lib/Target/ARM/MVETailPredication.cpp
@@ -205,8 +205,8 @@ const SCEV *MVETailPredication::IsSafeActiveMask(IntrinsicInst *ActiveLaneMask,
if (!L->makeLoopInvariant(ElemCount, Changed))
return nullptr;
- auto *EC= SE->getSCEV(ElemCount);
- auto *TC = SE->getSCEV(TripCount);
+ const SCEV *EC = SE->getSCEV(ElemCount);
+ const SCEV *TC = SE->getSCEV(TripCount);
int VectorWidth =
cast<FixedVectorType>(ActiveLaneMask->getType())->getNumElements();
if (VectorWidth != 2 && VectorWidth != 4 && VectorWidth != 8 &&
@@ -228,7 +228,7 @@ const SCEV *MVETailPredication::IsSafeActiveMask(IntrinsicInst *ActiveLaneMask,
// different counter. Using SCEV, we check that the induction is of the
// form i = i + 4, where the increment must be equal to the VectorWidth.
auto *IV = ActiveLaneMask->getOperand(0);
- auto *IVExpr = SE->getSCEV(IV);
+ const SCEV *IVExpr = SE->getSCEV(IV);
auto *AddExpr = dyn_cast<SCEVAddRecExpr>(IVExpr);
if (!AddExpr) {
@@ -291,14 +291,16 @@ const SCEV *MVETailPredication::IsSafeActiveMask(IntrinsicInst *ActiveLaneMask,
//
// which what we will be using here.
//
- auto *VW = SE->getSCEV(ConstantInt::get(TripCount->getType(), VectorWidth));
+ const SCEV *VW =
+ SE->getSCEV(ConstantInt::get(TripCount->getType(), VectorWidth));
// ElementCount + (VW-1):
- auto *Start = AddExpr->getStart();
- auto *ECPlusVWMinus1 = SE->getAddExpr(EC,
+ const SCEV *Start = AddExpr->getStart();
+ const SCEV *ECPlusVWMinus1 = SE->getAddExpr(
+ EC,
SE->getSCEV(ConstantInt::get(TripCount->getType(), VectorWidth - 1)));
// Ceil = ElementCount + (VW-1) / VW
- auto *Ceil = SE->getUDivExpr(ECPlusVWMinus1, VW);
+ const SCEV *Ceil = SE->getUDivExpr(ECPlusVWMinus1, VW);
// Prevent unused variable warnings with TC
(void)TC;
diff --git a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
index dd7c89034ca09..dfb4352e501c6 100644
--- a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -1287,7 +1287,7 @@ createReplacement(ICmpInst *ICmp, const Loop *L, BasicBlock *ExitingBB,
MaxIter = SE->getZeroExtendExpr(MaxIter, ARTy);
else if (SE->getTypeSizeInBits(ARTy) < SE->getTypeSizeInBits(MaxIterTy)) {
const SCEV *MinusOne = SE->getMinusOne(ARTy);
- auto *MaxAllowedIter = SE->getZeroExtendExpr(MinusOne, MaxIterTy);
+ const SCEV *MaxAllowedIter = SE->getZeroExtendExpr(MinusOne, MaxIterTy);
if (SE->isKnownPredicateAt(ICmpInst::ICMP_ULE, MaxIter, MaxAllowedIter, BI))
MaxIter = SE->getTruncateExpr(MaxIter, ARTy);
}
@@ -1299,7 +1299,7 @@ createReplacement(ICmpInst *ICmp, const Loop *L, BasicBlock *ExitingBB,
// So we manually construct umin(a - 1, b - 1).
SmallVector<const SCEV *, 4> Elements;
if (auto *UMin = dyn_cast<SCEVUMinExpr>(MaxIter)) {
- for (auto *Op : UMin->operands())
+ for (const SCEV *Op : UMin->operands())
Elements.push_back(SE->getMinusSCEV(Op, SE->getOne(Op->getType())));
MaxIter = SE->getUMinFromMismatchedTypes(Elements);
} else
@@ -1376,15 +1376,15 @@ static bool optimizeLoopExitWithUnknownExitCount(
for (auto *ICmp : LeafConditions) {
auto EL = SE->computeExitLimitFromCond(L, ICmp, Inverted,
/*ControlsExit*/ false);
- auto *ExitMax = EL.SymbolicMaxNotTaken;
+ const SCEV *ExitMax = EL.SymbolicMaxNotTaken;
if (isa<SCEVCouldNotCompute>(ExitMax))
continue;
// They could be of different types (specifically this happens after
// IV widening).
auto *WiderType =
SE->getWiderType(ExitMax->getType(), MaxIter->getType());
- auto *WideExitMax = SE->getNoopOrZeroExtend(ExitMax, WiderType);
- auto *WideMaxIter = SE->getNoopOrZeroExtend(MaxIter, WiderType);
+ const SCEV *WideExitMax = SE->getNoopOrZeroExtend(ExitMax, WiderType);
+ const SCEV *WideMaxIter = SE->getNoopOrZeroExtend(MaxIter, WiderType);
if (WideExitMax == WideMaxIter)
ICmpsFailingOnLastIter.insert(ICmp);
}
diff --git a/llvm/lib/Transforms/Scalar/LoopDeletion.cpp b/llvm/lib/Transforms/Scalar/LoopDeletion.cpp
index bfe9374cf2f8c..a8cf8ab3dfde2 100644
--- a/llvm/lib/Transforms/Scalar/LoopDeletion.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopDeletion.cpp
@@ -404,9 +404,9 @@ breakBackedgeIfNotTaken(Loop *L, DominatorTree &DT, ScalarEvolution &SE,
if (!L->getLoopLatch())
return LoopDeletionResult::Unmodified;
- auto *BTCMax = SE.getConstantMaxBackedgeTakenCount(L);
+ const SCEV *BTCMax = SE.getConstantMaxBackedgeTakenCount(L);
if (!BTCMax->isZero()) {
- auto *BTC = SE.getBackedgeTakenCount(L);
+ const SCEV *BTC = SE.getBackedgeTakenCount(L);
if (!BTC->isZero()) {
if (!isa<SCEVCouldNotCompute>(BTC) && SE.isKnownNonZero(BTC))
return LoopDeletionResult::Unmodified;
diff --git a/llvm/lib/Transforms/Scalar/LoopPredication.cpp b/llvm/lib/Transforms/Scalar/LoopPredication.cpp
index 027dbb9c0f71a..209b083a4e91a 100644
--- a/llvm/lib/Transforms/Scalar/LoopPredication.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopPredication.cpp
@@ -677,7 +677,7 @@ LoopPredication::widenICmpRangeCheck(ICmpInst *ICI, SCEVExpander &Expander,
LLVM_DEBUG(dbgs() << "Range check IV is not affine!\n");
return std::nullopt;
}
- auto *Step = RangeCheckIV->getStepRecurrence(*SE);
+ const SCEV *Step = RangeCheckIV->getStepRecurrence(*SE);
// We cannot just compare with latch IV step because the latch and range IVs
// may have different types.
if (!isSupportedStep(Step)) {
@@ -845,7 +845,7 @@ std::optional<LoopICmp> LoopPredication::parseLoopLatchICmp() {
return std::nullopt;
}
- auto *Step = Result->IV->getStepRecurrence(*SE);
+ const SCEV *Step = Result->IV->getStepRecurrence(*SE);
if (!isSupportedStep(Step)) {
LLVM_DEBUG(dbgs() << "Unsupported loop stride(" << *Step << ")!\n");
return std::nullopt;
diff --git a/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp b/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
index acd3f2802031e..505eef02d8161 100644
--- a/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
+++ b/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
@@ -497,8 +497,8 @@ static void createMemSetLoop(Instruction *InsertBefore, Value *DstAddr,
template <typename T>
static bool canOverlap(MemTransferBase<T> *Memcpy, ScalarEvolution *SE) {
if (SE) {
- auto *SrcSCEV = SE->getSCEV(Memcpy->getRawSource());
- auto *DestSCEV = SE->getSCEV(Memcpy->getRawDest());
+ const SCEV *SrcSCEV = SE->getSCEV(Memcpy->getRawSource());
+ const SCEV *DestSCEV = SE->getSCEV(Memcpy->getRawDest());
if (SE->isKnownPredicateAt(CmpInst::ICMP_NE, SrcSCEV, DestSCEV, Memcpy))
return false;
}
diff --git a/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp b/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
index 912c02c2ed3ae..cdc17e36af4a4 100644
--- a/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
@@ -162,8 +162,8 @@ Value *SimplifyIndvar::foldIVUser(Instruction *UseInst, Instruction *IVOperand)
D = ConstantInt::get(UseInst->getContext(),
APInt::getOneBitSet(BitWidth, D->getZExtValue()));
}
- const auto *LHS = SE->getSCEV(IVSrc);
- const auto *RHS = SE->getSCEV(D);
+ const SCEV *LHS = SE->getSCEV(IVSrc);
+ const SCEV *RHS = SE->getSCEV(D);
FoldedExpr = SE->getUDivExpr(LHS, RHS);
// We might have 'exact' flag set at this point which will no longer be
// correct after we make the replacement.
@@ -292,8 +292,8 @@ void SimplifyIndvar::eliminateIVComparison(ICmpInst *ICmp,
bool SimplifyIndvar::eliminateSDiv(BinaryOperator *SDiv) {
// Get the SCEVs for the ICmp operands.
- auto *N = SE->getSCEV(SDiv->getOperand(0));
- auto *D = SE->getSCEV(SDiv->getOperand(1));
+ const SCEV *N = SE->getSCEV(SDiv->getOperand(0));
+ const SCEV *D = SE->getSCEV(SDiv->getOperand(1));
// Simplify unnecessary loops away.
const Loop *L = LI->getLoopFor(SDiv->getParent());
@@ -389,7 +389,7 @@ void SimplifyIndvar::simplifyIVRemainder(BinaryOperator *Rem,
}
auto *T = Rem->getType();
- const auto *NLessOne = SE->getMinusSCEV(N, SE->getOne(T));
+ const SCEV *NLessOne = SE->getMinusSCEV(N, SE->getOne(T));
if (SE->isKnownPredicate(LT, NLessOne, D)) {
replaceRemWithNumeratorOrZero(Rem);
return;
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
index 9de49d1bcfeac..93382c901f381 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
@@ -511,14 +511,15 @@ class SCEVAddRecForUniformityRewriter
// Build a new AddRec by multiplying the step by StepMultiplier and
// incrementing the start by Offset * step.
Type *Ty = Expr->getType();
- auto *Step = Expr->getStepRecurrence(SE);
+ const SCEV *Step = Expr->getStepRecurrence(SE);
if (!SE.isLoopInvariant(Step, TheLoop)) {
CannotAnalyze = true;
return Expr;
}
- auto *NewStep = SE.getMulExpr(Step, SE.getConstant(Ty, StepMultiplier));
- auto *ScaledOffset = SE.getMulExpr(Step, SE.getConstant(Ty, Offset));
- auto *NewStart = SE.getAddExpr(Expr->getStart(), ScaledOffset);
+ const SCEV *NewStep =
+ SE.getMulExpr(Step, SE.getConstant(Ty, StepMultiplier));
+ const SCEV *ScaledOffset = SE.getMulExpr(Step, SE.getConstant(Ty, Offset));
+ const SCEV *NewStart = SE.getAddExpr(Expr->getStart(), ScaledOffset);
return SE.getAddRecExpr(NewStart, NewStep, TheLoop, SCEV::FlagAnyWrap);
}
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 140a1b1ffbafe..4adbdf2938fd1 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -18649,10 +18649,10 @@ bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
auto *GEPI = GEPList[I];
if (!Candidates.count(GEPI))
continue;
- auto *SCEVI = SE->getSCEV(GEPList[I]);
+ const SCEV *SCEVI = SE->getSCEV(GEPList[I]);
for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
auto *GEPJ = GEPList[J];
- auto *SCEVJ = SE->getSCEV(GEPList[J]);
+ const SCEV *SCEVJ = SE->getSCEV(GEPList[J]);
if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
Candidates.remove(GEPI);
Candidates.remove(GEPJ);
diff --git a/llvm/unittests/Analysis/ScalarEvolutionTest.cpp b/llvm/unittests/Analysis/ScalarEvolutionTest.cpp
index a7b3c5c404ab7..8ab22ef746c25 100644
--- a/llvm/unittests/Analysis/ScalarEvolutionTest.cpp
+++ b/llvm/unittests/Analysis/ScalarEvolutionTest.cpp
@@ -141,9 +141,9 @@ TEST_F(ScalarEvolutionsTest, SimplifiedPHI) {
PN->addIncoming(Constant::getNullValue(Ty), EntryBB);
PN->addIncoming(UndefValue::get(Ty), LoopBB);
ScalarEvolution SE = buildSE(*F);
- auto *S1 = SE.getSCEV(PN);
- auto *S2 = SE.getSCEV(PN);
- auto *ZeroConst = SE.getConstant(Ty, 0);
+ const SCEV *S1 = SE.getSCEV(PN);
+ const SCEV *S2 = SE.getSCEV(PN);
+ const SCEV *ZeroConst = SE.getConstant(Ty, 0);
// At some point, only the first call to getSCEV returned the simplified
// SCEVConstant and later calls just returned a SCEVUnknown referencing the
@@ -238,9 +238,9 @@ TEST_F(ScalarEvolutionsTest, CommutativeExprOperandOrder) {
auto *IV0 = getInstructionByName(F, "iv0");
auto *IV0Inc = getInstructionByName(F, "iv0.inc");
- auto *FirstExprForIV0 = SE.getSCEV(IV0);
- auto *FirstExprForIV0Inc = SE.getSCEV(IV0Inc);
- auto *SecondExprForIV0 = SE.getSCEV(IV0);
+ const SCEV *FirstExprForIV0 = SE.getSCEV(IV0);
+ const SCEV *FirstExprForIV0Inc = SE.getSCEV(IV0Inc);
+ const SCEV *SecondExprForIV0 = SE.getSCEV(IV0);
EXPECT_TRUE(isa<SCEVAddRecExpr>(FirstExprForIV0));
EXPECT_TRUE(isa<SCEVAddRecExpr>(FirstExprForIV0Inc));
@@ -260,12 +260,12 @@ TEST_F(ScalarEvolutionsTest, CommutativeExprOperandOrder) {
SmallVector<const SCEV *, 3> Ops4 = {C, B, A};
SmallVector<const SCEV *, 3> Ops5 = {C, A, B};
- auto *Mul0 = SE.getMulExpr(Ops0);
- auto *Mul1 = SE.getMulExpr(Ops1);
- auto *Mul2 = SE.getMulExpr(Ops2);
- auto *Mul3 = SE.getMulExpr(Ops3);
- auto *Mul4 = SE.getMulExpr(Ops4);
- auto *Mul5 = SE.getMulExpr(Ops5);
+ const SCEV *Mul0 = SE.getMulExpr(Ops0);
+ const SCEV *Mul1 = SE.getMulExpr(Ops1);
+ const SCEV *Mul2 = SE.getMulExpr(Ops2);
+ const SCEV *Mul3 = SE.getMulExpr(Ops3);
+ const SCEV *Mul4 = SE.getMulExpr(Ops4);
+ const SCEV *Mul5 = SE.getMulExpr(Ops5);
EXPECT_EQ(Mul0, Mul1) << "Expected " << *Mul0 << " == " << *Mul1;
EXPECT_EQ(Mul1, Mul2) << "Expected " << *Mul1 << " == " << *Mul2;
@@ -383,8 +383,8 @@ TEST_F(ScalarEvolutionsTest, CompareValueComplexity) {
// CompareValueComplexity that is both fast and more accurate.
ScalarEvolution SE = buildSE(*F);
- auto *A = SE.getSCEV(MulA);
- auto *B = SE.getSCEV(MulB);
+ const SCEV *A = SE.getSCEV(MulA);
+ const SCEV *B = SE.getSCEV(MulB);
EXPECT_NE(A, B);
}
@@ -430,21 +430,21 @@ TEST_F(ScalarEvolutionsTest, SCEVAddExpr) {
EXPECT_EQ(AddWithNUW->getNumOperands(), 3u);
EXPECT_EQ(AddWithNUW->getNoWrapFlags(), SCEV::FlagNUW);
- auto *AddWithAnyWrap =
+ const SCEV *AddWithAnyWrap =
SE.getAddExpr(SE.getSCEV(A3), SE.getSCEV(A4), SCEV::FlagAnyWrap);
auto *AddWithAnyWrapNUW = cast<SCEVAddExpr>(
SE.getAddExpr(AddWithAnyWrap, SE.getSCEV(A5), SCEV::FlagNUW));
EXPECT_EQ(AddWithAnyWrapNUW->getNumOperands(), 3u);
EXPECT_EQ(AddWithAnyWrapNUW->getNoWrapFlags(), SCEV::FlagAnyWrap);
- auto *AddWithNSW = SE.getAddExpr(
+ const SCEV *AddWithNSW = SE.getAddExpr(
SE.getSCEV(A2), SE.getConstant(APInt(32, 99)), SCEV::FlagNSW);
auto *AddWithNSW_NUW = cast<SCEVAddExpr>(
SE.getAddExpr(AddWithNSW, SE.getSCEV(A5), SCEV::FlagNUW));
EXPECT_EQ(AddWithNSW_NUW->getNumOperands(), 3u);
EXPECT_EQ(AddWithNSW_NUW->getNoWrapFlags(), SCEV::FlagAnyWrap);
- auto *AddWithNSWNUW =
+ const SCEV *AddWithNSWNUW =
SE.getAddExpr(SE.getSCEV(A2), SE.getSCEV(A4),
ScalarEvolution::setFlags(SCEV::FlagNUW, SCEV::FlagNSW));
auto *AddWithNSWNUW_NUW = cast<SCEVAddExpr>(
@@ -780,8 +780,8 @@ TEST_F(ScalarEvolutionsTest, SCEVExitLimitForgetLoop) {
// The add recurrence {5,+,1} does not correspond to any PHI in the IR, and
// that is relevant to this test.
- auto *Five = SE.getConstant(APInt(/*numBits=*/64, 5));
- auto *AR =
+ const SCEV *Five = SE.getConstant(APInt(/*numBits=*/64, 5));
+ const SCEV *AR =
SE.getAddRecExpr(Five, SE.getOne(T_int64), Loop, SCEV::FlagAnyWrap);
const SCEV *ARAtLoopExit = SE.getSCEVAtScope(AR, nullptr);
EXPECT_FALSE(isa<SCEVCouldNotCompute>(ARAtLoopExit));
@@ -1019,19 +1019,19 @@ TEST_F(ScalarEvolutionsTest, SCEVFoldSumOfTruncs) {
ScalarEvolution SE = buildSE(*F);
auto *Arg = &*(F->arg_begin());
- const auto *ArgSCEV = SE.getSCEV(Arg);
+ const SCEV *ArgSCEV = SE.getSCEV(Arg);
// Build the SCEV
- const auto *A0 = SE.getNegativeSCEV(ArgSCEV);
- const auto *A1 = SE.getTruncateExpr(A0, Int32Ty);
- const auto *A = SE.getNegativeSCEV(A1);
+ const SCEV *A0 = SE.getNegativeSCEV(ArgSCEV);
+ const SCEV *A1 = SE.getTruncateExpr(A0, Int32Ty);
+ const SCEV *A = SE.getNegativeSCEV(A1);
- const auto *B0 = SE.getTruncateExpr(ArgSCEV, Int32Ty);
- const auto *B = SE.getNegativeSCEV(B0);
+ const SCEV *B0 = SE.getTruncateExpr(ArgSCEV, Int32Ty);
+ const SCEV *B = SE.getNegativeSCEV(B0);
- const auto *Expr = SE.getAddExpr(A, B);
+ const SCEV *Expr = SE.getAddExpr(A, B);
// Verify that the SCEV was folded to 0
- const auto *ZeroConst = SE.getConstant(Int32Ty, 0);
+ const SCEV *ZeroConst = SE.getConstant(Int32Ty, 0);
EXPECT_EQ(Expr, ZeroConst);
}
@@ -1108,7 +1108,7 @@ TEST_F(ScalarEvolutionsTest, SCEVLoopDecIntrinsic) {
ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");
runWithSE(*M, "foo", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
- auto *ScevInc = SE.getSCEV(getInstructionByName(F, "inc"));
+ const SCEV *ScevInc = SE.getSCEV(getInstructionByName(F, "inc"));
EXPECT_TRUE(isa<SCEVAddRecExpr>(ScevInc));
});
}
@@ -1139,13 +1139,15 @@ TEST_F(ScalarEvolutionsTest, SCEVComputeConstantDifference) {
ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");
runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
- auto *ScevV0 = SE.getSCEV(getInstructionByName(F, "v0")); // %pp
- auto *ScevV3 = SE.getSCEV(getInstructionByName(F, "v3")); // (3 + %pp)
- auto *ScevIV = SE.getSCEV(getInstructionByName(F, "iv")); // {0,+,1}
- auto *ScevXA = SE.getSCEV(getInstructionByName(F, "xa")); // {%pp,+,1}
- auto *ScevYY = SE.getSCEV(getInstructionByName(F, "yy")); // {(3 + %pp),+,1}
- auto *ScevXB = SE.getSCEV(getInstructionByName(F, "xb")); // {%pp,+,1}
- auto *ScevIVNext = SE.getSCEV(getInstructionByName(F, "iv.next")); // {1,+,1}
+ const SCEV *ScevV0 = SE.getSCEV(getInstructionByName(F, "v0")); // %pp
+ const SCEV *ScevV3 = SE.getSCEV(getInstructionByName(F, "v3")); // (3 + %pp)
+ const SCEV *ScevIV = SE.getSCEV(getInstructionByName(F, "iv")); // {0,+,1}
+ const SCEV *ScevXA = SE.getSCEV(getInstructionByName(F, "xa")); // {%pp,+,1}
+ const SCEV *ScevYY =
+ SE.getSCEV(getInstructionByName(F, "yy")); // {(3 + %pp),+,1}
+ const SCEV *ScevXB = SE.getSCEV(getInstructionByName(F, "xb")); // {%pp,+,1}
+ const SCEV *ScevIVNext =
+ SE.getSCEV(getInstructionByName(F, "iv.next")); // {1,+,1}
auto diff = [&SE](const SCEV *LHS, const SCEV *RHS) -> std::optional<int> {
auto ConstantDiffOrNone = computeConstantDifference(SE, LHS, RHS);
@@ -1197,16 +1199,17 @@ TEST_F(ScalarEvolutionsTest, SCEVrewriteUnknowns) {
ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");
runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
- auto *ScevIV = SE.getSCEV(getInstructionByName(F, "iv")); // {0,+,1}
- auto *ScevI = SE.getSCEV(getArgByName(F, "i")); // {0,+,1}
+ const SCEV *ScevIV = SE.getSCEV(getInstructionByName(F, "iv")); // {0,+,1}
+ const SCEV *ScevI = SE.getSCEV(getArgByName(F, "i")); // {0,+,1}
ValueToSCEVMapTy RewriteMap;
RewriteMap[cast<SCEVUnknown>(ScevI)->getValue()] =
SE.getUMinExpr(ScevI, SE.getConstant(ScevI->getType(), 17));
- auto *WithUMin = SCEVParameterRewriter::rewrite(ScevIV, SE, RewriteMap);
+ const SCEV *WithUMin =
+ SCEVParameterRewriter::rewrite(ScevIV, SE, RewriteMap);
EXPECT_NE(WithUMin, ScevIV);
- auto *AR = dyn_cast<SCEVAddRecExpr>(WithUMin);
+ const auto *AR = dyn_cast<SCEVAddRecExpr>(WithUMin);
EXPECT_TRUE(AR);
EXPECT_EQ(AR->getStart(),
SE.getUMinExpr(ScevI, SE.getConstant(ScevI->getType(), 17)));
@@ -1227,9 +1230,9 @@ TEST_F(ScalarEvolutionsTest, SCEVAddNUW) {
ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");
runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
- auto *X = SE.getSCEV(getArgByName(F, "x"));
- auto *One = SE.getOne(X->getType());
- auto *Sum = SE.getAddExpr(X, One, SCEV::FlagNUW);
+ const SCEV *X = SE.getSCEV(getArgByName(F, "x"));
+ const SCEV *One = SE.getOne(X->getType());
+ const SCEV *Sum = SE.getAddExpr(X, One, SCEV::FlagNUW);
EXPECT_TRUE(SE.isKnownPredicate(ICmpInst::ICMP_UGE, Sum, X));
EXPECT_TRUE(SE.isKnownPredicate(ICmpInst::ICMP_UGT, Sum, X));
});
@@ -1253,16 +1256,17 @@ TEST_F(ScalarEvolutionsTest, SCEVgetRanges) {
Err, C);
runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
- auto *ScevIV = SE.getSCEV(getInstructionByName(F, "iv")); // {0,+,1}
- auto *ScevI = SE.getSCEV(getArgByName(F, "i"));
+ const SCEV *ScevIV = SE.getSCEV(getInstructionByName(F, "iv")); // {0,+,1}
+ const SCEV *ScevI = SE.getSCEV(getArgByName(F, "i"));
EXPECT_EQ(SE.getUnsignedRange(ScevIV).getLower(), 0);
EXPECT_EQ(SE.getUnsignedRange(ScevIV).getUpper(), 16);
- auto *Add = SE.getAddExpr(ScevI, ScevIV);
+ const SCEV *Add = SE.getAddExpr(ScevI, ScevIV);
ValueToSCEVMapTy RewriteMap;
RewriteMap[cast<SCEVUnknown>(ScevI)->getValue()] =
SE.getUMinExpr(ScevI, SE.getConstant(ScevI->getType(), 17));
- auto *AddWithUMin = SCEVParameterRewriter::rewrite(Add, SE, RewriteMap);
+ const SCEV *AddWithUMin =
+ SCEVParameterRewriter::rewrite(Add, SE, RewriteMap);
EXPECT_EQ(SE.getUnsignedRange(AddWithUMin).getLower(), 0);
EXPECT_EQ(SE.getUnsignedRange(AddWithUMin).getUpper(), 33);
});
@@ -1290,7 +1294,7 @@ TEST_F(ScalarEvolutionsTest, SCEVgetExitLimitForGuardedLoop) {
ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");
runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
- auto *ScevIV = SE.getSCEV(getInstructionByName(F, "iv")); // {0,+,1}
+ const SCEV *ScevIV = SE.getSCEV(getInstructionByName(F, "iv")); // {0,+,1}
const Loop *L = cast<SCEVAddRecExpr>(ScevIV)->getLoop();
const SCEV *BTC = SE.getBackedgeTakenCount(L);
@@ -1325,7 +1329,7 @@ TEST_F(ScalarEvolutionsTest, ImpliedViaAddRecStart) {
ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");
runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
- auto *X = SE.getSCEV(getInstructionByName(F, "x"));
+ const SCEV *X = SE.getSCEV(getInstructionByName(F, "x"));
auto *Context = getInstructionByName(F, "iv.next");
EXPECT_TRUE(SE.isKnownPredicateAt(ICmpInst::ICMP_NE, X,
SE.getZero(X->getType()), Context));
@@ -1354,8 +1358,8 @@ TEST_F(ScalarEvolutionsTest, UnsignedIsImpliedViaOperations) {
ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");
runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
- auto *X = SE.getSCEV(getInstructionByName(F, "x"));
- auto *Y = SE.getSCEV(getInstructionByName(F, "y"));
+ const SCEV *X = SE.getSCEV(getInstructionByName(F, "x"));
+ const SCEV *Y = SE.getSCEV(getInstructionByName(F, "y"));
auto *Guarded = getInstructionByName(F, "y")->getParent();
ASSERT_TRUE(Guarded);
EXPECT_TRUE(
@@ -1397,8 +1401,8 @@ TEST_F(ScalarEvolutionsTest, ProveImplicationViaNarrowing) {
ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");
runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
- auto *IV = SE.getSCEV(getInstructionByName(F, "iv"));
- auto *Zero = SE.getZero(IV->getType());
+ const SCEV *IV = SE.getSCEV(getInstructionByName(F, "iv"));
+ const SCEV *Zero = SE.getZero(IV->getType());
auto *Backedge = getInstructionByName(F, "iv.next")->getParent();
ASSERT_TRUE(Backedge);
(void)IV;
@@ -1525,8 +1529,8 @@ TEST_F(ScalarEvolutionsTest, SCEVUDivFloorCeiling) {
using namespace llvm::APIntOps;
APInt FloorInt = RoundingUDiv(NInt, DInt, APInt::Rounding::DOWN);
APInt CeilingInt = RoundingUDiv(NInt, DInt, APInt::Rounding::UP);
- auto *NS = SE.getConstant(NInt);
- auto *DS = SE.getConstant(DInt);
+ const SCEV *NS = SE.getConstant(NInt);
+ const SCEV *DS = SE.getConstant(DInt);
auto *FloorS = cast<SCEVConstant>(SE.getUDivExpr(NS, DS));
auto *CeilingS = cast<SCEVConstant>(SE.getUDivCeilSCEV(NS, DS));
ASSERT_TRUE(FloorS->getAPInt() == FloorInt);
@@ -1578,13 +1582,13 @@ TEST_F(ScalarEvolutionsTest, ApplyLoopGuards) {
ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");
runWithSE(*M, "test", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
- auto *TCScev = SE.getSCEV(getArgByName(F, "num"));
- auto *ApplyLoopGuardsTC = SE.applyLoopGuards(TCScev, *LI.begin());
+ const SCEV *TCScev = SE.getSCEV(getArgByName(F, "num"));
+ const SCEV *ApplyLoopGuardsTC = SE.applyLoopGuards(TCScev, *LI.begin());
// Assert that the new TC is (4 * ((4 umax %num) /u 4))
APInt Four(32, 4);
- auto *Constant4 = SE.getConstant(Four);
- auto *Max = SE.getUMaxExpr(TCScev, Constant4);
- auto *Mul = SE.getMulExpr(SE.getUDivExpr(Max, Constant4), Constant4);
+ const SCEV *Constant4 = SE.getConstant(Four);
+ const SCEV *Max = SE.getUMaxExpr(TCScev, Constant4);
+ const SCEV *Mul = SE.getMulExpr(SE.getUDivExpr(Max, Constant4), Constant4);
ASSERT_TRUE(Mul == ApplyLoopGuardsTC);
});
}
diff --git a/llvm/unittests/Transforms/Utils/LoopUtilsTest.cpp b/llvm/unittests/Transforms/Utils/LoopUtilsTest.cpp
index 8272c304ba288..193ff087ae6c8 100644
--- a/llvm/unittests/Transforms/Utils/LoopUtilsTest.cpp
+++ b/llvm/unittests/Transforms/Utils/LoopUtilsTest.cpp
@@ -113,7 +113,7 @@ TEST(LoopUtils, IsKnownPositiveInLoopTest) {
Loop *L = *LI.begin();
assert(L && L->getName() == "loop" && "Expecting loop 'loop'");
auto *Arg = F.getArg(0);
- auto *ArgSCEV = SE.getSCEV(Arg);
+ const SCEV *ArgSCEV = SE.getSCEV(Arg);
EXPECT_EQ(isKnownPositiveInLoop(ArgSCEV, L, SE), true);
});
}
@@ -137,7 +137,7 @@ TEST(LoopUtils, IsKnownNonPositiveInLoopTest) {
Loop *L = *LI.begin();
assert(L && L->getName() == "loop" && "Expecting loop 'loop'");
auto *Arg = F.getArg(0);
- auto *ArgSCEV = SE.getSCEV(Arg);
+ const SCEV *ArgSCEV = SE.getSCEV(Arg);
EXPECT_EQ(isKnownNonPositiveInLoop(ArgSCEV, L, SE), true);
});
}
diff --git a/llvm/unittests/Transforms/Utils/ScalarEvolutionExpanderTest.cpp b/llvm/unittests/Transforms/Utils/ScalarEvolutionExpanderTest.cpp
index eb27e69da47fa..67bcfd87a15d9 100644
--- a/llvm/unittests/Transforms/Utils/ScalarEvolutionExpanderTest.cpp
+++ b/llvm/unittests/Transforms/Utils/ScalarEvolutionExpanderTest.cpp
@@ -117,7 +117,7 @@ TEST_F(ScalarEvolutionExpanderTest, ExpandPtrTypeSCEV) {
CastInst::CreateBitOrPointerCast(Sel, I32PtrTy, "bitcast2", Br);
ScalarEvolution SE = buildSE(*F);
- auto *S = SE.getSCEV(CastB);
+ const SCEV *S = SE.getSCEV(CastB);
EXPECT_TRUE(isa<SCEVUnknown>(S));
}
@@ -185,7 +185,7 @@ TEST_F(ScalarEvolutionExpanderTest, SCEVZeroExtendExprNonIntegral) {
Instruction *Ret = Builder.CreateRetVoid();
ScalarEvolution SE = buildSE(*F);
- auto *AddRec =
+ const SCEV *AddRec =
SE.getAddRecExpr(SE.getUnknown(GepBase), SE.getConstant(T_int64, 1),
LI->getLoopFor(L), SCEV::FlagNUW);
@@ -762,7 +762,7 @@ TEST_F(ScalarEvolutionExpanderTest, SCEVExpandNonAffineAddRec) {
SCEVExpander Exp(SE, M->getDataLayout(), "expander");
auto *InsertAt = I.getNextNode();
Value *V = Exp.expandCodeFor(AR, nullptr, InsertAt);
- auto *ExpandedAR = SE.getSCEV(V);
+ const SCEV *ExpandedAR = SE.getSCEV(V);
// Check that the expansion happened literally.
EXPECT_EQ(AR, ExpandedAR);
});
@@ -811,7 +811,7 @@ TEST_F(ScalarEvolutionExpanderTest, SCEVExpandNonAffineAddRec) {
SCEVExpander Exp(SE, M->getDataLayout(), "expander");
auto *InsertAt = I.getNextNode();
Value *V = Exp.expandCodeFor(AR, nullptr, InsertAt);
- auto *ExpandedAR = SE.getSCEV(V);
+ const SCEV *ExpandedAR = SE.getSCEV(V);
// Check that the expansion happened literally.
EXPECT_EQ(AR, ExpandedAR);
});
@@ -866,7 +866,7 @@ TEST_F(ScalarEvolutionExpanderTest, SCEVExpandNonAffineAddRec) {
SCEVExpander Exp(SE, M->getDataLayout(), "expander");
auto *InsertAt = I.getNextNode();
Value *V = Exp.expandCodeFor(AR, nullptr, InsertAt);
- auto *ExpandedAR = SE.getSCEV(V);
+ const SCEV *ExpandedAR = SE.getSCEV(V);
// Check that the expansion happened literally.
EXPECT_EQ(AR, ExpandedAR);
});
From 1ba922090abff78aecd651e5dd943dd452a8197d Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Sat, 11 May 2024 19:32:57 +0100
Subject: [PATCH 2/4] [SCEV] Introduce SCEVUse, use it instead of const SCEV *
(NFCI) (WIP).
This patch introduces SCEVUse, a tagged pointer that carries the used
const SCEV * plus extra bits storing NUW/NSW flags that are only valid
at the specific use.
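In essence (a condensed sketch of the class this patch adds to
ScalarEvolution.h; see the full definition in the diff below):

  class SCEVUse : public PointerIntPair<const SCEV *, 2> {
  public:
    SCEVUse(const SCEV *S) : PointerIntPair(S, 0) {}
    SCEVUse(const SCEV *S, int Flags) : PointerIntPair(S, Flags) {}
    // Converts back to the plain expression; the use-specific bits
    // stored on this use are dropped.
    operator const SCEV *() const { return getPointer(); }
  };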
This was suggested by @nikic as an alternative
to https://github.com/llvm/llvm-project/pull/90742.
This patch just updates most SCEV infrastructure to operate on SCEVUse
instead of const SCEV *. It does not yet introduce any code that makes
use of the use-specific flags; I'll share that as follow-ups.
Note that this should be NFC, but currently there's at least one case
where it is not (turn-to-invariant.ll), which I'll investigate once we
agree on the overall direction.
At the moment, this PR also contains a commit that updates various SCEV
clients to use `const SCEV *` instead of `const auto *`, to prepare for
this patch. This reduces the number of changes needed, as SCEVUse
automatically converts to `const SCEV *`. This is a safe default, as it
just drops the use-specific flags of the expression itself (it does not
drop any use-specific flags of its operands, though).
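For illustration, a hypothetical use of that conversion (the patch does
not yet assign meaning to the flag bits, so the flag value below is made
up):

  const SCEV *S = SE.getSCEV(V);
  SCEVUse U(S, /*Flags=*/1);  // hypothetical use-specific wrap bit
  const SCEV *Plain = U;      // implicit conversion; U's flags are dropped
  assert(Plain == S);         // same underlying expression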
SCEVUse could probably also be used to address miscompiles where AddRecs
that are equivalent modulo flags result in an AddRec with incorrect
flags for some uses of some phis, e.g. the one
https://github.com/llvm/llvm-project/pull/80430 attempted to fix.
Compile-time impact:
stage1-O3: +0.06%
stage1-ReleaseThinLTO: +0.07%
stage1-ReleaseLTO-g: +0.07%
stage2-O3: +0.11%
https://llvm-compile-time-tracker.com/compare.php?from=ce055843e2be9643bd58764783a7bb69f6db8c9a&to=8c7f4e9e154ebc4862c4e2716cedc3c688352d7c&stat=instructions:u
---
llvm/include/llvm/Analysis/ScalarEvolution.h | 804 +++---
.../Analysis/ScalarEvolutionExpressions.h | 287 +-
llvm/lib/Analysis/DependenceAnalysis.cpp | 20 +-
llvm/lib/Analysis/IVDescriptors.cpp | 2 +-
llvm/lib/Analysis/LoopCacheAnalysis.cpp | 3 +-
llvm/lib/Analysis/ScalarEvolution.cpp | 2359 ++++++++---------
.../Scalar/InductiveRangeCheckElimination.cpp | 6 +-
.../Transforms/Scalar/LoopIdiomRecognize.cpp | 10 +-
.../Transforms/Scalar/LoopStrengthReduce.cpp | 9 +-
.../IndVarSimplify/turn-to-invariant.ll | 8 +-
.../Analysis/ScalarEvolutionTest.cpp | 16 +-
11 files changed, 1804 insertions(+), 1720 deletions(-)
diff --git a/llvm/include/llvm/Analysis/ScalarEvolution.h b/llvm/include/llvm/Analysis/ScalarEvolution.h
index 5828cc156cc78..2859df9964555 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -69,6 +69,97 @@ enum SCEVTypes : unsigned short;
extern bool VerifySCEV;
+class SCEV;
+
+class SCEVUse : public PointerIntPair<const SCEV *, 2> {
+public:
+ SCEVUse() : PointerIntPair(nullptr, 0) {}
+ SCEVUse(const SCEV *S) : PointerIntPair(S, 0) {}
+ SCEVUse(const SCEV *S, int Flags) : PointerIntPair(S, Flags) {}
+
+ operator const SCEV *() const { return getPointer(); }
+ const SCEV *operator->() const { return getPointer(); }
+ const SCEV *operator->() { return getPointer(); }
+
+ /// Print out the internal representation of this scalar to the specified
+ /// stream. This should really only be used for debugging purposes.
+ void print(raw_ostream &OS) const;
+
+ /// This method is used for debugging.
+ void dump() const;
+};
+
+template <> struct PointerLikeTypeTraits<SCEVUse> {
+ static inline void *getAsVoidPointer(SCEVUse U) { return U.getOpaqueValue(); }
+ static inline SCEVUse getFromVoidPointer(void *P) {
+ SCEVUse U;
+ U.setFromOpaqueValue(P);
+ return U;
+ }
+
+ /// Note, we assume here that void* is related to raw malloc'ed memory and
+ /// that malloc returns objects at least 4-byte aligned. However, this may be
+ /// wrong, or pointers may be from something other than malloc. In this case,
+ /// you should specify a real typed pointer or avoid this template.
+ ///
+ /// All clients should use assertions to do a run-time check to ensure that
+ /// this is actually true.
+ static constexpr int NumLowBitsAvailable = 0;
+};
+
+template <> struct DenseMapInfo<SCEVUse> {
+ // The following should hold, but it would require T to be complete:
+ // static_assert(alignof(T) <= (1 << Log2MaxAlign),
+ // "DenseMap does not support pointer keys requiring more than "
+ // "Log2MaxAlign bits of alignment");
+ static constexpr uintptr_t Log2MaxAlign = 12;
+
+ static inline SCEVUse getEmptyKey() {
+ uintptr_t Val = static_cast<uintptr_t>(-1);
+ Val <<= Log2MaxAlign;
+ return PointerLikeTypeTraits<SCEVUse>::getFromVoidPointer((void *)Val);
+ }
+
+ static inline SCEVUse getTombstoneKey() {
+ uintptr_t Val = static_cast<uintptr_t>(-2);
+ Val <<= Log2MaxAlign;
+ return PointerLikeTypeTraits<SCEVUse>::getFromVoidPointer((void *)Val);
+ }
+
+ static unsigned getHashValue(SCEVUse U) {
+ void *PtrVal = PointerLikeTypeTraits<SCEVUse>::getAsVoidPointer(U);
+ return (unsigned((uintptr_t)PtrVal) >> 4) ^
+ (unsigned((uintptr_t)PtrVal) >> 9);
+ }
+
+ static bool isEqual(const SCEVUse LHS, const SCEVUse RHS) {
+ return LHS == RHS;
+ }
+};
+
+template <typename To> [[nodiscard]] inline decltype(auto) dyn_cast(SCEVUse U) {
+ assert(detail::isPresent(U.getPointer()) &&
+ "dyn_cast on a non-existent value");
+ return CastInfo<To, const SCEV *>::doCastIfPossible(U.getPointer());
+}
+
+template <typename To> [[nodiscard]] inline decltype(auto) cast(SCEVUse U) {
+ assert(detail::isPresent(U.getPointer()) &&
+ "dyn_cast on a non-existent value");
+ return CastInfo<To, const SCEV *>::doCast(U.getPointer());
+}
+
+template <typename To> [[nodiscard]] inline bool isa(SCEVUse U) {
+ return CastInfo<To, const SCEV *>::isPossible(U.getPointer());
+}
+
+template <class X> auto dyn_cast_or_null(SCEVUse U) {
+ const SCEV *Val = U.getPointer();
+ if (!detail::isPresent(Val))
+ return CastInfo<X, const SCEV *>::castFailed();
+ return CastInfo<X, const SCEV *>::doCastIfPossible(detail::unwrapValue(Val));
+}
+
/// This class represents an analyzed expression in the program. These are
/// opaque objects that the client is not allowed to do much with directly.
///
@@ -147,7 +238,7 @@ class SCEV : public FoldingSetNode {
Type *getType() const;
/// Return operands of this SCEV expression.
- ArrayRef<const SCEV *> operands() const;
+ ArrayRef<SCEVUse> operands() const;
/// Return true if the expression is a constant zero.
bool isZero() const;
@@ -202,6 +293,11 @@ inline raw_ostream &operator<<(raw_ostream &OS, const SCEV &S) {
return OS;
}
+inline raw_ostream &operator<<(raw_ostream &OS, const SCEVUse &S) {
+ S.print(OS);
+ return OS;
+}
+
/// An object of this class is returned by queries that could not be answered.
/// For example, if you ask for the number of iterations of a linked-list
/// traversal loop, you will get one of these. None of the standard SCEV
@@ -211,6 +307,7 @@ struct SCEVCouldNotCompute : public SCEV {
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const SCEV *S);
+ static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
};
/// This class represents an assumption made using SCEV expressions which can
@@ -281,13 +378,13 @@ struct FoldingSetTrait<SCEVPredicate> : DefaultFoldingSetTrait<SCEVPredicate> {
class SCEVComparePredicate final : public SCEVPredicate {
/// We assume that LHS Pred RHS is true.
const ICmpInst::Predicate Pred;
- const SCEV *LHS;
- const SCEV *RHS;
+ SCEVUse LHS;
+ SCEVUse RHS;
public:
SCEVComparePredicate(const FoldingSetNodeIDRef ID,
- const ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS);
+ const ICmpInst::Predicate Pred, SCEVUse LHS,
+ SCEVUse RHS);
/// Implementation of the SCEVPredicate interface
bool implies(const SCEVPredicate *N) const override;
@@ -297,10 +394,10 @@ class SCEVComparePredicate final : public SCEVPredicate {
ICmpInst::Predicate getPredicate() const { return Pred; }
/// Returns the left hand side of the predicate.
- const SCEV *getLHS() const { return LHS; }
+ SCEVUse getLHS() const { return LHS; }
/// Returns the right hand side of the predicate.
- const SCEV *getRHS() const { return RHS; }
+ SCEVUse getRHS() const { return RHS; }
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const SCEVPredicate *P) {
@@ -415,8 +512,7 @@ class SCEVWrapPredicate final : public SCEVPredicate {
/// ScalarEvolution::Preds folding set. This is why the \c add function is sound.
class SCEVUnionPredicate final : public SCEVPredicate {
private:
- using PredicateMap =
- DenseMap<const SCEV *, SmallVector<const SCEVPredicate *, 4>>;
+ using PredicateMap = DenseMap<SCEVUse, SmallVector<const SCEVPredicate *, 4>>;
/// Vector with references to all predicates in this union.
SmallVector<const SCEVPredicate *, 16> Preds;
@@ -525,18 +621,17 @@ class ScalarEvolution {
/// loop { v2 = load @global2; }
/// }
/// No SCEV with operand V1, and v2 can exist in this program.
- bool instructionCouldExistWithOperands(const SCEV *A, const SCEV *B);
+ bool instructionCouldExistWithOperands(SCEVUse A, SCEVUse B);
/// Return true if the SCEV is a scAddRecExpr or it contains
/// scAddRecExpr. The result will be cached in HasRecMap.
- bool containsAddRecurrence(const SCEV *S);
+ bool containsAddRecurrence(SCEVUse S);
/// Is operation \p BinOp between \p LHS and \p RHS provably does not have
/// a signed/unsigned overflow (\p Signed)? If \p CtxI is specified, the
/// no-overflow fact should be true in the context of this instruction.
- bool willNotOverflow(Instruction::BinaryOps BinOp, bool Signed,
- const SCEV *LHS, const SCEV *RHS,
- const Instruction *CtxI = nullptr);
+ bool willNotOverflow(Instruction::BinaryOps BinOp, bool Signed, SCEVUse LHS,
+ SCEVUse RHS, const Instruction *CtxI = nullptr);
/// Parse NSW/NUW flags from add/sub/mul IR binary operation \p Op into
/// SCEV no-wrap flags, and deduce flag[s] that aren't known yet.
@@ -547,78 +642,84 @@ class ScalarEvolution {
getStrengthenedNoWrapFlagsFromBinOp(const OverflowingBinaryOperator *OBO);
/// Notify this ScalarEvolution that \p User directly uses SCEVs in \p Ops.
- void registerUser(const SCEV *User, ArrayRef<const SCEV *> Ops);
+ void registerUser(SCEVUse User, ArrayRef<SCEVUse> Ops);
/// Return true if the SCEV expression contains an undef value.
- bool containsUndefs(const SCEV *S) const;
+ bool containsUndefs(SCEVUse S) const;
/// Return true if the SCEV expression contains a Value that has been
/// optimised out and is now a nullptr.
- bool containsErasedValue(const SCEV *S) const;
+ bool containsErasedValue(SCEVUse S) const;
/// Return a SCEV expression for the full generality of the specified
/// expression.
- const SCEV *getSCEV(Value *V);
+ SCEVUse getSCEV(Value *V);
/// Return an existing SCEV for V if there is one, otherwise return nullptr.
- const SCEV *getExistingSCEV(Value *V);
-
- const SCEV *getConstant(ConstantInt *V);
- const SCEV *getConstant(const APInt &Val);
- const SCEV *getConstant(Type *Ty, uint64_t V, bool isSigned = false);
- const SCEV *getLosslessPtrToIntExpr(const SCEV *Op, unsigned Depth = 0);
- const SCEV *getPtrToIntExpr(const SCEV *Op, Type *Ty);
- const SCEV *getTruncateExpr(const SCEV *Op, Type *Ty, unsigned Depth = 0);
- const SCEV *getVScale(Type *Ty);
- const SCEV *getElementCount(Type *Ty, ElementCount EC);
- const SCEV *getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth = 0);
- const SCEV *getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
- unsigned Depth = 0);
- const SCEV *getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth = 0);
- const SCEV *getSignExtendExprImpl(const SCEV *Op, Type *Ty,
- unsigned Depth = 0);
- const SCEV *getCastExpr(SCEVTypes Kind, const SCEV *Op, Type *Ty);
- const SCEV *getAnyExtendExpr(const SCEV *Op, Type *Ty);
- const SCEV *getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
- SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
- unsigned Depth = 0);
- const SCEV *getAddExpr(const SCEV *LHS, const SCEV *RHS,
- SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
- unsigned Depth = 0) {
- SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
+ SCEVUse getExistingSCEV(Value *V);
+
+ SCEVUse getConstant(ConstantInt *V);
+ SCEVUse getConstant(const APInt &Val);
+ SCEVUse getConstant(Type *Ty, uint64_t V, bool isSigned = false);
+ SCEVUse getLosslessPtrToIntExpr(SCEVUse Op, unsigned Depth = 0);
+ SCEVUse getPtrToIntExpr(SCEVUse Op, Type *Ty);
+ SCEVUse getTruncateExpr(SCEVUse Op, Type *Ty, unsigned Depth = 0);
+ SCEVUse getVScale(Type *Ty);
+ SCEVUse getElementCount(Type *Ty, ElementCount EC);
+ SCEVUse getZeroExtendExpr(SCEVUse Op, Type *Ty, unsigned Depth = 0);
+ SCEVUse getZeroExtendExprImpl(SCEVUse Op, Type *Ty, unsigned Depth = 0);
+ SCEVUse getSignExtendExpr(SCEVUse Op, Type *Ty, unsigned Depth = 0);
+ SCEVUse getSignExtendExprImpl(SCEVUse Op, Type *Ty, unsigned Depth = 0);
+ SCEVUse getCastExpr(SCEVTypes Kind, SCEVUse Op, Type *Ty);
+ SCEVUse getAnyExtendExpr(SCEVUse Op, Type *Ty);
+ SCEVUse getAddExpr(ArrayRef<const SCEV *> Ops,
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+ unsigned Depth = 0);
+ SCEVUse getAddExpr(SmallVectorImpl<SCEVUse> &Ops,
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+ unsigned Depth = 0);
+ SCEVUse getAddExpr(SCEVUse LHS, SCEVUse RHS,
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+ unsigned Depth = 0) {
+ SmallVector<SCEVUse, 2> Ops = {LHS, RHS};
return getAddExpr(Ops, Flags, Depth);
}
- const SCEV *getAddExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
- SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
- unsigned Depth = 0) {
- SmallVector<const SCEV *, 3> Ops = {Op0, Op1, Op2};
+ SCEVUse getAddExpr(SCEVUse Op0, SCEVUse Op1, SCEVUse Op2,
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+ unsigned Depth = 0) {
+ SmallVector<SCEVUse, 3> Ops = {Op0, Op1, Op2};
return getAddExpr(Ops, Flags, Depth);
}
- const SCEV *getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
- SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
- unsigned Depth = 0);
- const SCEV *getMulExpr(const SCEV *LHS, const SCEV *RHS,
- SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
- unsigned Depth = 0) {
- SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
+ SCEVUse getMulExpr(ArrayRef<const SCEV *> Ops,
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+ unsigned Depth = 0);
+ SCEVUse getMulExpr(SmallVectorImpl<SCEVUse> &Ops,
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+ unsigned Depth = 0);
+ SCEVUse getMulExpr(SCEVUse LHS, SCEVUse RHS,
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+ unsigned Depth = 0) {
+ SmallVector<SCEVUse, 2> Ops = {LHS, RHS};
return getMulExpr(Ops, Flags, Depth);
}
- const SCEV *getMulExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
- SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
- unsigned Depth = 0) {
- SmallVector<const SCEV *, 3> Ops = {Op0, Op1, Op2};
+ SCEVUse getMulExpr(SCEVUse Op0, SCEVUse Op1, SCEVUse Op2,
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+ unsigned Depth = 0) {
+ SmallVector<SCEVUse, 3> Ops = {Op0, Op1, Op2};
return getMulExpr(Ops, Flags, Depth);
}
- const SCEV *getUDivExpr(const SCEV *LHS, const SCEV *RHS);
- const SCEV *getUDivExactExpr(const SCEV *LHS, const SCEV *RHS);
- const SCEV *getURemExpr(const SCEV *LHS, const SCEV *RHS);
- const SCEV *getAddRecExpr(const SCEV *Start, const SCEV *Step, const Loop *L,
- SCEV::NoWrapFlags Flags);
- const SCEV *getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
- const Loop *L, SCEV::NoWrapFlags Flags);
- const SCEV *getAddRecExpr(const SmallVectorImpl<const SCEV *> &Operands,
- const Loop *L, SCEV::NoWrapFlags Flags) {
- SmallVector<const SCEV *, 4> NewOp(Operands.begin(), Operands.end());
+ SCEVUse getUDivExpr(SCEVUse LHS, SCEVUse RHS);
+ SCEVUse getUDivExactExpr(SCEVUse LHS, SCEVUse RHS);
+ SCEVUse getURemExpr(SCEVUse LHS, SCEVUse RHS);
+ SCEVUse getAddRecExpr(SCEVUse Start, SCEVUse Step, const Loop *L,
+ SCEV::NoWrapFlags Flags);
+ SCEVUse getAddRecExpr(ArrayRef<const SCEV *> Operands, const Loop *L,
+ SCEV::NoWrapFlags Flags);
+ SCEVUse getAddRecExpr(SmallVectorImpl<SCEVUse> &Operands, const Loop *L,
+ SCEV::NoWrapFlags Flags);
+ SCEVUse getAddRecExpr(const SmallVectorImpl<SCEVUse> &Operands, const Loop *L,
+ SCEV::NoWrapFlags Flags) {
+ SmallVector<SCEVUse, 4> NewOp(Operands.begin(), Operands.end());
return getAddRecExpr(NewOp, L, Flags);
}
@@ -626,7 +727,7 @@ class ScalarEvolution {
/// Predicates. If successful return these <AddRecExpr, Predicates>;
/// The function is intended to be called from PSCEV (the caller will decide
/// whether to actually add the predicates and carry out the rewrites).
- std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
+ std::optional<std::pair<SCEVUse, SmallVector<const SCEVPredicate *, 3>>>
createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI);
/// Returns an expression for a GEP
@@ -634,61 +735,61 @@ class ScalarEvolution {
/// \p GEP The GEP. The indices contained in the GEP itself are ignored,
/// instead we use IndexExprs.
/// \p IndexExprs The expressions for the indices.
- const SCEV *getGEPExpr(GEPOperator *GEP,
- const SmallVectorImpl<const SCEV *> &IndexExprs);
- const SCEV *getAbsExpr(const SCEV *Op, bool IsNSW);
- const SCEV *getMinMaxExpr(SCEVTypes Kind,
- SmallVectorImpl<const SCEV *> &Operands);
- const SCEV *getSequentialMinMaxExpr(SCEVTypes Kind,
- SmallVectorImpl<const SCEV *> &Operands);
- const SCEV *getSMaxExpr(const SCEV *LHS, const SCEV *RHS);
- const SCEV *getSMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
- const SCEV *getUMaxExpr(const SCEV *LHS, const SCEV *RHS);
- const SCEV *getUMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
- const SCEV *getSMinExpr(const SCEV *LHS, const SCEV *RHS);
- const SCEV *getSMinExpr(SmallVectorImpl<const SCEV *> &Operands);
- const SCEV *getUMinExpr(const SCEV *LHS, const SCEV *RHS,
- bool Sequential = false);
- const SCEV *getUMinExpr(SmallVectorImpl<const SCEV *> &Operands,
- bool Sequential = false);
- const SCEV *getUnknown(Value *V);
- const SCEV *getCouldNotCompute();
+ SCEVUse getGEPExpr(GEPOperator *GEP, ArrayRef<const SCEV *> IndexExprs);
+ SCEVUse getGEPExpr(GEPOperator *GEP,
+ const SmallVectorImpl<SCEVUse> &IndexExprs);
+ SCEVUse getAbsExpr(SCEVUse Op, bool IsNSW);
+ SCEVUse getMinMaxExpr(SCEVTypes Kind, ArrayRef<const SCEV *> Operands);
+ SCEVUse getMinMaxExpr(SCEVTypes Kind, SmallVectorImpl<SCEVUse> &Operands);
+ SCEVUse getSequentialMinMaxExpr(SCEVTypes Kind,
+ SmallVectorImpl<SCEVUse> &Operands);
+ SCEVUse getSMaxExpr(SCEVUse LHS, SCEVUse RHS);
+ SCEVUse getSMaxExpr(SmallVectorImpl<SCEVUse> &Operands);
+ SCEVUse getUMaxExpr(SCEVUse LHS, SCEVUse RHS);
+ SCEVUse getUMaxExpr(SmallVectorImpl<SCEVUse> &Operands);
+ SCEVUse getSMinExpr(SCEVUse LHS, SCEVUse RHS);
+ SCEVUse getSMinExpr(SmallVectorImpl<SCEVUse> &Operands);
+ SCEVUse getUMinExpr(SCEVUse LHS, SCEVUse RHS, bool Sequential = false);
+ SCEVUse getUMinExpr(SmallVectorImpl<SCEVUse> &Operands,
+ bool Sequential = false);
+ SCEVUse getUnknown(Value *V);
+ SCEVUse getCouldNotCompute();
/// Return a SCEV for the constant 0 of a specific type.
- const SCEV *getZero(Type *Ty) { return getConstant(Ty, 0); }
+ SCEVUse getZero(Type *Ty) { return getConstant(Ty, 0); }
/// Return a SCEV for the constant 1 of a specific type.
- const SCEV *getOne(Type *Ty) { return getConstant(Ty, 1); }
+ SCEVUse getOne(Type *Ty) { return getConstant(Ty, 1); }
/// Return a SCEV for the constant \p Power of two.
- const SCEV *getPowerOfTwo(Type *Ty, unsigned Power) {
+ SCEVUse getPowerOfTwo(Type *Ty, unsigned Power) {
assert(Power < getTypeSizeInBits(Ty) && "Power out of range");
return getConstant(APInt::getOneBitSet(getTypeSizeInBits(Ty), Power));
}
/// Return a SCEV for the constant -1 of a specific type.
- const SCEV *getMinusOne(Type *Ty) {
+ SCEVUse getMinusOne(Type *Ty) {
return getConstant(Ty, -1, /*isSigned=*/true);
}
/// Return an expression for a TypeSize.
- const SCEV *getSizeOfExpr(Type *IntTy, TypeSize Size);
+ SCEVUse getSizeOfExpr(Type *IntTy, TypeSize Size);
/// Return an expression for the alloc size of AllocTy that is type IntTy
- const SCEV *getSizeOfExpr(Type *IntTy, Type *AllocTy);
+ SCEVUse getSizeOfExpr(Type *IntTy, Type *AllocTy);
/// Return an expression for the store size of StoreTy that is type IntTy
- const SCEV *getStoreSizeOfExpr(Type *IntTy, Type *StoreTy);
+ SCEVUse getStoreSizeOfExpr(Type *IntTy, Type *StoreTy);
/// Return an expression for offsetof on the given field with type IntTy
- const SCEV *getOffsetOfExpr(Type *IntTy, StructType *STy, unsigned FieldNo);
+ SCEVUse getOffsetOfExpr(Type *IntTy, StructType *STy, unsigned FieldNo);
/// Return the SCEV object corresponding to -V.
- const SCEV *getNegativeSCEV(const SCEV *V,
- SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
+ SCEVUse getNegativeSCEV(SCEVUse V,
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
/// Return the SCEV object corresponding to ~V.
- const SCEV *getNotSCEV(const SCEV *V);
+ SCEVUse getNotSCEV(SCEVUse V);
/// Return LHS-RHS. Minus is represented in SCEV as A+B*-1.
///
@@ -697,9 +798,9 @@ class ScalarEvolution {
/// To compute the difference between two unrelated pointers, you can
/// explicitly convert the arguments using getPtrToIntExpr(), for pointer
/// types that support it.
- const SCEV *getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
- SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
- unsigned Depth = 0);
+ SCEVUse getMinusSCEV(SCEVUse LHS, SCEVUse RHS,
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+ unsigned Depth = 0);
/// Compute ceil(N / D). N and D are treated as unsigned values.
///
@@ -709,59 +810,59 @@ class ScalarEvolution {
/// umin(N, 1) + floor((N - umin(N, 1)) / D)
///
/// A denominator of zero or poison is handled the same way as getUDivExpr().
- const SCEV *getUDivCeilSCEV(const SCEV *N, const SCEV *D);
+ SCEVUse getUDivCeilSCEV(SCEVUse N, SCEVUse D);
/// Return a SCEV corresponding to a conversion of the input value to the
/// specified type. If the type must be extended, it is zero extended.
- const SCEV *getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
- unsigned Depth = 0);
+ SCEVUse getTruncateOrZeroExtend(SCEVUse V, Type *Ty, unsigned Depth = 0);
/// Return a SCEV corresponding to a conversion of the input value to the
/// specified type. If the type must be extended, it is sign extended.
- const SCEV *getTruncateOrSignExtend(const SCEV *V, Type *Ty,
- unsigned Depth = 0);
+ SCEVUse getTruncateOrSignExtend(SCEVUse V, Type *Ty, unsigned Depth = 0);
/// Return a SCEV corresponding to a conversion of the input value to the
/// specified type. If the type must be extended, it is zero extended. The
/// conversion must not be narrowing.
- const SCEV *getNoopOrZeroExtend(const SCEV *V, Type *Ty);
+ SCEVUse getNoopOrZeroExtend(SCEVUse V, Type *Ty);
/// Return a SCEV corresponding to a conversion of the input value to the
/// specified type. If the type must be extended, it is sign extended. The
/// conversion must not be narrowing.
- const SCEV *getNoopOrSignExtend(const SCEV *V, Type *Ty);
+ SCEVUse getNoopOrSignExtend(SCEVUse V, Type *Ty);
/// Return a SCEV corresponding to a conversion of the input value to the
/// specified type. If the type must be extended, it is extended with
/// unspecified bits. The conversion must not be narrowing.
- const SCEV *getNoopOrAnyExtend(const SCEV *V, Type *Ty);
+ SCEVUse getNoopOrAnyExtend(SCEVUse V, Type *Ty);
/// Return a SCEV corresponding to a conversion of the input value to the
/// specified type. The conversion must not be widening.
- const SCEV *getTruncateOrNoop(const SCEV *V, Type *Ty);
+ SCEVUse getTruncateOrNoop(SCEVUse V, Type *Ty);
/// Promote the operands to the wider of the types using zero-extension, and
/// then perform a umax operation with them.
- const SCEV *getUMaxFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS);
+ SCEVUse getUMaxFromMismatchedTypes(SCEVUse LHS, SCEVUse RHS);
/// Promote the operands to the wider of the types using zero-extension, and
/// then perform a umin operation with them.
- const SCEV *getUMinFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS,
- bool Sequential = false);
+ SCEVUse getUMinFromMismatchedTypes(SCEVUse LHS, SCEVUse RHS,
+ bool Sequential = false);
/// Promote the operands to the wider of the types using zero-extension, and
/// then perform a umin operation with them. N-ary function.
- const SCEV *getUMinFromMismatchedTypes(SmallVectorImpl<const SCEV *> &Ops,
- bool Sequential = false);
+ SCEVUse getUMinFromMismatchedTypes(ArrayRef<const SCEV *> Ops,
+ bool Sequential = false);
+ SCEVUse getUMinFromMismatchedTypes(SmallVectorImpl<SCEVUse> &Ops,
+ bool Sequential = false);
/// Transitively follow the chain of pointer-type operands until reaching a
/// SCEV that does not have a single pointer operand. This returns a
/// SCEVUnknown pointer for well-formed pointer-type expressions, but corner
/// cases do exist.
- const SCEV *getPointerBase(const SCEV *V);
+ SCEVUse getPointerBase(SCEVUse V);
/// Compute an expression equivalent to S - getPointerBase(S).
- const SCEV *removePointerBase(const SCEV *S);
+ SCEVUse removePointerBase(SCEVUse S);
/// Return a SCEV expression for the specified value at the specified scope
/// in the program. The L value specifies a loop nest to evaluate the
@@ -773,31 +874,31 @@ class ScalarEvolution {
///
/// In the case that a relevant loop exit value cannot be computed, the
/// original value V is returned.
- const SCEV *getSCEVAtScope(const SCEV *S, const Loop *L);
+ SCEVUse getSCEVAtScope(SCEVUse S, const Loop *L);
/// This is a convenience function which does getSCEVAtScope(getSCEV(V), L).
- const SCEV *getSCEVAtScope(Value *V, const Loop *L);
+ SCEVUse getSCEVAtScope(Value *V, const Loop *L);
/// Test whether entry to the loop is protected by a conditional between LHS
/// and RHS. This is used to help avoid max expressions in loop trip
/// counts, and to eliminate casts.
bool isLoopEntryGuardedByCond(const Loop *L, ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS);
+ SCEVUse LHS, SCEVUse RHS);
/// Test whether entry to the basic block is protected by a conditional
/// between LHS and RHS.
bool isBasicBlockEntryGuardedByCond(const BasicBlock *BB,
- ICmpInst::Predicate Pred, const SCEV *LHS,
- const SCEV *RHS);
+ ICmpInst::Predicate Pred, SCEVUse LHS,
+ SCEVUse RHS);
/// Test whether the backedge of the loop is protected by a conditional
/// between LHS and RHS. This is used to eliminate casts.
bool isLoopBackedgeGuardedByCond(const Loop *L, ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS);
+ SCEVUse LHS, SCEVUse RHS);
/// A version of getTripCountFromExitCount below which always picks an
/// evaluation type which cannot result in overflow.
- const SCEV *getTripCountFromExitCount(const SCEV *ExitCount);
+ SCEVUse getTripCountFromExitCount(SCEVUse ExitCount);
/// Convert from an "exit count" (i.e. "backedge taken count") to a "trip
/// count". A "trip count" is the number of times the header of the loop
@@ -806,8 +907,8 @@ class ScalarEvolution {
/// expression can overflow if ExitCount = UINT_MAX. If EvalTy is not wide
/// enough to hold the result without overflow, the result wraps unsigned with
/// 2s-complement semantics, e.g. EC = 255 (i8) yields TC = 0 (i8).
- const SCEV *getTripCountFromExitCount(const SCEV *ExitCount, Type *EvalTy,
- const Loop *L);
+ SCEVUse getTripCountFromExitCount(SCEVUse ExitCount, Type *EvalTy,
+ const Loop *L);
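The i8 example above, spelled out as plain integer arithmetic (self-contained sketch, not part of the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint8_t EC = 255;    // exit count in i8
      uint8_t TC = EC + 1; // trip count = EC + 1 wraps: 255 + 1 == 0 in i8
      assert(TC == 0);     // unsigned wrap with 2s-complement semantics
      return 0;
    }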
/// Returns the exact trip count of the loop if we can compute it, and
/// the result is a small constant. '0' is used to represent an unknown
@@ -838,8 +939,7 @@ class ScalarEvolution {
/// unknown or not guaranteed to be a multiple of a constant. Will also
/// return 1 if the trip count is very large (>= 2^32).
/// Note that the argument is an exit count for loop L, NOT a trip count.
- unsigned getSmallConstantTripMultiple(const Loop *L,
- const SCEV *ExitCount);
+ unsigned getSmallConstantTripMultiple(const Loop *L, SCEVUse ExitCount);
/// Returns the largest constant divisor of the trip count of the
/// loop. Will return 1 if no trip count could be computed, or if a
@@ -874,8 +974,8 @@ class ScalarEvolution {
/// getBackedgeTakenCount. The loop is guaranteed to exit (via *some* exit)
/// before the backedge is executed (ExitCount + 1) times. Note that there
/// is no guarantee about *which* exit is taken on the exiting iteration.
- const SCEV *getExitCount(const Loop *L, const BasicBlock *ExitingBlock,
- ExitCountKind Kind = Exact);
+ SCEVUse getExitCount(const Loop *L, const BasicBlock *ExitingBlock,
+ ExitCountKind Kind = Exact);
/// If the specified loop has a predictable backedge-taken count, return it,
/// otherwise return a SCEVCouldNotCompute object. The backedge-taken count is
@@ -887,20 +987,20 @@ class ScalarEvolution {
/// Note that it is not valid to call this method on a loop without a
/// loop-invariant backedge-taken count (see
/// hasLoopInvariantBackedgeTakenCount).
- const SCEV *getBackedgeTakenCount(const Loop *L, ExitCountKind Kind = Exact);
+ SCEVUse getBackedgeTakenCount(const Loop *L, ExitCountKind Kind = Exact);
/// Similar to getBackedgeTakenCount, except it will add a set of
/// SCEV predicates to Predicates that are required to be true in order for
/// the answer to be correct. Predicates can be checked with run-time
/// checks and can be used to perform loop versioning.
- const SCEV *getPredicatedBackedgeTakenCount(const Loop *L,
- SmallVector<const SCEVPredicate *, 4> &Predicates);
+ SCEVUse getPredicatedBackedgeTakenCount(
+ const Loop *L, SmallVector<const SCEVPredicate *, 4> &Predicates);
/// When successful, this returns a SCEVConstant that is greater than or equal
/// to (i.e. a "conservative over-approximation" of) the value returned by
/// getBackedgeTakenCount. If such a value cannot be computed, it returns the
/// SCEVCouldNotCompute object.
- const SCEV *getConstantMaxBackedgeTakenCount(const Loop *L) {
+ SCEVUse getConstantMaxBackedgeTakenCount(const Loop *L) {
return getBackedgeTakenCount(L, ConstantMaximum);
}
@@ -908,7 +1008,7 @@ class ScalarEvolution {
/// to (i.e. a "conservative over-approximation" of) the value returned by
/// getBackedgeTakenCount. If such a value cannot be computed, it returns the
/// SCEVCouldNotCompute object.
- const SCEV *getSymbolicMaxBackedgeTakenCount(const Loop *L) {
+ SCEVUse getSymbolicMaxBackedgeTakenCount(const Loop *L) {
return getBackedgeTakenCount(L, SymbolicMaximum);
}
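A hedged usage sketch for the two wrappers above; SE and L are assumed in scope, and the SCEVCouldNotCompute test goes through getPointer() so it only relies on the classof overloads that already exist for const SCEV *:

    // True when SCEV can bound the backedge-taken count by a constant.
    static bool hasConstantMaxBTC(ScalarEvolution &SE, const Loop *L) {
      SCEVUse MaxBTC = SE.getConstantMaxBackedgeTakenCount(L);
      return !isa<SCEVCouldNotCompute>(MaxBTC.getPointer());
    }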
@@ -966,60 +1066,60 @@ class ScalarEvolution {
/// (at every loop iteration). It is, at the same time, the minimum number
/// of times S is divisible by 2. For example, given {4,+,8} it returns 2.
/// If S is guaranteed to be 0, it returns the bitwidth of S.
- uint32_t getMinTrailingZeros(const SCEV *S);
+ uint32_t getMinTrailingZeros(SCEVUse S);
/// Returns the max constant multiple of S.
- APInt getConstantMultiple(const SCEV *S);
+ APInt getConstantMultiple(SCEVUse S);
// Returns the max constant multiple of S. If S is exactly 0, return 1.
- APInt getNonZeroConstantMultiple(const SCEV *S);
+ APInt getNonZeroConstantMultiple(SCEVUse S);
/// Determine the unsigned range for a particular SCEV.
/// NOTE: This returns a copy of the reference returned by getRangeRef.
- ConstantRange getUnsignedRange(const SCEV *S) {
+ ConstantRange getUnsignedRange(SCEVUse S) {
return getRangeRef(S, HINT_RANGE_UNSIGNED);
}
/// Determine the min of the unsigned range for a particular SCEV.
- APInt getUnsignedRangeMin(const SCEV *S) {
+ APInt getUnsignedRangeMin(SCEVUse S) {
return getRangeRef(S, HINT_RANGE_UNSIGNED).getUnsignedMin();
}
/// Determine the max of the unsigned range for a particular SCEV.
- APInt getUnsignedRangeMax(const SCEV *S) {
+ APInt getUnsignedRangeMax(SCEVUse S) {
return getRangeRef(S, HINT_RANGE_UNSIGNED).getUnsignedMax();
}
/// Determine the signed range for a particular SCEV.
/// NOTE: This returns a copy of the reference returned by getRangeRef.
- ConstantRange getSignedRange(const SCEV *S) {
+ ConstantRange getSignedRange(SCEVUse S) {
return getRangeRef(S, HINT_RANGE_SIGNED);
}
/// Determine the min of the signed range for a particular SCEV.
- APInt getSignedRangeMin(const SCEV *S) {
+ APInt getSignedRangeMin(SCEVUse S) {
return getRangeRef(S, HINT_RANGE_SIGNED).getSignedMin();
}
/// Determine the max of the signed range for a particular SCEV.
- APInt getSignedRangeMax(const SCEV *S) {
+ APInt getSignedRangeMax(SCEVUse S) {
return getRangeRef(S, HINT_RANGE_SIGNED).getSignedMax();
}
/// Test if the given expression is known to be negative.
- bool isKnownNegative(const SCEV *S);
+ bool isKnownNegative(SCEVUse S);
/// Test if the given expression is known to be positive.
- bool isKnownPositive(const SCEV *S);
+ bool isKnownPositive(SCEVUse S);
/// Test if the given expression is known to be non-negative.
- bool isKnownNonNegative(const SCEV *S);
+ bool isKnownNonNegative(SCEVUse S);
/// Test if the given expression is known to be non-positive.
- bool isKnownNonPositive(const SCEV *S);
+ bool isKnownNonPositive(SCEVUse S);
/// Test if the given expression is known to be non-zero.
- bool isKnownNonZero(const SCEV *S);
+ bool isKnownNonZero(SCEVUse S);
/// Splits SCEV expression \p S into two SCEVs. One of them is obtained from
/// \p S by substitution of all AddRec sub-expressions related to loop \p L
@@ -1037,8 +1137,7 @@ class ScalarEvolution {
/// 0 (initial value) for the first element and to {1, +, 1}<L1> (post
/// increment value) for the second one. In both cases AddRec expression
/// related to L2 remains the same.
- std::pair<const SCEV *, const SCEV *> SplitIntoInitAndPostInc(const Loop *L,
- const SCEV *S);
+ std::pair<SCEVUse, SCEVUse> SplitIntoInitAndPostInc(const Loop *L, SCEVUse S);
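Since the result is now a std::pair of SCEVUse, a structured binding reads naturally; a sketch assuming SE, L and S are in scope:

    // Init: AddRecs for L replaced by their initial values; PostInc: by
    // their post-increment values. AddRecs of other loops are untouched.
    auto [Init, PostInc] = SE.SplitIntoInitAndPostInc(L, S);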
/// We'd like to check the predicate on every iteration of the most dominated
/// loop between loops used in LHS and RHS.
@@ -1058,46 +1157,43 @@ class ScalarEvolution {
/// so we can assert on that.
/// e. Return true if isLoopEntryGuardedByCond(Pred, E(LHS), E(RHS)) &&
/// isLoopBackedgeGuardedByCond(Pred, B(LHS), B(RHS))
- bool isKnownViaInduction(ICmpInst::Predicate Pred, const SCEV *LHS,
- const SCEV *RHS);
+ bool isKnownViaInduction(ICmpInst::Predicate Pred, SCEVUse LHS, SCEVUse RHS);
/// Test if the given expression is known to satisfy the condition described
/// by Pred, LHS, and RHS.
- bool isKnownPredicate(ICmpInst::Predicate Pred, const SCEV *LHS,
- const SCEV *RHS);
+ bool isKnownPredicate(ICmpInst::Predicate Pred, SCEVUse LHS, SCEVUse RHS);
/// Check whether the condition described by Pred, LHS, and RHS is true or
/// false. If we know it, return the evaluation of this condition. If neither
/// is proved, return std::nullopt.
- std::optional<bool> evaluatePredicate(ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS);
+ std::optional<bool> evaluatePredicate(ICmpInst::Predicate Pred, SCEVUse LHS,
+ SCEVUse RHS);
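A sketch of the tri-state contract (LHS, RHS and SE assumed in scope; both handler functions are hypothetical):

    std::optional<bool> Known =
        SE.evaluatePredicate(ICmpInst::ICMP_ULT, LHS, RHS);
    if (Known)
      useProvedFact(*Known); // LHS u< RHS is provably *Known
    else
      handleUnknown();       // neither direction proved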
/// Test if the given expression is known to satisfy the condition described
/// by Pred, LHS, and RHS in the given Context.
- bool isKnownPredicateAt(ICmpInst::Predicate Pred, const SCEV *LHS,
- const SCEV *RHS, const Instruction *CtxI);
+ bool isKnownPredicateAt(ICmpInst::Predicate Pred, SCEVUse LHS, SCEVUse RHS,
+ const Instruction *CtxI);
/// Check whether the condition described by Pred, LHS, and RHS is true or
/// false in the given \p Context. If we know it, return the evaluation of
/// this condition. If neither is proved, return std::nullopt.
- std::optional<bool> evaluatePredicateAt(ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS,
- const Instruction *CtxI);
+ std::optional<bool> evaluatePredicateAt(ICmpInst::Predicate Pred, SCEVUse LHS,
+ SCEVUse RHS, const Instruction *CtxI);
/// Test if the condition described by Pred, LHS, RHS is known to be true on
/// every iteration of the loop of the recurrency LHS.
bool isKnownOnEveryIteration(ICmpInst::Predicate Pred,
- const SCEVAddRecExpr *LHS, const SCEV *RHS);
+ const SCEVAddRecExpr *LHS, SCEVUse RHS);
/// Information about the number of loop iterations for which a loop exit's
/// branch condition evaluates to the not-taken path. This is a temporary
/// pair of exact and max expressions that are eventually summarized in
/// ExitNotTakenInfo and BackedgeTakenInfo.
struct ExitLimit {
- const SCEV *ExactNotTaken; // The exit is not taken exactly this many times
- const SCEV *ConstantMaxNotTaken; // The exit is not taken at most this many
- // times
- const SCEV *SymbolicMaxNotTaken;
+ SCEVUse ExactNotTaken; // The exit is not taken exactly this many times
+ SCEVUse ConstantMaxNotTaken; // The exit is not taken at most this many
+ // times
+ SCEVUse SymbolicMaxNotTaken;
// Not taken either exactly ConstantMaxNotTaken or zero times
bool MaxOrZero = false;
@@ -1115,16 +1211,15 @@ class ScalarEvolution {
/// Construct either an exact exit limit from a constant, or an unknown
/// one from a SCEVCouldNotCompute. No other types of SCEVs are allowed
/// as arguments and asserts enforce that internally.
- /*implicit*/ ExitLimit(const SCEV *E);
+ /*implicit*/ ExitLimit(SCEVUse E);
- ExitLimit(
- const SCEV *E, const SCEV *ConstantMaxNotTaken,
- const SCEV *SymbolicMaxNotTaken, bool MaxOrZero,
- ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList =
- std::nullopt);
+ ExitLimit(SCEVUse E, SCEVUse ConstantMaxNotTaken,
+ SCEVUse SymbolicMaxNotTaken, bool MaxOrZero,
+ ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *>
+ PredSetList = std::nullopt);
- ExitLimit(const SCEV *E, const SCEV *ConstantMaxNotTaken,
- const SCEV *SymbolicMaxNotTaken, bool MaxOrZero,
+ ExitLimit(SCEVUse E, SCEVUse ConstantMaxNotTaken,
+ SCEVUse SymbolicMaxNotTaken, bool MaxOrZero,
const SmallPtrSetImpl<const SCEVPredicate *> &PredSet);
/// Test whether this ExitLimit contains any computed information, or
@@ -1175,20 +1270,18 @@ class ScalarEvolution {
struct LoopInvariantPredicate {
ICmpInst::Predicate Pred;
- const SCEV *LHS;
- const SCEV *RHS;
+ SCEVUse LHS;
+ SCEVUse RHS;
- LoopInvariantPredicate(ICmpInst::Predicate Pred, const SCEV *LHS,
- const SCEV *RHS)
+ LoopInvariantPredicate(ICmpInst::Predicate Pred, SCEVUse LHS, SCEVUse RHS)
: Pred(Pred), LHS(LHS), RHS(RHS) {}
};
/// If the result of the predicate LHS `Pred` RHS is loop invariant with
/// respect to L, return a LoopInvariantPredicate with LHS and RHS being
/// invariants, available at L's entry. Otherwise, return std::nullopt.
std::optional<LoopInvariantPredicate>
- getLoopInvariantPredicate(ICmpInst::Predicate Pred, const SCEV *LHS,
- const SCEV *RHS, const Loop *L,
- const Instruction *CtxI = nullptr);
+ getLoopInvariantPredicate(ICmpInst::Predicate Pred, SCEVUse LHS, SCEVUse RHS,
+ const Loop *L, const Instruction *CtxI = nullptr);
/// If the result of the predicate LHS `Pred` RHS is loop invariant with
/// respect to L at given Context during at least first MaxIter iterations,
@@ -1197,59 +1290,61 @@ class ScalarEvolution {
/// should be the loop's exit condition.
std::optional<LoopInvariantPredicate>
getLoopInvariantExitCondDuringFirstIterations(ICmpInst::Predicate Pred,
- const SCEV *LHS,
- const SCEV *RHS, const Loop *L,
+ SCEVUse LHS, SCEVUse RHS,
+ const Loop *L,
const Instruction *CtxI,
- const SCEV *MaxIter);
+ SCEVUse MaxIter);
std::optional<LoopInvariantPredicate>
- getLoopInvariantExitCondDuringFirstIterationsImpl(
- ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
- const Instruction *CtxI, const SCEV *MaxIter);
+ getLoopInvariantExitCondDuringFirstIterationsImpl(ICmpInst::Predicate Pred,
+ SCEVUse LHS, SCEVUse RHS,
+ const Loop *L,
+ const Instruction *CtxI,
+ SCEVUse MaxIter);
/// Simplify LHS and RHS in a comparison with predicate Pred. Return true
/// iff any changes were made. If the operands are provably equal or
/// unequal, LHS and RHS are set to the same value and Pred is set to either
/// ICMP_EQ or ICMP_NE.
- bool SimplifyICmpOperands(ICmpInst::Predicate &Pred, const SCEV *&LHS,
- const SCEV *&RHS, unsigned Depth = 0);
+ bool SimplifyICmpOperands(ICmpInst::Predicate &Pred, SCEVUse &LHS,
+ SCEVUse &RHS, unsigned Depth = 0);
/// Return the "disposition" of the given SCEV with respect to the given
/// loop.
- LoopDisposition getLoopDisposition(const SCEV *S, const Loop *L);
+ LoopDisposition getLoopDisposition(SCEVUse S, const Loop *L);
/// Return true if the value of the given SCEV is unchanging in the
/// specified loop.
- bool isLoopInvariant(const SCEV *S, const Loop *L);
+ bool isLoopInvariant(SCEVUse S, const Loop *L);
/// Determine if the SCEV can be evaluated at loop's entry. It is true if it
/// doesn't depend on a SCEVUnknown of an instruction which is dominated by
/// the header of loop L.
- bool isAvailableAtLoopEntry(const SCEV *S, const Loop *L);
+ bool isAvailableAtLoopEntry(SCEVUse S, const Loop *L);
/// Return true if the given SCEV changes value in a known way in the
/// specified loop. This property being true implies that the value is
/// variant in the loop AND that we can emit an expression to compute the
/// value of the expression at any particular loop iteration.
- bool hasComputableLoopEvolution(const SCEV *S, const Loop *L);
+ bool hasComputableLoopEvolution(SCEVUse S, const Loop *L);
/// Return the "disposition" of the given SCEV with respect to the given
/// block.
- BlockDisposition getBlockDisposition(const SCEV *S, const BasicBlock *BB);
+ BlockDisposition getBlockDisposition(SCEVUse S, const BasicBlock *BB);
/// Return true if elements that make up the given SCEV dominate the
/// specified basic block.
- bool dominates(const SCEV *S, const BasicBlock *BB);
+ bool dominates(SCEVUse S, const BasicBlock *BB);
/// Return true if elements that make up the given SCEV properly dominate
/// the specified basic block.
- bool properlyDominates(const SCEV *S, const BasicBlock *BB);
+ bool properlyDominates(SCEVUse S, const BasicBlock *BB);
/// Test whether the given SCEV has Op as a direct or indirect operand.
- bool hasOperand(const SCEV *S, const SCEV *Op) const;
+ bool hasOperand(SCEVUse S, SCEVUse Op) const;
/// Return the size of an element read or written by Inst.
- const SCEV *getElementSize(Instruction *Inst);
+ SCEVUse getElementSize(Instruction *Inst);
void print(raw_ostream &OS) const;
void verify() const;
@@ -1262,22 +1357,21 @@ class ScalarEvolution {
return F.getParent()->getDataLayout();
}
- const SCEVPredicate *getEqualPredicate(const SCEV *LHS, const SCEV *RHS);
+ const SCEVPredicate *getEqualPredicate(SCEVUse LHS, SCEVUse RHS);
const SCEVPredicate *getComparePredicate(ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS);
+ SCEVUse LHS, SCEVUse RHS);
const SCEVPredicate *
getWrapPredicate(const SCEVAddRecExpr *AR,
SCEVWrapPredicate::IncrementWrapFlags AddedFlags);
/// Re-writes the SCEV according to the Predicates in \p A.
- const SCEV *rewriteUsingPredicate(const SCEV *S, const Loop *L,
- const SCEVPredicate &A);
+ SCEVUse rewriteUsingPredicate(SCEVUse S, const Loop *L,
+ const SCEVPredicate &A);
/// Tries to convert the \p S expression to an AddRec expression,
/// adding additional predicates to \p Preds as required.
const SCEVAddRecExpr *convertSCEVToAddRecWithPredicates(
- const SCEV *S, const Loop *L,
- SmallPtrSetImpl<const SCEVPredicate *> &Preds);
+ SCEVUse S, const Loop *L, SmallPtrSetImpl<const SCEVPredicate *> &Preds);
/// Compute \p LHS - \p RHS and returns the result as an APInt if it is a
/// constant, and std::nullopt if it isn't.
@@ -1286,8 +1380,7 @@ class ScalarEvolution {
/// frugal here since we just bail out of actually constructing and
/// canonicalizing an expression in the cases where the result isn't going
/// to be a constant.
- std::optional<APInt> computeConstantDifference(const SCEV *LHS,
- const SCEV *RHS);
+ std::optional<APInt> computeConstantDifference(SCEVUse LHS, SCEVUse RHS);
/// Update no-wrap flags of an AddRec. This may drop the cached info about
/// this AddRec (such as range info) in case if new flags may potentially
@@ -1295,7 +1388,7 @@ class ScalarEvolution {
void setNoWrapFlags(SCEVAddRecExpr *AddRec, SCEV::NoWrapFlags Flags);
/// Try to apply information from loop guards for \p L to \p Expr.
- const SCEV *applyLoopGuards(const SCEV *Expr, const Loop *L);
+ SCEVUse applyLoopGuards(SCEVUse Expr, const Loop *L);
/// Return true if the loop has no abnormal exits. That is, if the loop
/// is not infinite, it must exit through an explicit edge in the CFG.
@@ -1313,22 +1406,22 @@ class ScalarEvolution {
/// being poison as well. The returned set may be incomplete, i.e. there can
/// be additional Values that also result in S being poison.
void getPoisonGeneratingValues(SmallPtrSetImpl<const Value *> &Result,
- const SCEV *S);
+ SCEVUse S);
/// Check whether it is poison-safe to represent the expression S using the
/// instruction I. If such a replacement is performed, the poison flags of
/// instructions in DropPoisonGeneratingInsts must be dropped.
bool canReuseInstruction(
- const SCEV *S, Instruction *I,
+ SCEVUse S, Instruction *I,
SmallVectorImpl<Instruction *> &DropPoisonGeneratingInsts);
class FoldID {
- const SCEV *Op = nullptr;
+ SCEVUse Op = nullptr;
const Type *Ty = nullptr;
unsigned short C;
public:
- FoldID(SCEVTypes C, const SCEV *Op, const Type *Ty) : Op(Op), Ty(Ty), C(C) {
+ FoldID(SCEVTypes C, SCEVUse Op, const Type *Ty) : Op(Op), Ty(Ty), C(C) {
assert(Op);
assert(Ty);
}
@@ -1337,8 +1430,9 @@ class ScalarEvolution {
unsigned computeHash() const {
return detail::combineHashValue(
- C, detail::combineHashValue(reinterpret_cast<uintptr_t>(Op),
- reinterpret_cast<uintptr_t>(Ty)));
+ C,
+ detail::combineHashValue(reinterpret_cast<uintptr_t>(Op.getPointer()),
+ reinterpret_cast<uintptr_t>(Ty)));
}
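Only the underlying SCEV pointer feeds the hash, so two uses of the same expression key the same FoldCache bucket regardless of any use-specific bits. A sketch of that property, assuming SCEVUse is PointerIntPair-backed as this PR proposes (the two-argument constructor shape is hypothetical):

    SCEVUse A(S, /*UseBits=*/0); // hypothetical constructor
    SCEVUse B(S, /*UseBits=*/1);
    assert(A.getPointer() == B.getPointer()); // identical hash input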
bool operator==(const FoldID &RHS) const {
@@ -1387,14 +1481,14 @@ class ScalarEvolution {
std::unique_ptr<SCEVCouldNotCompute> CouldNotCompute;
/// The type for HasRecMap.
- using HasRecMapType = DenseMap<const SCEV *, bool>;
+ using HasRecMapType = DenseMap<SCEVUse, bool>;
/// This is a cache to record whether a SCEV contains any scAddRecExpr.
HasRecMapType HasRecMap;
/// The type for ExprValueMap.
using ValueSetVector = SmallSetVector<Value *, 4>;
- using ExprValueMapType = DenseMap<const SCEV *, ValueSetVector>;
+ using ExprValueMapType = DenseMap<SCEVUse, ValueSetVector>;
/// ExprValueMap -- This map records the original values from which
/// the SCEV expr is generated from.
@@ -1402,15 +1496,15 @@ class ScalarEvolution {
/// The type for ValueExprMap.
using ValueExprMapType =
- DenseMap<SCEVCallbackVH, const SCEV *, DenseMapInfo<Value *>>;
+ DenseMap<SCEVCallbackVH, SCEVUse, DenseMapInfo<Value *>>;
/// This is a cache of the values we have analyzed so far.
ValueExprMapType ValueExprMap;
/// This is a cache for expressions that got folded to a different existing
/// SCEV.
- DenseMap<FoldID, const SCEV *> FoldCache;
- DenseMap<const SCEV *, SmallVector<FoldID, 2>> FoldCacheUser;
+ DenseMap<FoldID, SCEVUse> FoldCache;
+ DenseMap<SCEVUse, SmallVector<FoldID, 2>> FoldCacheUser;
/// Mark predicate values currently being processed by isImpliedCond.
SmallPtrSet<const Value *, 6> PendingLoopPredicates;
@@ -1433,26 +1527,26 @@ class ScalarEvolution {
bool ProvingSplitPredicate = false;
/// Memoized values for the getConstantMultiple
- DenseMap<const SCEV *, APInt> ConstantMultipleCache;
+ DenseMap<SCEVUse, APInt> ConstantMultipleCache;
/// Return the Value set from which the SCEV expr is generated.
- ArrayRef<Value *> getSCEVValues(const SCEV *S);
+ ArrayRef<Value *> getSCEVValues(SCEVUse S);
/// Private helper method for the getConstantMultiple method.
- APInt getConstantMultipleImpl(const SCEV *S);
+ APInt getConstantMultipleImpl(SCEVUse S);
/// Information about the number of times a particular loop exit may be
/// reached before exiting the loop.
struct ExitNotTakenInfo {
PoisoningVH<BasicBlock> ExitingBlock;
- const SCEV *ExactNotTaken;
- const SCEV *ConstantMaxNotTaken;
- const SCEV *SymbolicMaxNotTaken;
+ SCEVUse ExactNotTaken;
+ SCEVUse ConstantMaxNotTaken;
+ SCEVUse SymbolicMaxNotTaken;
SmallPtrSet<const SCEVPredicate *, 4> Predicates;
explicit ExitNotTakenInfo(
- PoisoningVH<BasicBlock> ExitingBlock, const SCEV *ExactNotTaken,
- const SCEV *ConstantMaxNotTaken, const SCEV *SymbolicMaxNotTaken,
+ PoisoningVH<BasicBlock> ExitingBlock, SCEVUse ExactNotTaken,
+ SCEVUse ConstantMaxNotTaken, SCEVUse SymbolicMaxNotTaken,
const SmallPtrSet<const SCEVPredicate *, 4> &Predicates)
: ExitingBlock(ExitingBlock), ExactNotTaken(ExactNotTaken),
ConstantMaxNotTaken(ConstantMaxNotTaken),
@@ -1476,7 +1570,7 @@ class ScalarEvolution {
/// Expression indicating the least constant maximum backedge-taken count of
/// the loop that is known, or a SCEVCouldNotCompute. This expression is
/// only valid if the predicates associated with all loop exits are true.
- const SCEV *ConstantMax = nullptr;
+ SCEVUse ConstantMax = nullptr;
/// Indicating if \c ExitNotTaken has an element for every exiting block in
/// the loop.
@@ -1484,13 +1578,13 @@ class ScalarEvolution {
/// Expression indicating the least maximum backedge-taken count of the loop
/// that is known, or a SCEVCouldNotCompute. Lazily computed on first query.
- const SCEV *SymbolicMax = nullptr;
+ SCEVUse SymbolicMax = nullptr;
/// True iff the backedge is taken either exactly Max or zero times.
bool MaxOrZero = false;
bool isComplete() const { return IsComplete; }
- const SCEV *getConstantMax() const { return ConstantMax; }
+ SCEVUse getConstantMax() const { return ConstantMax; }
public:
BackedgeTakenInfo() = default;
@@ -1501,7 +1595,7 @@ class ScalarEvolution {
/// Initialize BackedgeTakenInfo from a list of exact exit counts.
BackedgeTakenInfo(ArrayRef<EdgeExitInfo> ExitCounts, bool IsComplete,
- const SCEV *ConstantMax, bool MaxOrZero);
+ SCEVUse ConstantMax, bool MaxOrZero);
/// Test whether this BackedgeTakenInfo contains any computed information,
/// or whether it's all SCEVCouldNotCompute values.
@@ -1531,29 +1625,29 @@ class ScalarEvolution {
/// If we allowed SCEV predicates to be generated when populating this
/// vector, this information can contain them and therefore a
/// SCEVPredicate argument should be added to getExact.
- const SCEV *getExact(const Loop *L, ScalarEvolution *SE,
- SmallVector<const SCEVPredicate *, 4> *Predicates = nullptr) const;
+ SCEVUse
+ getExact(const Loop *L, ScalarEvolution *SE,
+ SmallVector<const SCEVPredicate *, 4> *Predicates = nullptr) const;
/// Return the number of times this loop exit may fall through to the back
/// edge, or SCEVCouldNotCompute. The loop is guaranteed not to exit via
/// this block before this number of iterations, but may exit via another
/// block.
- const SCEV *getExact(const BasicBlock *ExitingBlock,
- ScalarEvolution *SE) const;
+ SCEVUse getExact(const BasicBlock *ExitingBlock, ScalarEvolution *SE) const;
/// Get the constant max backedge taken count for the loop.
- const SCEV *getConstantMax(ScalarEvolution *SE) const;
+ SCEVUse getConstantMax(ScalarEvolution *SE) const;
/// Get the constant max backedge taken count for the particular loop exit.
- const SCEV *getConstantMax(const BasicBlock *ExitingBlock,
- ScalarEvolution *SE) const;
+ SCEVUse getConstantMax(const BasicBlock *ExitingBlock,
+ ScalarEvolution *SE) const;
/// Get the symbolic max backedge taken count for the loop.
- const SCEV *getSymbolicMax(const Loop *L, ScalarEvolution *SE);
+ SCEVUse getSymbolicMax(const Loop *L, ScalarEvolution *SE);
/// Get the symbolic max backedge taken count for the particular loop exit.
- const SCEV *getSymbolicMax(const BasicBlock *ExitingBlock,
- ScalarEvolution *SE) const;
+ SCEVUse getSymbolicMax(const BasicBlock *ExitingBlock,
+ ScalarEvolution *SE) const;
/// Return true if the number of times this backedge is taken is either the
/// value returned by getConstantMax or zero.
@@ -1569,7 +1663,7 @@ class ScalarEvolution {
DenseMap<const Loop *, BackedgeTakenInfo> PredicatedBackedgeTakenCounts;
/// Loops whose backedge taken counts directly use this non-constant SCEV.
- DenseMap<const SCEV *, SmallPtrSet<PointerIntPair<const Loop *, 1, bool>, 4>>
+ DenseMap<SCEVUse, SmallPtrSet<PointerIntPair<const Loop *, 1, bool>, 4>>
BECountUsers;
/// This map contains entries for all of the PHI instructions that we
@@ -1581,16 +1675,16 @@ class ScalarEvolution {
/// This map contains entries for all the expressions that we attempt to
/// compute getSCEVAtScope information for, which can be expensive in
/// extreme cases.
- DenseMap<const SCEV *, SmallVector<std::pair<const Loop *, const SCEV *>, 2>>
+ DenseMap<SCEVUse, SmallVector<std::pair<const Loop *, SCEVUse>, 2>>
ValuesAtScopes;
/// Reverse map for invalidation purposes: Stores of which SCEV and which
/// loop this is the value-at-scope of.
- DenseMap<const SCEV *, SmallVector<std::pair<const Loop *, const SCEV *>, 2>>
+ DenseMap<SCEVUse, SmallVector<std::pair<const Loop *, SCEVUse>, 2>>
ValuesAtScopesUsers;
/// Memoized computeLoopDisposition results.
- DenseMap<const SCEV *,
+ DenseMap<SCEVUse,
SmallVector<PointerIntPair<const Loop *, 2, LoopDisposition>, 2>>
LoopDispositions;
@@ -1618,33 +1712,33 @@ class ScalarEvolution {
}
/// Compute a LoopDisposition value.
- LoopDisposition computeLoopDisposition(const SCEV *S, const Loop *L);
+ LoopDisposition computeLoopDisposition(SCEVUse S, const Loop *L);
/// Memoized computeBlockDisposition results.
DenseMap<
- const SCEV *,
+ SCEVUse,
SmallVector<PointerIntPair<const BasicBlock *, 2, BlockDisposition>, 2>>
BlockDispositions;
/// Compute a BlockDisposition value.
- BlockDisposition computeBlockDisposition(const SCEV *S, const BasicBlock *BB);
+ BlockDisposition computeBlockDisposition(SCEVUse S, const BasicBlock *BB);
/// Stores all SCEVs that use a given SCEV as a direct operand.
- DenseMap<const SCEV *, SmallPtrSet<const SCEV *, 8> > SCEVUsers;
+ DenseMap<SCEVUse, SmallPtrSet<SCEVUse, 8>> SCEVUsers;
/// Memoized results from getRange
- DenseMap<const SCEV *, ConstantRange> UnsignedRanges;
+ DenseMap<SCEVUse, ConstantRange> UnsignedRanges;
/// Memoized results from getRange
- DenseMap<const SCEV *, ConstantRange> SignedRanges;
+ DenseMap<SCEVUse, ConstantRange> SignedRanges;
/// Used to parameterize getRange
enum RangeSignHint { HINT_RANGE_UNSIGNED, HINT_RANGE_SIGNED };
/// Set the memoized range for the given SCEV.
- const ConstantRange &setRange(const SCEV *S, RangeSignHint Hint,
+ const ConstantRange &setRange(SCEVUse S, RangeSignHint Hint,
ConstantRange CR) {
- DenseMap<const SCEV *, ConstantRange> &Cache =
+ DenseMap<SCEVUse, ConstantRange> &Cache =
Hint == HINT_RANGE_UNSIGNED ? UnsignedRanges : SignedRanges;
auto Pair = Cache.try_emplace(S, std::move(CR));
@@ -1656,29 +1750,29 @@ class ScalarEvolution {
/// Determine the range for a particular SCEV.
/// NOTE: This returns a reference to an entry in a cache. It must be
/// copied if it's needed for longer.
- const ConstantRange &getRangeRef(const SCEV *S, RangeSignHint Hint,
+ const ConstantRange &getRangeRef(SCEVUse S, RangeSignHint Hint,
unsigned Depth = 0);
/// Determine the range for a particular SCEV, but evaluates ranges for
/// operands iteratively first.
- const ConstantRange &getRangeRefIter(const SCEV *S, RangeSignHint Hint);
+ const ConstantRange &getRangeRefIter(SCEVUse S, RangeSignHint Hint);
/// Determines the range for the affine SCEVAddRecExpr {\p Start,+,\p Step}.
/// Helper for \c getRange.
- ConstantRange getRangeForAffineAR(const SCEV *Start, const SCEV *Step,
+ ConstantRange getRangeForAffineAR(SCEVUse Start, SCEVUse Step,
const APInt &MaxBECount);
/// Determines the range for the affine non-self-wrapping SCEVAddRecExpr {\p
/// Start,+,\p Step}<nw>.
ConstantRange getRangeForAffineNoSelfWrappingAR(const SCEVAddRecExpr *AddRec,
- const SCEV *MaxBECount,
+ SCEVUse MaxBECount,
unsigned BitWidth,
RangeSignHint SignHint);
/// Try to compute a range for the affine SCEVAddRecExpr {\p Start,+,\p
/// Step} by "factoring out" a ternary expression from the add recurrence.
/// Helper called by \c getRange.
- ConstantRange getRangeViaFactoring(const SCEV *Start, const SCEV *Step,
+ ConstantRange getRangeViaFactoring(SCEVUse Start, SCEVUse Step,
const APInt &MaxBECount);
/// If the unknown expression U corresponds to a simple recurrence, return
@@ -1689,55 +1783,54 @@ class ScalarEvolution {
/// We know that there is no SCEV for the specified value. Analyze the
/// expression recursively.
- const SCEV *createSCEV(Value *V);
+ SCEVUse createSCEV(Value *V);
/// We know that there is no SCEV for the specified value. Create a new SCEV
/// for \p V iteratively.
- const SCEV *createSCEVIter(Value *V);
+ SCEVUse createSCEVIter(Value *V);
/// Collect operands of \p V for which SCEV expressions should be constructed
/// first. Returns a SCEV directly if it can be constructed trivially for \p
/// V.
- const SCEV *getOperandsToCreate(Value *V, SmallVectorImpl<Value *> &Ops);
+ SCEVUse getOperandsToCreate(Value *V, SmallVectorImpl<Value *> &Ops);
/// Provide the special handling we need to analyze PHI SCEVs.
- const SCEV *createNodeForPHI(PHINode *PN);
+ SCEVUse createNodeForPHI(PHINode *PN);
/// Helper function called from createNodeForPHI.
- const SCEV *createAddRecFromPHI(PHINode *PN);
+ SCEVUse createAddRecFromPHI(PHINode *PN);
/// A helper function for createAddRecFromPHI to handle simple cases.
- const SCEV *createSimpleAffineAddRec(PHINode *PN, Value *BEValueV,
- Value *StartValueV);
+ SCEVUse createSimpleAffineAddRec(PHINode *PN, Value *BEValueV,
+ Value *StartValueV);
/// Helper function called from createNodeForPHI.
- const SCEV *createNodeFromSelectLikePHI(PHINode *PN);
+ SCEVUse createNodeFromSelectLikePHI(PHINode *PN);
/// Provide special handling for a select-like instruction (currently this
/// is either a select instruction or a phi node). \p Ty is the type of the
/// instruction being processed, that is assumed equivalent to
/// "Cond ? TrueVal : FalseVal".
- std::optional<const SCEV *>
+ std::optional<SCEVUse>
createNodeForSelectOrPHIInstWithICmpInstCond(Type *Ty, ICmpInst *Cond,
Value *TrueVal, Value *FalseVal);
/// See if we can model this select-like instruction via umin_seq expression.
- const SCEV *createNodeForSelectOrPHIViaUMinSeq(Value *I, Value *Cond,
- Value *TrueVal,
- Value *FalseVal);
+ SCEVUse createNodeForSelectOrPHIViaUMinSeq(Value *I, Value *Cond,
+ Value *TrueVal, Value *FalseVal);
/// Given a value \p V, which is a select-like instruction (currently this is
/// either a select instruction or a phi node), which is assumed equivalent to
/// Cond ? TrueVal : FalseVal
/// see if we can model it as a SCEV expression.
- const SCEV *createNodeForSelectOrPHI(Value *V, Value *Cond, Value *TrueVal,
- Value *FalseVal);
+ SCEVUse createNodeForSelectOrPHI(Value *V, Value *Cond, Value *TrueVal,
+ Value *FalseVal);
/// Provide the special handling we need to analyze GEP SCEVs.
- const SCEV *createNodeForGEP(GEPOperator *GEP);
+ SCEVUse createNodeForGEP(GEPOperator *GEP);
/// Implementation code for getSCEVAtScope; called at most once for each
/// SCEV+Loop pair.
- const SCEV *computeSCEVAtScope(const SCEV *S, const Loop *L);
+ SCEVUse computeSCEVAtScope(SCEVUse S, const Loop *L);
/// Return the BackedgeTakenInfo for the given loop, lazily computing new
/// values if the loop hasn't been analyzed yet. The returned result is
@@ -1764,7 +1857,7 @@ class ScalarEvolution {
/// Return a symbolic upper bound for the backedge taken count of the loop.
/// This is more general than getConstantMaxBackedgeTakenCount as it returns
/// an arbitrary expression as opposed to only constants.
- const SCEV *computeSymbolicMaxBackedgeTakenCount(const Loop *L);
+ SCEVUse computeSymbolicMaxBackedgeTakenCount(const Loop *L);
// Helper functions for computeExitLimitFromCond to avoid exponential time
// complexity.
@@ -1824,8 +1917,7 @@ class ScalarEvolution {
/// return more precise results in some cases and is preferred when the caller
/// has a materialized ICmp.
ExitLimit computeExitLimitFromICmp(const Loop *L, ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS,
- bool IsSubExpr,
+ SCEVUse LHS, SCEVUse RHS, bool IsSubExpr,
bool AllowPredicates = false);
/// Compute the number of times the backedge of the specified loop will
@@ -1851,20 +1943,20 @@ class ScalarEvolution {
/// of the loop until the exit condition gets a value of ExitWhen
/// (true or false). If we cannot evaluate the exit count of the loop,
/// return CouldNotCompute.
- const SCEV *computeExitCountExhaustively(const Loop *L, Value *Cond,
- bool ExitWhen);
+ SCEVUse computeExitCountExhaustively(const Loop *L, Value *Cond,
+ bool ExitWhen);
/// Return the number of times an exit condition comparing the specified
/// value to zero will execute. If not computable, return CouldNotCompute.
/// If AllowPredicates is set, this call will try to use a minimal set of
/// SCEV predicates in order to return an exact answer.
- ExitLimit howFarToZero(const SCEV *V, const Loop *L, bool IsSubExpr,
+ ExitLimit howFarToZero(SCEVUse V, const Loop *L, bool IsSubExpr,
bool AllowPredicates = false);
/// Return the number of times an exit condition checking the specified
/// value for nonzero will execute. If not computable, return
/// CouldNotCompute.
- ExitLimit howFarToNonZero(const SCEV *V, const Loop *L);
+ ExitLimit howFarToNonZero(SCEVUse V, const Loop *L);
/// Return the number of times an exit condition containing the specified
/// less-than comparison will execute. If not computable, return
@@ -1878,11 +1970,11 @@ class ScalarEvolution {
///
/// If \p AllowPredicates is set, this call will try to use a minimal set of
/// SCEV predicates in order to return an exact answer.
- ExitLimit howManyLessThans(const SCEV *LHS, const SCEV *RHS, const Loop *L,
+ ExitLimit howManyLessThans(SCEVUse LHS, SCEVUse RHS, const Loop *L,
bool isSigned, bool ControlsOnlyExit,
bool AllowPredicates = false);
- ExitLimit howManyGreaterThans(const SCEV *LHS, const SCEV *RHS, const Loop *L,
+ ExitLimit howManyGreaterThans(SCEVUse LHS, SCEVUse RHS, const Loop *L,
bool isSigned, bool IsSubExpr,
bool AllowPredicates = false);
@@ -1896,7 +1988,7 @@ class ScalarEvolution {
/// whenever the given FoundCondValue value evaluates to true in given
/// Context. If Context is nullptr, then the found predicate is true
/// everywhere. LHS and FoundLHS may have different type widths.
- bool isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
+ bool isImpliedCond(ICmpInst::Predicate Pred, SCEVUse LHS, SCEVUse RHS,
const Value *FoundCondValue, bool Inverse,
const Instruction *Context = nullptr);
@@ -1904,65 +1996,60 @@ class ScalarEvolution {
/// whenever the given FoundCondValue value evaluates to true in given
/// Context. If Context is nullptr, then the found predicate is true
/// everywhere. LHS and FoundLHS must have the same type width.
- bool isImpliedCondBalancedTypes(ICmpInst::Predicate Pred, const SCEV *LHS,
- const SCEV *RHS,
- ICmpInst::Predicate FoundPred,
- const SCEV *FoundLHS, const SCEV *FoundRHS,
+ bool isImpliedCondBalancedTypes(ICmpInst::Predicate Pred, SCEVUse LHS,
+ SCEVUse RHS, ICmpInst::Predicate FoundPred,
+ SCEVUse FoundLHS, SCEVUse FoundRHS,
const Instruction *CtxI);
/// Test whether the condition described by Pred, LHS, and RHS is true
/// whenever the condition described by FoundPred, FoundLHS, FoundRHS is
/// true in given Context. If Context is nullptr, then the found predicate is
/// true everywhere.
- bool isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
- ICmpInst::Predicate FoundPred, const SCEV *FoundLHS,
- const SCEV *FoundRHS,
- const Instruction *Context = nullptr);
+ bool isImpliedCond(ICmpInst::Predicate Pred, SCEVUse LHS, SCEVUse RHS,
+ ICmpInst::Predicate FoundPred, SCEVUse FoundLHS,
+ SCEVUse FoundRHS, const Instruction *Context = nullptr);
/// Test whether the condition described by Pred, LHS, and RHS is true
/// whenever the condition described by Pred, FoundLHS, and FoundRHS is
/// true in given Context. If Context is nullptr, then the found predicate is
/// true everywhere.
- bool isImpliedCondOperands(ICmpInst::Predicate Pred, const SCEV *LHS,
- const SCEV *RHS, const SCEV *FoundLHS,
- const SCEV *FoundRHS,
+ bool isImpliedCondOperands(ICmpInst::Predicate Pred, SCEVUse LHS, SCEVUse RHS,
+ SCEVUse FoundLHS, SCEVUse FoundRHS,
const Instruction *Context = nullptr);
/// Test whether the condition described by Pred, LHS, and RHS is true
/// whenever the condition described by Pred, FoundLHS, and FoundRHS is
/// true. Here LHS is an operation that includes FoundLHS as one of its
/// arguments.
- bool isImpliedViaOperations(ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS,
- const SCEV *FoundLHS, const SCEV *FoundRHS,
+ bool isImpliedViaOperations(ICmpInst::Predicate Pred, SCEVUse LHS,
+ SCEVUse RHS, SCEVUse FoundLHS, SCEVUse FoundRHS,
unsigned Depth = 0);
/// Test whether the condition described by Pred, LHS, and RHS is true.
/// Use only simple non-recursive types of checks, such as range analysis etc.
- bool isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS);
+ bool isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred, SCEVUse LHS,
+ SCEVUse RHS);
/// Test whether the condition described by Pred, LHS, and RHS is true
/// whenever the condition described by Pred, FoundLHS, and FoundRHS is
/// true.
- bool isImpliedCondOperandsHelper(ICmpInst::Predicate Pred, const SCEV *LHS,
- const SCEV *RHS, const SCEV *FoundLHS,
- const SCEV *FoundRHS);
+ bool isImpliedCondOperandsHelper(ICmpInst::Predicate Pred, SCEVUse LHS,
+ SCEVUse RHS, SCEVUse FoundLHS,
+ SCEVUse FoundRHS);
/// Test whether the condition described by Pred, LHS, and RHS is true
/// whenever the condition described by Pred, FoundLHS, and FoundRHS is
/// true. Utility function used by isImpliedCondOperands. Tries to get
/// cases like "X `sgt` 0 => X - 1 `sgt` -1".
- bool isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred, const SCEV *LHS,
- const SCEV *RHS,
+ bool isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred, SCEVUse LHS,
+ SCEVUse RHS,
ICmpInst::Predicate FoundPred,
- const SCEV *FoundLHS,
- const SCEV *FoundRHS);
+ SCEVUse FoundLHS, SCEVUse FoundRHS);
/// Return true if the condition denoted by \p LHS \p Pred \p RHS is implied
/// by a call to @llvm.experimental.guard in \p BB.
bool isImpliedViaGuard(const BasicBlock *BB, ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS);
+ SCEVUse LHS, SCEVUse RHS);
/// Test whether the condition described by Pred, LHS, and RHS is true
/// whenever the condition described by Pred, FoundLHS, and FoundRHS is
@@ -1970,10 +2057,9 @@ class ScalarEvolution {
///
/// This routine tries to rule out certain kinds of integer overflow, and
/// then tries to reason about arithmetic properties of the predicates.
- bool isImpliedCondOperandsViaNoOverflow(ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS,
- const SCEV *FoundLHS,
- const SCEV *FoundRHS);
+ bool isImpliedCondOperandsViaNoOverflow(ICmpInst::Predicate Pred, SCEVUse LHS,
+ SCEVUse RHS, SCEVUse FoundLHS,
+ SCEVUse FoundRHS);
/// Test whether the condition described by Pred, LHS, and RHS is true
/// whenever the condition described by Pred, FoundLHS, and FoundRHS is
@@ -1982,9 +2068,8 @@ class ScalarEvolution {
/// This routine tries to weaken the known condition based on the fact that
/// FoundLHS is an AddRec.
bool isImpliedCondOperandsViaAddRecStart(ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS,
- const SCEV *FoundLHS,
- const SCEV *FoundRHS,
+ SCEVUse LHS, SCEVUse RHS,
+ SCEVUse FoundLHS, SCEVUse FoundRHS,
const Instruction *CtxI);
/// Test whether the condition described by Pred, LHS, and RHS is true
@@ -1994,19 +2079,17 @@ class ScalarEvolution {
/// This routine tries to figure out the predicate for Phis which are SCEVUnknown
/// if it is true for every possible incoming value from their respective
/// basic blocks.
- bool isImpliedViaMerge(ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS,
- const SCEV *FoundLHS, const SCEV *FoundRHS,
- unsigned Depth);
+ bool isImpliedViaMerge(ICmpInst::Predicate Pred, SCEVUse LHS, SCEVUse RHS,
+ SCEVUse FoundLHS, SCEVUse FoundRHS, unsigned Depth);
/// Test whether the condition described by Pred, LHS, and RHS is true
/// whenever the condition described by Pred, FoundLHS, and FoundRHS is
/// true.
///
/// This routine tries to reason about shifts.
- bool isImpliedCondOperandsViaShift(ICmpInst::Predicate Pred, const SCEV *LHS,
- const SCEV *RHS, const SCEV *FoundLHS,
- const SCEV *FoundRHS);
+ bool isImpliedCondOperandsViaShift(ICmpInst::Predicate Pred, SCEVUse LHS,
+ SCEVUse RHS, SCEVUse FoundLHS,
+ SCEVUse FoundRHS);
/// If we know that the specified Phi is in the header of its containing
/// loop, we know the loop executes a constant number of times, and the PHI
@@ -2016,50 +2099,50 @@ class ScalarEvolution {
/// Test if the given expression is known to satisfy the condition described
/// by Pred and the known constant ranges of LHS and RHS.
- bool isKnownPredicateViaConstantRanges(ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS);
+ bool isKnownPredicateViaConstantRanges(ICmpInst::Predicate Pred, SCEVUse LHS,
+ SCEVUse RHS);
/// Try to prove the condition described by "LHS Pred RHS" by ruling out
/// integer overflow.
///
/// For instance, this will return true for "A s< (A + C)<nsw>" if C is
/// positive.
- bool isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred, const SCEV *LHS,
- const SCEV *RHS);
+ bool isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred, SCEVUse LHS,
+ SCEVUse RHS);
/// Try to split Pred LHS RHS into logical conjunctions (and's) and try to
/// prove them individually.
- bool isKnownPredicateViaSplitting(ICmpInst::Predicate Pred, const SCEV *LHS,
- const SCEV *RHS);
+ bool isKnownPredicateViaSplitting(ICmpInst::Predicate Pred, SCEVUse LHS,
+ SCEVUse RHS);
/// Try to match the Expr as "(L + R)<Flags>".
- bool splitBinaryAdd(const SCEV *Expr, const SCEV *&L, const SCEV *&R,
+ bool splitBinaryAdd(SCEVUse Expr, SCEVUse &L, SCEVUse &R,
SCEV::NoWrapFlags &Flags);
/// Forget predicated/non-predicated backedge taken counts for the given loop.
void forgetBackedgeTakenCounts(const Loop *L, bool Predicated);
/// Drop memoized information for all \p SCEVs.
- void forgetMemoizedResults(ArrayRef<const SCEV *> SCEVs);
+ void forgetMemoizedResults(ArrayRef<SCEVUse> SCEVs);
/// Helper for forgetMemoizedResults.
- void forgetMemoizedResultsImpl(const SCEV *S);
+ void forgetMemoizedResultsImpl(SCEVUse S);
/// Iterate over instructions in \p Worklist and their users. Erase entries
/// from ValueExprMap and collect SCEV expressions in \p ToForget
void visitAndClearUsers(SmallVectorImpl<Instruction *> &Worklist,
SmallPtrSetImpl<Instruction *> &Visited,
- SmallVectorImpl<const SCEV *> &ToForget);
+ SmallVectorImpl<SCEVUse> &ToForget);
/// Erase Value from ValueExprMap and ExprValueMap.
void eraseValueFromMap(Value *V);
/// Insert V to S mapping into ValueExprMap and ExprValueMap.
- void insertValueToMap(Value *V, const SCEV *S);
+ void insertValueToMap(Value *V, SCEVUse S);
/// Return false iff the given SCEV contains a SCEVUnknown with a NULL
/// value pointer.
- bool checkValidity(const SCEV *S) const;
+ bool checkValidity(SCEVUse S) const;
/// Return true if `ExtendOpTy`({`Start`,+,`Step`}) can be proved to be
/// equal to {`ExtendOpTy`(`Start`),+,`ExtendOpTy`(`Step`)}. This is
@@ -2067,8 +2150,7 @@ class ScalarEvolution {
/// {`Start`,+,`Step`} if `ExtendOpTy` is `SCEVSignExtendExpr`
/// (resp. `SCEVZeroExtendExpr`).
template <typename ExtendOpTy>
- bool proveNoWrapByVaryingStart(const SCEV *Start, const SCEV *Step,
- const Loop *L);
+ bool proveNoWrapByVaryingStart(SCEVUse Start, SCEVUse Step, const Loop *L);
/// Try to prove NSW or NUW on \p AR relying on ConstantRange manipulation.
SCEV::NoWrapFlags proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR);
@@ -2094,17 +2176,17 @@ class ScalarEvolution {
/// 'S'. Specifically, return the first instruction in said bounding scope.
/// Return nullptr if the scope is trivial (function entry).
/// (See scope definition rules associated with flag discussion above)
- const Instruction *getNonTrivialDefiningScopeBound(const SCEV *S);
+ const Instruction *getNonTrivialDefiningScopeBound(SCEVUse S);
/// Return a scope which provides an upper bound on the defining scope for
/// a SCEV with the operands in Ops. The outparam Precise is set if the
/// bound found is a precise bound (i.e. must be the defining scope.)
- const Instruction *getDefiningScopeBound(ArrayRef<const SCEV *> Ops,
+ const Instruction *getDefiningScopeBound(ArrayRef<SCEVUse> Ops,
bool &Precise);
/// Wrapper around the above for cases which don't care if the bound
/// is precise.
- const Instruction *getDefiningScopeBound(ArrayRef<const SCEV *> Ops);
+ const Instruction *getDefiningScopeBound(ArrayRef<SCEVUse> Ops);
/// Given two instructions in the same function, return true if we can
/// prove B must execute given A executes.
@@ -2145,7 +2227,7 @@ class ScalarEvolution {
/// If the analysis is not successful, a mapping from the \p SymbolicPHI to
/// itself (with no predicates) is recorded, and a nullptr with an empty
/// predicates vector is returned as a pair.
- std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
+ std::optional<std::pair<SCEVUse, SmallVector<const SCEVPredicate *, 3>>>
createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI);
/// Compute the maximum backedge count based on the range of values
@@ -2157,47 +2239,44 @@ class ScalarEvolution {
/// * the induction variable is assumed not to overflow (i.e. either it
/// actually doesn't, or we'd have to immediately execute UB)
/// We *don't* assert these preconditions so please be careful.
- const SCEV *computeMaxBECountForLT(const SCEV *Start, const SCEV *Stride,
- const SCEV *End, unsigned BitWidth,
- bool IsSigned);
+ SCEVUse computeMaxBECountForLT(SCEVUse Start, SCEVUse Stride, SCEVUse End,
+ unsigned BitWidth, bool IsSigned);
/// Verify if a linear IV with a positive stride can overflow when in a
/// less-than comparison, knowing the invariant term of the comparison and
/// the stride.
- bool canIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride, bool IsSigned);
+ bool canIVOverflowOnLT(SCEVUse RHS, SCEVUse Stride, bool IsSigned);
/// Verify if a linear IV with a negative stride can overflow when in a
/// greater-than comparison, knowing the invariant term of the comparison and
/// the stride.
- bool canIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, bool IsSigned);
+ bool canIVOverflowOnGT(SCEVUse RHS, SCEVUse Stride, bool IsSigned);
/// Get add expr already created or create a new one.
- const SCEV *getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
- SCEV::NoWrapFlags Flags);
+ SCEVUse getOrCreateAddExpr(ArrayRef<SCEVUse> Ops, SCEV::NoWrapFlags Flags);
/// Get mul expr already created or create a new one.
- const SCEV *getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
- SCEV::NoWrapFlags Flags);
+ SCEVUse getOrCreateMulExpr(ArrayRef<SCEVUse> Ops, SCEV::NoWrapFlags Flags);
// Get addrec expr already created or create a new one.
- const SCEV *getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
- const Loop *L, SCEV::NoWrapFlags Flags);
+ SCEVUse getOrCreateAddRecExpr(ArrayRef<SCEVUse> Ops, const Loop *L,
+ SCEV::NoWrapFlags Flags);
/// Return x if \p Val is f(x) where f is a 1-1 function.
- const SCEV *stripInjectiveFunctions(const SCEV *Val) const;
+ SCEVUse stripInjectiveFunctions(SCEVUse Val) const;
/// Find all of the loops transitively used in \p S, and fill \p LoopsUsed.
/// A loop is considered "used" by an expression if it contains
/// an add rec on said loop.
- void getUsedLoops(const SCEV *S, SmallPtrSetImpl<const Loop *> &LoopsUsed);
+ void getUsedLoops(SCEVUse S, SmallPtrSetImpl<const Loop *> &LoopsUsed);
/// Try to match the pattern generated by getURemExpr(A, B). If successful,
/// Assign A and B to LHS and RHS, respectively.
- bool matchURem(const SCEV *Expr, const SCEV *&LHS, const SCEV *&RHS);
+ bool matchURem(SCEVUse Expr, SCEVUse &LHS, SCEVUse &RHS);
/// Look for a SCEV expression with type `SCEVType` and operands `Ops` in
/// `UniqueSCEVs`. Return it if found, else nullptr.
- SCEV *findExistingSCEVInCache(SCEVTypes SCEVType, ArrayRef<const SCEV *> Ops);
+ SCEV *findExistingSCEVInCache(SCEVTypes SCEVType, ArrayRef<SCEVUse> Ops);
/// Get reachable blocks in this function, making limited use of SCEV
/// reasoning about conditions.
@@ -2206,8 +2285,7 @@ class ScalarEvolution {
/// Return the given SCEV expression with a new set of operands.
/// This preserves the original nowrap flags.
- const SCEV *getWithOperands(const SCEV *S,
- SmallVectorImpl<const SCEV *> &NewOps);
+ SCEVUse getWithOperands(SCEVUse S, SmallVectorImpl<SCEVUse> &NewOps);
FoldingSet<SCEV> UniqueSCEVs;
FoldingSet<SCEVPredicate> UniquePreds;
@@ -2219,7 +2297,7 @@ class ScalarEvolution {
/// Cache tentative mappings from UnknownSCEVs in a Loop, to a SCEV expression
/// they can be rewritten into under certain predicates.
DenseMap<std::pair<const SCEVUnknown *, const Loop *>,
- std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
+ std::pair<SCEVUse, SmallVector<const SCEVPredicate *, 3>>>
PredicatedSCEVRewrites;
/// Set of AddRecs for which proving NUW via an induction has already been
@@ -2311,10 +2389,10 @@ class PredicatedScalarEvolution {
/// predicate. The order of transformations applied on the expression of V
/// returned by ScalarEvolution is guaranteed to be preserved, even when
/// adding new predicates.
- const SCEV *getSCEV(Value *V);
+ SCEVUse getSCEV(Value *V);
/// Get the (predicated) backedge count for the analyzed loop.
- const SCEV *getBackedgeTakenCount();
+ SCEVUse getBackedgeTakenCount();
/// Adds a new predicate.
void addPredicate(const SCEVPredicate &Pred);
@@ -2354,7 +2432,7 @@ class PredicatedScalarEvolution {
/// Holds a SCEV and the version number of the SCEV predicate used to
/// perform the rewrite of the expression.
- using RewriteEntry = std::pair<unsigned, const SCEV *>;
+ using RewriteEntry = std::pair<unsigned, SCEVUse>;
/// Maps a SCEV to the rewrite result of that SCEV at a certain version
/// number. If this number doesn't match the current Generation, we will
@@ -2383,7 +2461,7 @@ class PredicatedScalarEvolution {
unsigned Generation = 0;
/// The backedge taken count.
- const SCEV *BackedgeCount = nullptr;
+ SCEVUse BackedgeCount = nullptr;
};
template <> struct DenseMapInfo<ScalarEvolution::FoldID> {
diff --git a/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h b/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
index fd884f2a2f55b..b72d9fbe64fab 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
@@ -90,11 +90,12 @@ class SCEVVScale : public SCEV {
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const SCEV *S) { return S->getSCEVType() == scVScale; }
+ static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
};
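The second classof overload is what makes isa<> usable on a SCEVUse directly: isa<SCEVVScale>(U) resolves to SCEVVScale::classof(&U), which unwraps via getPointer(). A minimal sketch (U is assumed to be a valid SCEVUse):

    // Dispatches to the classof(const SCEVUse *) overload added above.
    static bool isVScale(SCEVUse U) { return isa<SCEVVScale>(U); }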
-inline unsigned short computeExpressionSize(ArrayRef<const SCEV *> Args) {
+inline unsigned short computeExpressionSize(ArrayRef<SCEVUse> Args) {
APInt Size(16, 1);
- for (const auto *Arg : Args)
+ for (const auto Arg : Args)
Size = Size.uadd_sat(APInt(16, Arg->getExpressionSize()));
return (unsigned short)Size.getZExtValue();
}
@@ -102,19 +103,19 @@ inline unsigned short computeExpressionSize(ArrayRef<const SCEV *> Args) {
/// This is the base class for unary cast operator classes.
class SCEVCastExpr : public SCEV {
protected:
- const SCEV *Op;
+ SCEVUse Op;
Type *Ty;
- SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, const SCEV *op,
+ SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, SCEVUse op,
Type *ty);
public:
- const SCEV *getOperand() const { return Op; }
- const SCEV *getOperand(unsigned i) const {
+ SCEVUse getOperand() const { return Op; }
+ SCEVUse getOperand(unsigned i) const {
assert(i == 0 && "Operand index out of range!");
return Op;
}
- ArrayRef<const SCEV *> operands() const { return Op; }
+ ArrayRef<SCEVUse> operands() const { return Op; }
size_t getNumOperands() const { return 1; }
Type *getType() const { return Ty; }
@@ -123,6 +124,7 @@ class SCEVCastExpr : public SCEV {
return S->getSCEVType() == scPtrToInt || S->getSCEVType() == scTruncate ||
S->getSCEVType() == scZeroExtend || S->getSCEVType() == scSignExtend;
}
+ static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
};
/// This class represents a cast from a pointer to a pointer-sized integer
@@ -130,18 +132,19 @@ class SCEVCastExpr : public SCEV {
class SCEVPtrToIntExpr : public SCEVCastExpr {
friend class ScalarEvolution;
- SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op, Type *ITy);
+ SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, SCEVUse Op, Type *ITy);
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const SCEV *S) { return S->getSCEVType() == scPtrToInt; }
+ static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
};
/// This is the base class for unary integral cast operator classes.
class SCEVIntegralCastExpr : public SCEVCastExpr {
protected:
SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
- const SCEV *op, Type *ty);
+ SCEVUse op, Type *ty);
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
@@ -149,6 +152,7 @@ class SCEVIntegralCastExpr : public SCEVCastExpr {
return S->getSCEVType() == scTruncate || S->getSCEVType() == scZeroExtend ||
S->getSCEVType() == scSignExtend;
}
+ static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
};
/// This class represents a truncation of an integer value to a
@@ -156,11 +160,12 @@ class SCEVIntegralCastExpr : public SCEVCastExpr {
class SCEVTruncateExpr : public SCEVIntegralCastExpr {
friend class ScalarEvolution;
- SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op, Type *ty);
+ SCEVTruncateExpr(const FoldingSetNodeIDRef ID, SCEVUse op, Type *ty);
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const SCEV *S) { return S->getSCEVType() == scTruncate; }
+ static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
};
/// This class represents a zero extension of a small integer value
@@ -168,13 +173,14 @@ class SCEVTruncateExpr : public SCEVIntegralCastExpr {
class SCEVZeroExtendExpr : public SCEVIntegralCastExpr {
friend class ScalarEvolution;
- SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID, const SCEV *op, Type *ty);
+ SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID, SCEVUse op, Type *ty);
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const SCEV *S) {
return S->getSCEVType() == scZeroExtend;
}
+ static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
};
/// This class represents a sign extension of a small integer value
@@ -182,13 +188,14 @@ class SCEVZeroExtendExpr : public SCEVIntegralCastExpr {
class SCEVSignExtendExpr : public SCEVIntegralCastExpr {
friend class ScalarEvolution;
- SCEVSignExtendExpr(const FoldingSetNodeIDRef ID, const SCEV *op, Type *ty);
+ SCEVSignExtendExpr(const FoldingSetNodeIDRef ID, SCEVUse op, Type *ty);
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const SCEV *S) {
return S->getSCEVType() == scSignExtend;
}
+ static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
};
/// This node is a base class providing common functionality for
@@ -199,25 +206,23 @@ class SCEVNAryExpr : public SCEV {
// arrays with its SCEVAllocator, so this class just needs a simple
// pointer rather than a more elaborate vector-like data structure.
// This also avoids the need for a non-trivial destructor.
- const SCEV *const *Operands;
+ SCEVUse const *Operands;
size_t NumOperands;
- SCEVNAryExpr(const FoldingSetNodeIDRef ID, enum SCEVTypes T,
- const SCEV *const *O, size_t N)
+ SCEVNAryExpr(const FoldingSetNodeIDRef ID, enum SCEVTypes T, SCEVUse const *O,
+ size_t N)
: SCEV(ID, T, computeExpressionSize(ArrayRef(O, N))), Operands(O),
NumOperands(N) {}
public:
size_t getNumOperands() const { return NumOperands; }
- const SCEV *getOperand(unsigned i) const {
+ SCEVUse getOperand(unsigned i) const {
assert(i < NumOperands && "Operand index out of range!");
return Operands[i];
}
- ArrayRef<const SCEV *> operands() const {
- return ArrayRef(Operands, NumOperands);
- }
+ ArrayRef<SCEVUse> operands() const { return ArrayRef(Operands, NumOperands); }
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask = NoWrapMask) const {
return (NoWrapFlags)(SubclassData & Mask);
@@ -241,13 +246,14 @@ class SCEVNAryExpr : public SCEV {
S->getSCEVType() == scSequentialUMinExpr ||
S->getSCEVType() == scAddRecExpr;
}
+ static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
};
/// This node is the base class for n'ary commutative operators.
class SCEVCommutativeExpr : public SCEVNAryExpr {
protected:
SCEVCommutativeExpr(const FoldingSetNodeIDRef ID, enum SCEVTypes T,
- const SCEV *const *O, size_t N)
+ SCEVUse const *O, size_t N)
: SCEVNAryExpr(ID, T, O, N) {}
public:
@@ -257,6 +263,7 @@ class SCEVCommutativeExpr : public SCEVNAryExpr {
S->getSCEVType() == scSMaxExpr || S->getSCEVType() == scUMaxExpr ||
S->getSCEVType() == scSMinExpr || S->getSCEVType() == scUMinExpr;
}
+ static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
/// Set flags for a non-recurrence without clearing previously set flags.
void setNoWrapFlags(NoWrapFlags Flags) { SubclassData |= Flags; }
@@ -268,11 +275,10 @@ class SCEVAddExpr : public SCEVCommutativeExpr {
Type *Ty;
- SCEVAddExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
+ SCEVAddExpr(const FoldingSetNodeIDRef ID, SCEVUse const *O, size_t N)
: SCEVCommutativeExpr(ID, scAddExpr, O, N) {
- auto *FirstPointerTypedOp = find_if(operands(), [](const SCEV *Op) {
- return Op->getType()->isPointerTy();
- });
+ auto *FirstPointerTypedOp = find_if(
+ operands(), [](SCEVUse Op) { return Op->getType()->isPointerTy(); });
if (FirstPointerTypedOp != operands().end())
Ty = (*FirstPointerTypedOp)->getType();
else
@@ -284,13 +290,14 @@ class SCEVAddExpr : public SCEVCommutativeExpr {
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const SCEV *S) { return S->getSCEVType() == scAddExpr; }
+ static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
};
/// This node represents multiplication of some number of SCEVs.
class SCEVMulExpr : public SCEVCommutativeExpr {
friend class ScalarEvolution;
- SCEVMulExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
+ SCEVMulExpr(const FoldingSetNodeIDRef ID, SCEVUse const *O, size_t N)
: SCEVCommutativeExpr(ID, scMulExpr, O, N) {}
public:
@@ -298,30 +305,31 @@ class SCEVMulExpr : public SCEVCommutativeExpr {
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const SCEV *S) { return S->getSCEVType() == scMulExpr; }
+ static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
};
/// This class represents a binary unsigned division operation.
class SCEVUDivExpr : public SCEV {
friend class ScalarEvolution;
- std::array<const SCEV *, 2> Operands;
+ std::array<SCEVUse, 2> Operands;
- SCEVUDivExpr(const FoldingSetNodeIDRef ID, const SCEV *lhs, const SCEV *rhs)
+ SCEVUDivExpr(const FoldingSetNodeIDRef ID, SCEVUse lhs, SCEVUse rhs)
: SCEV(ID, scUDivExpr, computeExpressionSize({lhs, rhs})) {
Operands[0] = lhs;
Operands[1] = rhs;
}
public:
- const SCEV *getLHS() const { return Operands[0]; }
- const SCEV *getRHS() const { return Operands[1]; }
+ SCEVUse getLHS() const { return Operands[0]; }
+ SCEVUse getRHS() const { return Operands[1]; }
size_t getNumOperands() const { return 2; }
- const SCEV *getOperand(unsigned i) const {
+ SCEVUse getOperand(unsigned i) const {
assert((i == 0 || i == 1) && "Operand index out of range!");
return i == 0 ? getLHS() : getRHS();
}
- ArrayRef<const SCEV *> operands() const { return Operands; }
+ ArrayRef<SCEVUse> operands() const { return Operands; }
Type *getType() const {
// In most cases the types of LHS and RHS will be the same, but in some
@@ -334,6 +342,7 @@ class SCEVUDivExpr : public SCEV {
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const SCEV *S) { return S->getSCEVType() == scUDivExpr; }
+ static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
};
/// This node represents a polynomial recurrence on the trip count
@@ -349,25 +358,24 @@ class SCEVAddRecExpr : public SCEVNAryExpr {
const Loop *L;
- SCEVAddRecExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N,
+ SCEVAddRecExpr(const FoldingSetNodeIDRef ID, SCEVUse const *O, size_t N,
const Loop *l)
: SCEVNAryExpr(ID, scAddRecExpr, O, N), L(l) {}
public:
Type *getType() const { return getStart()->getType(); }
- const SCEV *getStart() const { return Operands[0]; }
+ SCEVUse getStart() const { return Operands[0]; }
const Loop *getLoop() const { return L; }
/// Constructs and returns the recurrence indicating how much this
/// expression steps by. If this is a polynomial of degree N, it
/// returns a chrec of degree N-1. We cannot determine whether
/// the step recurrence has self-wraparound.
- const SCEV *getStepRecurrence(ScalarEvolution &SE) const {
+ SCEVUse getStepRecurrence(ScalarEvolution &SE) const {
if (isAffine())
return getOperand(1);
- return SE.getAddRecExpr(
- SmallVector<const SCEV *, 3>(operands().drop_front()), getLoop(),
- FlagAnyWrap);
+ return SE.getAddRecExpr(SmallVector<SCEVUse, 3>(operands().drop_front()),
+ getLoop(), FlagAnyWrap);
}
/// Return true if this represents an expression A + B*x where A
@@ -394,12 +402,12 @@ class SCEVAddRecExpr : public SCEVNAryExpr {
/// Return the value of this chain of recurrences at the specified
/// iteration number.
- const SCEV *evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const;
+ SCEVUse evaluateAtIteration(SCEVUse It, ScalarEvolution &SE) const;
/// Return the value of this chain of recurrences at the specified iteration
/// number. Takes an explicit list of operands to represent an AddRec.
- static const SCEV *evaluateAtIteration(ArrayRef<const SCEV *> Operands,
- const SCEV *It, ScalarEvolution &SE);
+ static SCEVUse evaluateAtIteration(ArrayRef<SCEVUse> Operands, SCEVUse It,
+ ScalarEvolution &SE);
/// Return the number of iterations of this loop that produce
/// values in the specified constant range. Another way of
@@ -407,8 +415,8 @@ class SCEVAddRecExpr : public SCEVNAryExpr {
/// where the value is not in the condition, thus computing the
/// exit count. If the iteration count can't be computed, an
/// instance of SCEVCouldNotCompute is returned.
- const SCEV *getNumIterationsInRange(const ConstantRange &Range,
- ScalarEvolution &SE) const;
+ SCEVUse getNumIterationsInRange(const ConstantRange &Range,
+ ScalarEvolution &SE) const;
/// Return an expression representing the value of this expression
/// one iteration of the loop ahead.
@@ -418,6 +426,7 @@ class SCEVAddRecExpr : public SCEVNAryExpr {
static bool classof(const SCEV *S) {
return S->getSCEVType() == scAddRecExpr;
}
+ static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
};
/// This node is the base class min/max selections.
@@ -432,7 +441,7 @@ class SCEVMinMaxExpr : public SCEVCommutativeExpr {
protected:
/// Note: Constructing subclasses via this constructor is allowed
SCEVMinMaxExpr(const FoldingSetNodeIDRef ID, enum SCEVTypes T,
- const SCEV *const *O, size_t N)
+ SCEVUse const *O, size_t N)
: SCEVCommutativeExpr(ID, T, O, N) {
assert(isMinMaxType(T));
// Min and max never overflow
@@ -443,6 +452,7 @@ class SCEVMinMaxExpr : public SCEVCommutativeExpr {
Type *getType() const { return getOperand(0)->getType(); }
static bool classof(const SCEV *S) { return isMinMaxType(S->getSCEVType()); }
+ static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
static enum SCEVTypes negate(enum SCEVTypes T) {
switch (T) {
@@ -464,48 +474,52 @@ class SCEVMinMaxExpr : public SCEVCommutativeExpr {
class SCEVSMaxExpr : public SCEVMinMaxExpr {
friend class ScalarEvolution;
- SCEVSMaxExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
+ SCEVSMaxExpr(const FoldingSetNodeIDRef ID, SCEVUse const *O, size_t N)
: SCEVMinMaxExpr(ID, scSMaxExpr, O, N) {}
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const SCEV *S) { return S->getSCEVType() == scSMaxExpr; }
+ static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
};
/// This class represents an unsigned maximum selection.
class SCEVUMaxExpr : public SCEVMinMaxExpr {
friend class ScalarEvolution;
- SCEVUMaxExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
+ SCEVUMaxExpr(const FoldingSetNodeIDRef ID, SCEVUse const *O, size_t N)
: SCEVMinMaxExpr(ID, scUMaxExpr, O, N) {}
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const SCEV *S) { return S->getSCEVType() == scUMaxExpr; }
+ static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
};
/// This class represents a signed minimum selection.
class SCEVSMinExpr : public SCEVMinMaxExpr {
friend class ScalarEvolution;
- SCEVSMinExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
+ SCEVSMinExpr(const FoldingSetNodeIDRef ID, SCEVUse const *O, size_t N)
: SCEVMinMaxExpr(ID, scSMinExpr, O, N) {}
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const SCEV *S) { return S->getSCEVType() == scSMinExpr; }
+ static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
};
/// This class represents an unsigned minimum selection.
class SCEVUMinExpr : public SCEVMinMaxExpr {
friend class ScalarEvolution;
- SCEVUMinExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
+ SCEVUMinExpr(const FoldingSetNodeIDRef ID, SCEVUse const *O, size_t N)
: SCEVMinMaxExpr(ID, scUMinExpr, O, N) {}
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const SCEV *S) { return S->getSCEVType() == scUMinExpr; }
+ static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
};
/// This node is the base class for sequential/in-order min/max selections.
@@ -526,7 +540,7 @@ class SCEVSequentialMinMaxExpr : public SCEVNAryExpr {
protected:
/// Note: Constructing subclasses via this constructor is allowed
SCEVSequentialMinMaxExpr(const FoldingSetNodeIDRef ID, enum SCEVTypes T,
- const SCEV *const *O, size_t N)
+ SCEVUse const *O, size_t N)
: SCEVNAryExpr(ID, T, O, N) {
assert(isSequentialMinMaxType(T));
// Min and max never overflow
@@ -553,13 +567,14 @@ class SCEVSequentialMinMaxExpr : public SCEVNAryExpr {
static bool classof(const SCEV *S) {
return isSequentialMinMaxType(S->getSCEVType());
}
+ static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
};
/// This class represents a sequential/in-order unsigned minimum selection.
class SCEVSequentialUMinExpr : public SCEVSequentialMinMaxExpr {
friend class ScalarEvolution;
- SCEVSequentialUMinExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O,
+ SCEVSequentialUMinExpr(const FoldingSetNodeIDRef ID, SCEVUse const *O,
size_t N)
: SCEVSequentialMinMaxExpr(ID, scSequentialUMinExpr, O, N) {}
@@ -568,6 +583,7 @@ class SCEVSequentialUMinExpr : public SCEVSequentialMinMaxExpr {
static bool classof(const SCEV *S) {
return S->getSCEVType() == scSequentialUMinExpr;
}
+ static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
};
/// This means that we are dealing with an entirely unknown SCEV
@@ -600,48 +616,56 @@ class SCEVUnknown final : public SCEV, private CallbackVH {
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const SCEV *S) { return S->getSCEVType() == scUnknown; }
+ static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
};
/// This class defines a simple visitor class that may be used for
/// various SCEV analysis purposes.
template <typename SC, typename RetVal = void> struct SCEVVisitor {
- RetVal visit(const SCEV *S) {
+ RetVal visit(SCEVUse S) {
switch (S->getSCEVType()) {
case scConstant:
- return ((SC *)this)->visitConstant((const SCEVConstant *)S);
+ return ((SC *)this)->visitConstant((const SCEVConstant *)S.getPointer());
case scVScale:
- return ((SC *)this)->visitVScale((const SCEVVScale *)S);
+ return ((SC *)this)->visitVScale((const SCEVVScale *)S.getPointer());
case scPtrToInt:
- return ((SC *)this)->visitPtrToIntExpr((const SCEVPtrToIntExpr *)S);
+ return ((SC *)this)
+ ->visitPtrToIntExpr((const SCEVPtrToIntExpr *)S.getPointer());
case scTruncate:
- return ((SC *)this)->visitTruncateExpr((const SCEVTruncateExpr *)S);
+ return ((SC *)this)
+ ->visitTruncateExpr((const SCEVTruncateExpr *)S.getPointer());
case scZeroExtend:
- return ((SC *)this)->visitZeroExtendExpr((const SCEVZeroExtendExpr *)S);
+ return ((SC *)this)
+ ->visitZeroExtendExpr((const SCEVZeroExtendExpr *)S.getPointer());
case scSignExtend:
- return ((SC *)this)->visitSignExtendExpr((const SCEVSignExtendExpr *)S);
+ return ((SC *)this)
+ ->visitSignExtendExpr((const SCEVSignExtendExpr *)S.getPointer());
case scAddExpr:
- return ((SC *)this)->visitAddExpr((const SCEVAddExpr *)S);
+ return ((SC *)this)->visitAddExpr((const SCEVAddExpr *)S.getPointer());
case scMulExpr:
- return ((SC *)this)->visitMulExpr((const SCEVMulExpr *)S);
+ return ((SC *)this)->visitMulExpr((const SCEVMulExpr *)S.getPointer());
case scUDivExpr:
- return ((SC *)this)->visitUDivExpr((const SCEVUDivExpr *)S);
+ return ((SC *)this)->visitUDivExpr((const SCEVUDivExpr *)S.getPointer());
case scAddRecExpr:
- return ((SC *)this)->visitAddRecExpr((const SCEVAddRecExpr *)S);
+ return ((SC *)this)
+ ->visitAddRecExpr((const SCEVAddRecExpr *)S.getPointer());
case scSMaxExpr:
- return ((SC *)this)->visitSMaxExpr((const SCEVSMaxExpr *)S);
+ return ((SC *)this)->visitSMaxExpr((const SCEVSMaxExpr *)S.getPointer());
case scUMaxExpr:
- return ((SC *)this)->visitUMaxExpr((const SCEVUMaxExpr *)S);
+ return ((SC *)this)->visitUMaxExpr((const SCEVUMaxExpr *)S.getPointer());
case scSMinExpr:
- return ((SC *)this)->visitSMinExpr((const SCEVSMinExpr *)S);
+ return ((SC *)this)->visitSMinExpr((const SCEVSMinExpr *)S.getPointer());
case scUMinExpr:
- return ((SC *)this)->visitUMinExpr((const SCEVUMinExpr *)S);
+ return ((SC *)this)->visitUMinExpr((const SCEVUMinExpr *)S.getPointer());
case scSequentialUMinExpr:
return ((SC *)this)
- ->visitSequentialUMinExpr((const SCEVSequentialUMinExpr *)S);
+ ->visitSequentialUMinExpr(
+ (const SCEVSequentialUMinExpr *)S.getPointer());
case scUnknown:
- return ((SC *)this)->visitUnknown((const SCEVUnknown *)S);
+ return ((SC *)this)->visitUnknown((const SCEVUnknown *)S.getPointer());
case scCouldNotCompute:
- return ((SC *)this)->visitCouldNotCompute((const SCEVCouldNotCompute *)S);
+ return ((SC *)this)
+ ->visitCouldNotCompute((const SCEVCouldNotCompute *)S.getPointer());
}
llvm_unreachable("Unknown SCEV kind!");
}
@@ -655,15 +679,15 @@ template <typename SC, typename RetVal = void> struct SCEVVisitor {
///
/// Visitor implements:
/// // return true to follow this node.
-/// bool follow(const SCEV *S);
+/// bool follow(SCEVUse S);
/// // return true to terminate the search.
/// bool isDone();
template <typename SV> class SCEVTraversal {
SV &Visitor;
- SmallVector<const SCEV *, 8> Worklist;
- SmallPtrSet<const SCEV *, 8> Visited;
+ SmallVector<SCEVUse, 8> Worklist;
+ SmallPtrSet<SCEVUse, 8> Visited;
- void push(const SCEV *S) {
+ void push(SCEVUse S) {
if (Visited.insert(S).second && Visitor.follow(S))
Worklist.push_back(S);
}
@@ -671,10 +695,10 @@ template <typename SV> class SCEVTraversal {
public:
SCEVTraversal(SV &V) : Visitor(V) {}
- void visitAll(const SCEV *Root) {
+ void visitAll(SCEVUse Root) {
push(Root);
while (!Worklist.empty() && !Visitor.isDone()) {
- const SCEV *S = Worklist.pop_back_val();
+ SCEVUse S = Worklist.pop_back_val();
switch (S->getSCEVType()) {
case scConstant:
@@ -694,7 +718,7 @@ template <typename SV> class SCEVTraversal {
case scUMinExpr:
case scSequentialUMinExpr:
case scAddRecExpr:
- for (const auto *Op : S->operands()) {
+ for (const auto Op : S->operands()) {
push(Op);
if (Visitor.isDone())
break;
@@ -709,21 +733,20 @@ template <typename SV> class SCEVTraversal {
};
/// Use SCEVTraversal to visit all nodes in the given expression tree.
-template <typename SV> void visitAll(const SCEV *Root, SV &Visitor) {
+template <typename SV> void visitAll(SCEVUse Root, SV &Visitor) {
SCEVTraversal<SV> T(Visitor);
T.visitAll(Root);
}
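// A minimal sketch of the visitor contract documented above (illustrative,
// not part of the patch): stop as soon as any addrec is found in the tree.
struct AddRecFinder {
  bool Found = false;
  bool follow(SCEVUse S) { // return true to descend into S's operands
    Found |= isa<SCEVAddRecExpr>(S);
    return !Found;
  }
  bool isDone() { return Found; } // terminate the whole traversal early
};
// Usage: AddRecFinder F; visitAll(Root, F); then inspect F.Found.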
/// Return true if any node in \p Root satisfies the predicate \p Pred.
-template <typename PredTy>
-bool SCEVExprContains(const SCEV *Root, PredTy Pred) {
+template <typename PredTy> bool SCEVExprContains(SCEVUse Root, PredTy Pred) {
struct FindClosure {
bool Found = false;
PredTy Pred;
FindClosure(PredTy Pred) : Pred(Pred) {}
- bool follow(const SCEV *S) {
+ bool follow(SCEVUse S) {
if (!Pred(S))
return true;
@@ -743,7 +766,7 @@ bool SCEVExprContains(const SCEV *Root, PredTy Pred) {
/// The result from each visit is cached, so it will return the same
/// SCEV for the same input.
template <typename SC>
-class SCEVRewriteVisitor : public SCEVVisitor<SC, const SCEV *> {
+class SCEVRewriteVisitor : public SCEVVisitor<SC, SCEVUse> {
protected:
ScalarEvolution &SE;
// Memoize the result of each visit so that we only compute once for
@@ -751,84 +774,84 @@ class SCEVRewriteVisitor : public SCEVVisitor<SC, const SCEV *> {
// a SCEV is referenced by multiple SCEVs. Without memoization, this
// visit algorithm would have exponential time complexity in the worst
// case, causing the compiler to hang on certain tests.
- SmallDenseMap<const SCEV *, const SCEV *> RewriteResults;
+ SmallDenseMap<const SCEV *, SCEVUse> RewriteResults;
public:
SCEVRewriteVisitor(ScalarEvolution &SE) : SE(SE) {}
- const SCEV *visit(const SCEV *S) {
+ SCEVUse visit(SCEVUse S) {
auto It = RewriteResults.find(S);
if (It != RewriteResults.end())
return It->second;
- auto *Visited = SCEVVisitor<SC, const SCEV *>::visit(S);
+ auto Visited = SCEVVisitor<SC, SCEVUse>::visit(S);
auto Result = RewriteResults.try_emplace(S, Visited);
assert(Result.second && "Should insert a new entry");
return Result.first->second;
}
- const SCEV *visitConstant(const SCEVConstant *Constant) { return Constant; }
+ SCEVUse visitConstant(const SCEVConstant *Constant) { return Constant; }
- const SCEV *visitVScale(const SCEVVScale *VScale) { return VScale; }
+ SCEVUse visitVScale(const SCEVVScale *VScale) { return VScale; }
- const SCEV *visitPtrToIntExpr(const SCEVPtrToIntExpr *Expr) {
- const SCEV *Operand = ((SC *)this)->visit(Expr->getOperand());
+ SCEVUse visitPtrToIntExpr(const SCEVPtrToIntExpr *Expr) {
+ SCEVUse Operand = ((SC *)this)->visit(Expr->getOperand());
return Operand == Expr->getOperand()
? Expr
: SE.getPtrToIntExpr(Operand, Expr->getType());
}
- const SCEV *visitTruncateExpr(const SCEVTruncateExpr *Expr) {
- const SCEV *Operand = ((SC *)this)->visit(Expr->getOperand());
+ SCEVUse visitTruncateExpr(const SCEVTruncateExpr *Expr) {
+ SCEVUse Operand = ((SC *)this)->visit(Expr->getOperand());
return Operand == Expr->getOperand()
? Expr
: SE.getTruncateExpr(Operand, Expr->getType());
}
- const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
- const SCEV *Operand = ((SC *)this)->visit(Expr->getOperand());
+ SCEVUse visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
+ SCEVUse Operand = ((SC *)this)->visit(Expr->getOperand());
return Operand == Expr->getOperand()
? Expr
: SE.getZeroExtendExpr(Operand, Expr->getType());
}
- const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
- const SCEV *Operand = ((SC *)this)->visit(Expr->getOperand());
+ SCEVUse visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
+ SCEVUse Operand = ((SC *)this)->visit(Expr->getOperand());
return Operand == Expr->getOperand()
? Expr
: SE.getSignExtendExpr(Operand, Expr->getType());
}
- const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
+ SCEVUse visitAddExpr(const SCEVAddExpr *Expr) {
+ SmallVector<SCEVUse, 2> Operands;
bool Changed = false;
- for (const auto *Op : Expr->operands()) {
+ for (const auto Op : Expr->operands()) {
Operands.push_back(((SC *)this)->visit(Op));
Changed |= Op != Operands.back();
}
return !Changed ? Expr : SE.getAddExpr(Operands);
}
- const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
+ SCEVUse visitMulExpr(const SCEVMulExpr *Expr) {
+ SmallVector<SCEVUse, 2> Operands;
bool Changed = false;
- for (const auto *Op : Expr->operands()) {
+ for (const auto Op : Expr->operands()) {
Operands.push_back(((SC *)this)->visit(Op));
Changed |= Op != Operands.back();
}
return !Changed ? Expr : SE.getMulExpr(Operands);
}
- const SCEV *visitUDivExpr(const SCEVUDivExpr *Expr) {
- auto *LHS = ((SC *)this)->visit(Expr->getLHS());
- auto *RHS = ((SC *)this)->visit(Expr->getRHS());
+ SCEVUse visitUDivExpr(const SCEVUDivExpr *Expr) {
+ auto LHS = ((SC *)this)->visit(Expr->getLHS());
+ auto RHS = ((SC *)this)->visit(Expr->getRHS());
bool Changed = LHS != Expr->getLHS() || RHS != Expr->getRHS();
return !Changed ? Expr : SE.getUDivExpr(LHS, RHS);
}
- const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
+ SCEVUse visitAddRecExpr(const SCEVAddRecExpr *Expr) {
+ SmallVector<SCEVUse, 2> Operands;
bool Changed = false;
- for (const auto *Op : Expr->operands()) {
+ for (const auto Op : Expr->operands()) {
Operands.push_back(((SC *)this)->visit(Op));
Changed |= Op != Operands.back();
}
@@ -837,72 +860,70 @@ class SCEVRewriteVisitor : public SCEVVisitor<SC, const SCEV *> {
Expr->getNoWrapFlags());
}
- const SCEV *visitSMaxExpr(const SCEVSMaxExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
+ SCEVUse visitSMaxExpr(const SCEVSMaxExpr *Expr) {
+ SmallVector<SCEVUse, 2> Operands;
bool Changed = false;
- for (const auto *Op : Expr->operands()) {
+ for (const auto Op : Expr->operands()) {
Operands.push_back(((SC *)this)->visit(Op));
Changed |= Op != Operands.back();
}
return !Changed ? Expr : SE.getSMaxExpr(Operands);
}
- const SCEV *visitUMaxExpr(const SCEVUMaxExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
+ SCEVUse visitUMaxExpr(const SCEVUMaxExpr *Expr) {
+ SmallVector<SCEVUse, 2> Operands;
bool Changed = false;
- for (const auto *Op : Expr->operands()) {
+ for (const auto Op : Expr->operands()) {
Operands.push_back(((SC *)this)->visit(Op));
Changed |= Op != Operands.back();
}
return !Changed ? Expr : SE.getUMaxExpr(Operands);
}
- const SCEV *visitSMinExpr(const SCEVSMinExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
+ SCEVUse visitSMinExpr(const SCEVSMinExpr *Expr) {
+ SmallVector<SCEVUse, 2> Operands;
bool Changed = false;
- for (const auto *Op : Expr->operands()) {
+ for (const auto Op : Expr->operands()) {
Operands.push_back(((SC *)this)->visit(Op));
Changed |= Op != Operands.back();
}
return !Changed ? Expr : SE.getSMinExpr(Operands);
}
- const SCEV *visitUMinExpr(const SCEVUMinExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
+ SCEVUse visitUMinExpr(const SCEVUMinExpr *Expr) {
+ SmallVector<SCEVUse, 2> Operands;
bool Changed = false;
- for (const auto *Op : Expr->operands()) {
+ for (const auto Op : Expr->operands()) {
Operands.push_back(((SC *)this)->visit(Op));
Changed |= Op != Operands.back();
}
return !Changed ? Expr : SE.getUMinExpr(Operands);
}
- const SCEV *visitSequentialUMinExpr(const SCEVSequentialUMinExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
+ SCEVUse visitSequentialUMinExpr(const SCEVSequentialUMinExpr *Expr) {
+ SmallVector<SCEVUse, 2> Operands;
bool Changed = false;
- for (const auto *Op : Expr->operands()) {
+ for (const auto Op : Expr->operands()) {
Operands.push_back(((SC *)this)->visit(Op));
Changed |= Op != Operands.back();
}
return !Changed ? Expr : SE.getUMinExpr(Operands, /*Sequential=*/true);
}
- const SCEV *visitUnknown(const SCEVUnknown *Expr) { return Expr; }
+ SCEVUse visitUnknown(const SCEVUnknown *Expr) { return Expr; }
- const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
- return Expr;
- }
+ SCEVUse visitCouldNotCompute(const SCEVCouldNotCompute *Expr) { return Expr; }
};
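// Sketch of the intended extension point (illustrative, not part of the
// patch): a subclass overrides visit() or individual handlers and inherits
// the memoized traversal, keeping rewrites linear in the size of the DAG.
struct ReplaceOneSCEV : public SCEVRewriteVisitor<ReplaceOneSCEV> {
  using Base = SCEVRewriteVisitor<ReplaceOneSCEV>;
  SCEVUse From, To;
  ReplaceOneSCEV(ScalarEvolution &SE, SCEVUse From, SCEVUse To)
      : Base(SE), From(From), To(To) {}
  SCEVUse visit(SCEVUse S) {
    return S == From ? To : Base::visit(S); // recurse with memoization
  }
};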
using ValueToValueMap = DenseMap<const Value *, Value *>;
-using ValueToSCEVMapTy = DenseMap<const Value *, const SCEV *>;
+using ValueToSCEVMapTy = DenseMap<const Value *, SCEVUse>;
/// The SCEVParameterRewriter takes a scalar evolution expression and updates
/// the SCEVUnknown components following the Map (Value -> SCEV).
class SCEVParameterRewriter : public SCEVRewriteVisitor<SCEVParameterRewriter> {
public:
- static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE,
- ValueToSCEVMapTy &Map) {
+ static SCEVUse rewrite(SCEVUse Scev, ScalarEvolution &SE,
+ ValueToSCEVMapTy &Map) {
SCEVParameterRewriter Rewriter(SE, Map);
return Rewriter.visit(Scev);
}
@@ -910,7 +931,7 @@ class SCEVParameterRewriter : public SCEVRewriteVisitor<SCEVParameterRewriter> {
SCEVParameterRewriter(ScalarEvolution &SE, ValueToSCEVMapTy &M)
: SCEVRewriteVisitor(SE), Map(M) {}
- const SCEV *visitUnknown(const SCEVUnknown *Expr) {
+ SCEVUse visitUnknown(const SCEVUnknown *Expr) {
auto I = Map.find(Expr->getValue());
if (I == Map.end())
return Expr;
@@ -921,7 +942,7 @@ class SCEVParameterRewriter : public SCEVRewriteVisitor<SCEVParameterRewriter> {
ValueToSCEVMapTy &Map;
};
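// Illustrative use (not part of the patch; N is a hypothetical Value * that
// appears as a SCEVUnknown parameter inside the expression S):
//   ValueToSCEVMapTy Map;
//   Map[N] = SE.getConstant(N->getType(), 100);
//   SCEVUse Rewritten = SCEVParameterRewriter::rewrite(S, SE, Map);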
-using LoopToScevMapT = DenseMap<const Loop *, const SCEV *>;
+using LoopToScevMapT = DenseMap<const Loop *, SCEVUse>;
/// The SCEVLoopAddRecRewriter takes a scalar evolution expression and applies
/// the Map (Loop -> SCEV) to all AddRecExprs.
@@ -931,15 +952,15 @@ class SCEVLoopAddRecRewriter
SCEVLoopAddRecRewriter(ScalarEvolution &SE, LoopToScevMapT &M)
: SCEVRewriteVisitor(SE), Map(M) {}
- static const SCEV *rewrite(const SCEV *Scev, LoopToScevMapT &Map,
- ScalarEvolution &SE) {
+ static SCEVUse rewrite(SCEVUse Scev, LoopToScevMapT &Map,
+ ScalarEvolution &SE) {
SCEVLoopAddRecRewriter Rewriter(SE, Map);
return Rewriter.visit(Scev);
}
- const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
- for (const SCEV *Op : Expr->operands())
+ SCEVUse visitAddRecExpr(const SCEVAddRecExpr *Expr) {
+ SmallVector<SCEVUse, 2> Operands;
+ for (SCEVUse Op : Expr->operands())
Operands.push_back(visit(Op));
const Loop *L = Expr->getLoop();
diff --git a/llvm/lib/Analysis/DependenceAnalysis.cpp b/llvm/lib/Analysis/DependenceAnalysis.cpp
index e0e7dd18cd8d4..c50616eb2e0ea 100644
--- a/llvm/lib/Analysis/DependenceAnalysis.cpp
+++ b/llvm/lib/Analysis/DependenceAnalysis.cpp
@@ -1250,10 +1250,12 @@ bool DependenceInfo::strongSIVtest(const SCEV *Coeff, const SCEV *SrcConst,
if (const SCEV *UpperBound = collectUpperBound(CurLoop, Delta->getType())) {
LLVM_DEBUG(dbgs() << "\t UpperBound = " << *UpperBound);
LLVM_DEBUG(dbgs() << ", " << *UpperBound->getType() << "\n");
- const SCEV *AbsDelta =
- SE->isKnownNonNegative(Delta) ? Delta : SE->getNegativeSCEV(Delta);
- const SCEV *AbsCoeff =
- SE->isKnownNonNegative(Coeff) ? Coeff : SE->getNegativeSCEV(Coeff);
+ const SCEV *AbsDelta = SE->isKnownNonNegative(Delta)
+ ? Delta
+ : SE->getNegativeSCEV(Delta).getPointer();
+ const SCEV *AbsCoeff = SE->isKnownNonNegative(Coeff)
+ ? Coeff
+ : SE->getNegativeSCEV(Coeff).getPointer();
const SCEV *Product = SE->getMulExpr(UpperBound, AbsCoeff);
if (isKnownPredicate(CmpInst::ICMP_SGT, AbsDelta, Product)) {
// Distance greater than trip count - no dependence
@@ -1791,8 +1793,9 @@ bool DependenceInfo::weakZeroSrcSIVtest(const SCEV *DstCoeff,
const SCEV *AbsCoeff =
SE->isKnownNegative(ConstCoeff) ?
SE->getNegativeSCEV(ConstCoeff) : ConstCoeff;
- const SCEV *NewDelta =
- SE->isKnownNegative(ConstCoeff) ? SE->getNegativeSCEV(Delta) : Delta;
+ const SCEV *NewDelta = SE->isKnownNegative(ConstCoeff)
+ ? SE->getNegativeSCEV(Delta).getPointer()
+ : Delta;
// check that Delta/SrcCoeff < iteration count
// really check NewDelta < count*AbsCoeff
@@ -1900,8 +1903,9 @@ bool DependenceInfo::weakZeroDstSIVtest(const SCEV *SrcCoeff,
const SCEV *AbsCoeff =
SE->isKnownNegative(ConstCoeff) ?
SE->getNegativeSCEV(ConstCoeff) : ConstCoeff;
- const SCEV *NewDelta =
- SE->isKnownNegative(ConstCoeff) ? SE->getNegativeSCEV(Delta) : Delta;
+ const SCEV *NewDelta = SE->isKnownNegative(ConstCoeff)
+ ? SE->getNegativeSCEV(Delta).getPointer()
+ : Delta;
// check that Delta/SrcCoeff < iteration count
// really check NewDelta < count*AbsCoeff
diff --git a/llvm/lib/Analysis/IVDescriptors.cpp b/llvm/lib/Analysis/IVDescriptors.cpp
index 055f121e74341..cbfa768ca7a39 100644
--- a/llvm/lib/Analysis/IVDescriptors.cpp
+++ b/llvm/lib/Analysis/IVDescriptors.cpp
@@ -1485,7 +1485,7 @@ bool InductionDescriptor::isInductionPHI(
return false;
// Check that the PHI is consecutive.
- const SCEV *PhiScev = Expr ? Expr : SE->getSCEV(Phi);
+ const SCEV *PhiScev = Expr ? Expr : SE->getSCEV(Phi).getPointer();
const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PhiScev);
if (!AR) {
diff --git a/llvm/lib/Analysis/LoopCacheAnalysis.cpp b/llvm/lib/Analysis/LoopCacheAnalysis.cpp
index 284d8d16d264e..a7ed60cc022da 100644
--- a/llvm/lib/Analysis/LoopCacheAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopCacheAnalysis.cpp
@@ -489,7 +489,8 @@ bool IndexedReference::isConsecutive(const Loop &L, const SCEV *&Stride,
SE.getNoopOrSignExtend(ElemSize, WiderType));
const SCEV *CacheLineSize = SE.getConstant(Stride->getType(), CLS);
- Stride = SE.isKnownNegative(Stride) ? SE.getNegativeSCEV(Stride) : Stride;
+ Stride = SE.isKnownNegative(Stride) ? SE.getNegativeSCEV(Stride).getPointer()
+ : Stride;
return SE.isKnownPredicate(ICmpInst::ICMP_ULT, Stride, CacheLineSize);
}
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 704f92669a117..605b66f2ad633 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -253,6 +253,22 @@ static cl::opt<bool> UseContextForNoWrapFlagInference(
// SCEV class definitions
//===----------------------------------------------------------------------===//
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD void SCEVUse::dump() const {
+ print(dbgs());
+ dbgs() << '\n';
+}
+#endif
+
+void SCEVUse::print(raw_ostream &OS) const {
+ getPointer()->print(OS);
+ SCEV::NoWrapFlags Flags = static_cast<SCEV::NoWrapFlags>(getInt());
+ if (Flags & SCEV::FlagNUW)
+ OS << "(u nuw)";
+ if (Flags & SCEV::FlagNSW)
+ OS << "(u nsw)";
+}
+
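+// Illustrative rendering, not from the patch: with FlagNUW set on a use of
+// the affine addrec {0,+,1}<%loop>, the output is expected to be
+//   {0,+,1}<%loop>(u nuw)
+// and a use carrying both bits would append both suffixes: "(u nuw)(u nsw)".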
//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//
@@ -274,28 +290,28 @@ void SCEV::print(raw_ostream &OS) const {
return;
case scPtrToInt: {
const SCEVPtrToIntExpr *PtrToInt = cast<SCEVPtrToIntExpr>(this);
- const SCEV *Op = PtrToInt->getOperand();
- OS << "(ptrtoint " << *Op->getType() << " " << *Op << " to "
+ SCEVUse Op = PtrToInt->getOperand();
+ OS << "(ptrtoint " << *Op->getType() << " " << Op << " to "
<< *PtrToInt->getType() << ")";
return;
}
case scTruncate: {
const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
- const SCEV *Op = Trunc->getOperand();
- OS << "(trunc " << *Op->getType() << " " << *Op << " to "
+ SCEVUse Op = Trunc->getOperand();
+ OS << "(trunc " << *Op->getType() << " " << Op << " to "
<< *Trunc->getType() << ")";
return;
}
case scZeroExtend: {
const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
- const SCEV *Op = ZExt->getOperand();
- OS << "(zext " << *Op->getType() << " " << *Op << " to "
- << *ZExt->getType() << ")";
+ SCEVUse Op = ZExt->getOperand();
+ OS << "(zext " << *Op->getType() << " " << Op << " to " << *ZExt->getType()
+ << ")";
return;
}
case scSignExtend: {
const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
- const SCEV *Op = SExt->getOperand();
+ SCEVUse Op = SExt->getOperand();
OS << "(sext " << *Op->getType() << " " << *Op << " to "
<< *SExt->getType() << ")";
return;
@@ -345,8 +361,8 @@ void SCEV::print(raw_ostream &OS) const {
}
OS << "(";
ListSeparator LS(OpStr);
- for (const SCEV *Op : NAry->operands())
- OS << LS << *Op;
+ for (SCEVUse Op : NAry->operands())
+ OS << LS << Op;
OS << ")";
switch (NAry->getSCEVType()) {
case scAddExpr:
@@ -411,7 +427,7 @@ Type *SCEV::getType() const {
llvm_unreachable("Unknown SCEV kind!");
}
-ArrayRef<const SCEV *> SCEV::operands() const {
+ArrayRef<SCEVUse> SCEV::operands() const {
switch (getSCEVType()) {
case scConstant:
case scVScale:
@@ -476,51 +492,51 @@ bool SCEVCouldNotCompute::classof(const SCEV *S) {
return S->getSCEVType() == scCouldNotCompute;
}
-const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
+SCEVUse ScalarEvolution::getConstant(ConstantInt *V) {
FoldingSetNodeID ID;
ID.AddInteger(scConstant);
ID.AddPointer(V);
void *IP = nullptr;
- if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
+ if (SCEVUse S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
+ return S;
SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
UniqueSCEVs.InsertNode(S, IP);
return S;
}
-const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
+SCEVUse ScalarEvolution::getConstant(const APInt &Val) {
return getConstant(ConstantInt::get(getContext(), Val));
}
-const SCEV *
-ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
+SCEVUse ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
return getConstant(ConstantInt::get(ITy, V, isSigned));
}
-const SCEV *ScalarEvolution::getVScale(Type *Ty) {
+SCEVUse ScalarEvolution::getVScale(Type *Ty) {
FoldingSetNodeID ID;
ID.AddInteger(scVScale);
ID.AddPointer(Ty);
void *IP = nullptr;
- if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
+ if (SCEVUse S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
return S;
SCEV *S = new (SCEVAllocator) SCEVVScale(ID.Intern(SCEVAllocator), Ty);
UniqueSCEVs.InsertNode(S, IP);
return S;
}
-const SCEV *ScalarEvolution::getElementCount(Type *Ty, ElementCount EC) {
- const SCEV *Res = getConstant(Ty, EC.getKnownMinValue());
+SCEVUse ScalarEvolution::getElementCount(Type *Ty, ElementCount EC) {
+ SCEVUse Res = getConstant(Ty, EC.getKnownMinValue());
if (EC.isScalable())
Res = getMulExpr(Res, getVScale(Ty));
return Res;
}
SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
- const SCEV *op, Type *ty)
+ SCEVUse op, Type *ty)
: SCEV(ID, SCEVTy, computeExpressionSize(op)), Op(op), Ty(ty) {}
-SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op,
+SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, SCEVUse Op,
Type *ITy)
: SCEVCastExpr(ID, scPtrToInt, Op, ITy) {
assert(getOperand()->getType()->isPointerTy() && Ty->isIntegerTy() &&
@@ -528,26 +544,26 @@ SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op,
}
SCEVIntegralCastExpr::SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID,
- SCEVTypes SCEVTy, const SCEV *op,
+ SCEVTypes SCEVTy, SCEVUse op,
Type *ty)
: SCEVCastExpr(ID, SCEVTy, op, ty) {}
-SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op,
+SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, SCEVUse op,
Type *ty)
: SCEVIntegralCastExpr(ID, scTruncate, op, ty) {
assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
"Cannot truncate non-integer value!");
}
-SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
- const SCEV *op, Type *ty)
+SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID, SCEVUse op,
+ Type *ty)
: SCEVIntegralCastExpr(ID, scZeroExtend, op, ty) {
assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
"Cannot zero extend non-integer value!");
}
-SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
- const SCEV *op, Type *ty)
+SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID, SCEVUse op,
+ Type *ty)
: SCEVIntegralCastExpr(ID, scSignExtend, op, ty) {
assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
"Cannot sign extend non-integer value!");
@@ -555,7 +571,7 @@ SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
void SCEVUnknown::deleted() {
// Clear this SCEVUnknown from various maps.
- SE->forgetMemoizedResults(this);
+ SE->forgetMemoizedResults(SCEVUse(this));
// Remove this SCEVUnknown from the uniquing map.
SE->UniqueSCEVs.RemoveNode(this);
@@ -566,7 +582,7 @@ void SCEVUnknown::deleted() {
void SCEVUnknown::allUsesReplacedWith(Value *New) {
// Clear this SCEVUnknown from various maps.
- SE->forgetMemoizedResults(this);
+ SE->forgetMemoizedResults(SCEVUse(this));
// Remove this SCEVUnknown from the uniquing map.
SE->UniqueSCEVs.RemoveNode(this);
@@ -678,10 +694,10 @@ CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
// If the max analysis depth was reached, return std::nullopt, assuming we do
// not know if they are equivalent for sure.
static std::optional<int>
-CompareSCEVComplexity(EquivalenceClasses<const SCEV *> &EqCacheSCEV,
+CompareSCEVComplexity(EquivalenceClasses<SCEVUse> &EqCacheSCEV,
EquivalenceClasses<const Value *> &EqCacheValue,
- const LoopInfo *const LI, const SCEV *LHS,
- const SCEV *RHS, DominatorTree &DT, unsigned Depth = 0) {
+ const LoopInfo *const LI, SCEVUse LHS, SCEVUse RHS,
+ DominatorTree &DT, unsigned Depth = 0) {
// Fast-path: SCEVs are uniqued so we can do a quick equality check.
if (LHS == RHS)
return 0;
@@ -764,8 +780,8 @@ CompareSCEVComplexity(EquivalenceClasses<const SCEV *> &EqCacheSCEV,
case scSMinExpr:
case scUMinExpr:
case scSequentialUMinExpr: {
- ArrayRef<const SCEV *> LOps = LHS->operands();
- ArrayRef<const SCEV *> ROps = RHS->operands();
+ ArrayRef<SCEVUse> LOps = LHS->operands();
+ ArrayRef<SCEVUse> ROps = RHS->operands();
// Lexicographically compare n-ary-like expressions.
unsigned LNumOps = LOps.size(), RNumOps = ROps.size();
@@ -797,15 +813,15 @@ CompareSCEVComplexity(EquivalenceClasses<const SCEV *> &EqCacheSCEV,
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
-static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
- LoopInfo *LI, DominatorTree &DT) {
+static void GroupByComplexity(SmallVectorImpl<SCEVUse> &Ops, LoopInfo *LI,
+ DominatorTree &DT) {
if (Ops.size() < 2) return; // Noop
- EquivalenceClasses<const SCEV *> EqCacheSCEV;
+ EquivalenceClasses<SCEVUse> EqCacheSCEV;
EquivalenceClasses<const Value *> EqCacheValue;
// Whether LHS has provably less complexity than RHS.
- auto IsLessComplex = [&](const SCEV *LHS, const SCEV *RHS) {
+ auto IsLessComplex = [&](SCEVUse LHS, SCEVUse RHS) {
auto Complexity =
CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT);
return Complexity && *Complexity < 0;
@@ -813,23 +829,22 @@ static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
if (Ops.size() == 2) {
// This is the common case, which also happens to be trivially simple.
// Special case it.
- const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
+ SCEVUse &LHS = Ops[0], &RHS = Ops[1];
if (IsLessComplex(RHS, LHS))
std::swap(LHS, RHS);
return;
}
// Do the rough sort by complexity.
- llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
- return IsLessComplex(LHS, RHS);
- });
+ llvm::stable_sort(
+ Ops, [&](SCEVUse LHS, SCEVUse RHS) { return IsLessComplex(LHS, RHS); });
// Now that we are sorted by complexity, group elements of the same
// complexity. Note that this is, at worst, N^2, but the vector is likely to
// be extremely short in practice. Note that we take this approach because we
// do not want to depend on the addresses of the objects we are grouping.
for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
- const SCEV *S = Ops[i];
+ SCEVUse S = Ops[i];
unsigned Complexity = S->getSCEVType();
// If there are any objects of the same complexity and same value as this
@@ -847,8 +862,8 @@ static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
/// Returns true if \p Ops contains a huge SCEV (the subtree of S contains at
/// least HugeExprThreshold nodes).
-static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
- return any_of(Ops, [](const SCEV *S) {
+static bool hasHugeExpression(ArrayRef<SCEVUse> Ops) {
+ return any_of(Ops, [](SCEVUse S) {
return S->getExpressionSize() >= HugeExprThreshold;
});
}
@@ -858,9 +873,8 @@ static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
//===----------------------------------------------------------------------===//
/// Compute BC(It, K). The result has width W. Assume, K > 0.
-static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
- ScalarEvolution &SE,
- Type *ResultTy) {
+static SCEVUse BinomialCoefficient(SCEVUse It, unsigned K, ScalarEvolution &SE,
+ Type *ResultTy) {
// Handle the simplest case efficiently.
if (K == 1)
return SE.getTruncateOrZeroExtend(It, ResultTy);
@@ -947,15 +961,15 @@ static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
// Calculate the product, at width T+W
IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
CalculationBits);
- const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
+ SCEVUse Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
for (unsigned i = 1; i != K; ++i) {
- const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
+ SCEVUse S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
Dividend = SE.getMulExpr(Dividend,
SE.getTruncateOrZeroExtend(S, CalculationTy));
}
// Divide by 2^T
- const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));
+ SCEVUse DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));
// Truncate the result, and divide by K! / 2^T.
@@ -971,21 +985,20 @@ static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
/// A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
-const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
- ScalarEvolution &SE) const {
+SCEVUse SCEVAddRecExpr::evaluateAtIteration(SCEVUse It,
+ ScalarEvolution &SE) const {
return evaluateAtIteration(operands(), It, SE);
}
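// Worked example (illustrative): for a quadratic addrec {A,+,B,+,C}, the
// value at iteration It is
//   A*BC(It,0) + B*BC(It,1) + C*BC(It,2) = A + B*It + C*(It*(It-1)/2),
// so the affine {0,+,4} evaluates to 4*It and {0,+,1,+,1} to It*(It+1)/2.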
-const SCEV *
-SCEVAddRecExpr::evaluateAtIteration(ArrayRef<const SCEV *> Operands,
- const SCEV *It, ScalarEvolution &SE) {
+SCEVUse SCEVAddRecExpr::evaluateAtIteration(ArrayRef<SCEVUse> Operands,
+ SCEVUse It, ScalarEvolution &SE) {
assert(Operands.size() > 0);
- const SCEV *Result = Operands[0];
+ SCEVUse Result = Operands[0];
for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
// The computation is correct in the face of overflow provided that the
// multiplication is performed _after_ the evaluation of the binomial
// coefficient.
- const SCEV *Coeff = BinomialCoefficient(It, i, SE, Result->getType());
+ SCEVUse Coeff = BinomialCoefficient(It, i, SE, Result->getType());
if (isa<SCEVCouldNotCompute>(Coeff))
return Coeff;
@@ -998,8 +1011,7 @@ SCEVAddRecExpr::evaluateAtIteration(ArrayRef<const SCEV *> Operands,
// SCEV Expression folder implementations
//===----------------------------------------------------------------------===//
-const SCEV *ScalarEvolution::getLosslessPtrToIntExpr(const SCEV *Op,
- unsigned Depth) {
+SCEVUse ScalarEvolution::getLosslessPtrToIntExpr(SCEVUse Op, unsigned Depth) {
assert(Depth <= 1 &&
"getLosslessPtrToIntExpr() should self-recurse at most once.");
@@ -1016,7 +1028,7 @@ const SCEV *ScalarEvolution::getLosslessPtrToIntExpr(const SCEV *Op,
void *IP = nullptr;
// Is there already an expression for such a cast?
- if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
+ if (SCEVUse S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
return S;
// It isn't legal for optimizations to construct new ptrtoint expressions
@@ -1073,12 +1085,12 @@ const SCEV *ScalarEvolution::getLosslessPtrToIntExpr(const SCEV *Op,
public:
SCEVPtrToIntSinkingRewriter(ScalarEvolution &SE) : SCEVRewriteVisitor(SE) {}
- static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE) {
+ static SCEVUse rewrite(SCEVUse Scev, ScalarEvolution &SE) {
SCEVPtrToIntSinkingRewriter Rewriter(SE);
return Rewriter.visit(Scev);
}
- const SCEV *visit(const SCEV *S) {
+ SCEVUse visit(SCEVUse S) {
Type *STy = S->getType();
// If the expression is not pointer-typed, just keep it as-is.
if (!STy->isPointerTy())
@@ -1087,27 +1099,27 @@ const SCEV *ScalarEvolution::getLosslessPtrToIntExpr(const SCEV *Op,
return Base::visit(S);
}
- const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
+ SCEVUse visitAddExpr(const SCEVAddExpr *Expr) {
+ SmallVector<SCEVUse, 2> Operands;
bool Changed = false;
- for (const auto *Op : Expr->operands()) {
+ for (const auto Op : Expr->operands()) {
Operands.push_back(visit(Op));
Changed |= Op != Operands.back();
}
return !Changed ? Expr : SE.getAddExpr(Operands, Expr->getNoWrapFlags());
}
- const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
+ SCEVUse visitMulExpr(const SCEVMulExpr *Expr) {
+ SmallVector<SCEVUse, 2> Operands;
bool Changed = false;
- for (const auto *Op : Expr->operands()) {
+ for (const auto Op : Expr->operands()) {
Operands.push_back(visit(Op));
Changed |= Op != Operands.back();
}
return !Changed ? Expr : SE.getMulExpr(Operands, Expr->getNoWrapFlags());
}
- const SCEV *visitUnknown(const SCEVUnknown *Expr) {
+ SCEVUse visitUnknown(const SCEVUnknown *Expr) {
assert(Expr->getType()->isPointerTy() &&
"Should only reach pointer-typed SCEVUnknown's.");
return SE.getLosslessPtrToIntExpr(Expr, /*Depth=*/1);
@@ -1115,25 +1127,24 @@ const SCEV *ScalarEvolution::getLosslessPtrToIntExpr(const SCEV *Op,
};
// And actually perform the cast sinking.
- const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(Op, *this);
+ SCEVUse IntOp = SCEVPtrToIntSinkingRewriter::rewrite(Op, *this);
assert(IntOp->getType()->isIntegerTy() &&
"We must have succeeded in sinking the cast, "
"and ending up with an integer-typed expression!");
return IntOp;
}
-const SCEV *ScalarEvolution::getPtrToIntExpr(const SCEV *Op, Type *Ty) {
+SCEVUse ScalarEvolution::getPtrToIntExpr(SCEVUse Op, Type *Ty) {
assert(Ty->isIntegerTy() && "Target type must be an integer type!");
- const SCEV *IntOp = getLosslessPtrToIntExpr(Op);
+ SCEVUse IntOp = getLosslessPtrToIntExpr(Op);
if (isa<SCEVCouldNotCompute>(IntOp))
return IntOp;
return getTruncateOrZeroExtend(IntOp, Ty);
}
-const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
- unsigned Depth) {
+SCEVUse ScalarEvolution::getTruncateExpr(SCEVUse Op, Type *Ty, unsigned Depth) {
assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
"This is not a truncating conversion!");
assert(isSCEVable(Ty) &&
@@ -1146,7 +1157,8 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
ID.AddPointer(Op);
ID.AddPointer(Ty);
void *IP = nullptr;
- if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
+ if (SCEVUse S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
+ return S;
// Fold if the operand is constant.
if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
@@ -1179,11 +1191,11 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
// that replace other casts.
if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
auto *CommOp = cast<SCEVCommutativeExpr>(Op);
- SmallVector<const SCEV *, 4> Operands;
+ SmallVector<SCEVUse, 4> Operands;
unsigned numTruncs = 0;
for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
++i) {
- const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
+ SCEVUse S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) &&
isa<SCEVTruncateExpr>(S))
numTruncs++;
@@ -1199,14 +1211,14 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
// Although we checked in the beginning that ID is not in the cache, it is
// possible that during recursion and different modification ID was inserted
// into the cache. So if we find it, just return it.
- if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
+ if (SCEVUse S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
return S;
}
// If the input value is a chrec scev, truncate the chrec's operands.
if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
- SmallVector<const SCEV *, 4> Operands;
- for (const SCEV *Op : AddRec->operands())
+ SmallVector<SCEVUse, 4> Operands;
+ for (SCEVUse Op : AddRec->operands())
Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
}
@@ -1229,9 +1241,9 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
-static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
- ICmpInst::Predicate *Pred,
- ScalarEvolution *SE) {
+static SCEVUse getSignedOverflowLimitForStep(SCEVUse Step,
+ ICmpInst::Predicate *Pred,
+ ScalarEvolution *SE) {
unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
if (SE->isKnownPositive(Step)) {
*Pred = ICmpInst::ICMP_SLT;
@@ -1249,9 +1261,9 @@ static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
-static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
- ICmpInst::Predicate *Pred,
- ScalarEvolution *SE) {
+static SCEVUse getUnsignedOverflowLimitForStep(SCEVUse Step,
+ ICmpInst::Predicate *Pred,
+ ScalarEvolution *SE) {
unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
*Pred = ICmpInst::ICMP_ULT;
@@ -1262,8 +1274,8 @@ static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
namespace {
struct ExtendOpTraitsBase {
- typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
- unsigned);
+ typedef SCEVUse (ScalarEvolution:: *GetExtendExprTy)(SCEVUse, Type *,
+ unsigned);
};
// Used to make code generic over signed and unsigned overflow.
@@ -1274,7 +1286,7 @@ template <typename ExtendOp> struct ExtendOpTraits {
//
// static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
//
- // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
+ // static SCEVUse getOverflowLimitForStep(SCEVUse Step,
// ICmpInst::Predicate *Pred,
// ScalarEvolution *SE);
};
@@ -1285,9 +1297,9 @@ struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
static const GetExtendExprTy GetExtendExpr;
- static const SCEV *getOverflowLimitForStep(const SCEV *Step,
- ICmpInst::Predicate *Pred,
- ScalarEvolution *SE) {
+ static SCEVUse getOverflowLimitForStep(SCEVUse Step,
+ ICmpInst::Predicate *Pred,
+ ScalarEvolution *SE) {
return getSignedOverflowLimitForStep(Step, Pred, SE);
}
};
@@ -1301,9 +1313,9 @@ struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
static const GetExtendExprTy GetExtendExpr;
- static const SCEV *getOverflowLimitForStep(const SCEV *Step,
- ICmpInst::Predicate *Pred,
- ScalarEvolution *SE) {
+ static SCEVUse getOverflowLimitForStep(SCEVUse Step,
+ ICmpInst::Predicate *Pred,
+ ScalarEvolution *SE) {
return getUnsignedOverflowLimitForStep(Step, Pred, SE);
}
};
@@ -1321,14 +1333,14 @@ const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
// expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)"
template <typename ExtendOpTy>
-static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
- ScalarEvolution *SE, unsigned Depth) {
+static SCEVUse getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
+ ScalarEvolution *SE, unsigned Depth) {
auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
const Loop *L = AR->getLoop();
- const SCEV *Start = AR->getStart();
- const SCEV *Step = AR->getStepRecurrence(*SE);
+ SCEVUse Start = AR->getStart();
+ SCEVUse Step = AR->getStepRecurrence(*SE);
// Check for a simple looking step prior to loop entry.
const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
@@ -1339,7 +1351,7 @@ static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
// subtraction is expensive. For this purpose, perform a quick and dirty
// difference, by checking for Step in the operand list. Note, that
// SA might have repeated ops, like %a + %a + ..., so only remove one.
- SmallVector<const SCEV *, 4> DiffOps(SA->operands());
+ SmallVector<SCEVUse, 4> DiffOps(SA->operands());
for (auto It = DiffOps.begin(); It != DiffOps.end(); ++It)
if (*It == Step) {
DiffOps.erase(It);
@@ -1355,7 +1367,7 @@ static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
// 1. NSW/NUW flags on the step increment.
auto PreStartFlags =
ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
- const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
+ SCEVUse PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));
@@ -1363,7 +1375,7 @@ static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
// "S+X does not sign/unsign-overflow".
//
- const SCEV *BECount = SE->getBackedgeTakenCount(L);
+ SCEVUse BECount = SE->getBackedgeTakenCount(L);
if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
!isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
return PreStart;
@@ -1371,7 +1383,7 @@ static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
// 2. Direct overflow check on the step operation's expression.
unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
- const SCEV *OperandExtendedStart =
+ SCEVUse OperandExtendedStart =
SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
(SE->*GetExtendExpr)(Step, WideTy, Depth));
if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
@@ -1386,7 +1398,7 @@ static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
// 3. Loop precondition.
ICmpInst::Predicate Pred;
- const SCEV *OverflowLimit =
+ SCEVUse OverflowLimit =
ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);
if (OverflowLimit &&
@@ -1398,12 +1410,11 @@ static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
// Get the normalized zero or sign extended expression for this AddRec's Start.
template <typename ExtendOpTy>
-static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
- ScalarEvolution *SE,
- unsigned Depth) {
+static SCEVUse getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
+ ScalarEvolution *SE, unsigned Depth) {
auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
- const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
+ SCEVUse PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
if (!PreStart)
return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);
@@ -1445,8 +1456,7 @@ static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
template <typename ExtendOpTy>
-bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
- const SCEV *Step,
+bool ScalarEvolution::proveNoWrapByVaryingStart(SCEVUse Start, SCEVUse Step,
const Loop *L) {
auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
@@ -1461,7 +1471,7 @@ bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
APInt StartAI = StartC->getAPInt();
for (unsigned Delta : {-2, -1, 1, 2}) {
- const SCEV *PreStart = getConstant(StartAI - Delta);
+ SCEVUse PreStart = getConstant(StartAI - Delta);
FoldingSetNodeID ID;
ID.AddInteger(scAddRecExpr);
@@ -1475,9 +1485,9 @@ bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
// Give up if we don't already have the add recurrence we need because
// actually constructing an add recurrence is relatively expensive.
if (PreAR && PreAR->getNoWrapFlags(WrapType)) { // proves (2)
- const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
+ SCEVUse DeltaS = getConstant(StartC->getType(), Delta);
ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
- const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
+ SCEVUse Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
DeltaS, &Pred, this);
if (Limit && isKnownPredicate(Pred, PreAR, Limit)) // proves (1)
return true;
@@ -1515,7 +1525,7 @@ static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
// ConstantStart, x is an arbitrary \p Step, and n is the loop trip count.
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
const APInt &ConstantStart,
- const SCEV *Step) {
+ SCEVUse Step) {
const unsigned BitWidth = ConstantStart.getBitWidth();
const uint32_t TZ = SE.getMinTrailingZeros(Step);
if (TZ)
@@ -1525,10 +1535,9 @@ static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
}
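
The hunk above cuts off after `if (TZ)`; the remainder of extractConstantWithoutWrapping presumably returns ConstantStart reduced modulo 2^TZ, since adding any multiple of a Step with TZ known trailing zero bits can never change those low bits. The same arithmetic on plain 64-bit values, standalone:

#include <cstdint>

// D = ConstantStart mod 2^TZ: the low TZ bits of the start, which adding
// any multiple of Step (a multiple of 2^TZ) can never change.
static uint64_t lowBitsWithoutWrapping(uint64_t ConstantStart, unsigned TZ) {
  if (TZ == 0)
    return 0;
  if (TZ >= 64)
    return ConstantStart;
  return ConstantStart & ((uint64_t(1) << TZ) - 1);
}

int main() {
  // Start = 13, Step = 8 (TZ = 3): D = 5, and 13 + n*8 keeps 5 in its low
  // three bits for every n, so the wrapping-prone part lives in C - D.
  for (uint64_t n = 0; n < 1000; ++n)
    if (((13 + n * 8) & 7) != lowBitsWithoutWrapping(13, 3))
      return 1;
  return 0;
}
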
static void insertFoldCacheEntry(
- const ScalarEvolution::FoldID &ID, const SCEV *S,
- DenseMap<ScalarEvolution::FoldID, const SCEV *> &FoldCache,
- DenseMap<const SCEV *, SmallVector<ScalarEvolution::FoldID, 2>>
- &FoldCacheUser) {
+ const ScalarEvolution::FoldID &ID, SCEVUse S,
+ DenseMap<ScalarEvolution::FoldID, SCEVUse> &FoldCache,
+ DenseMap<SCEVUse, SmallVector<ScalarEvolution::FoldID, 2>> &FoldCacheUser) {
auto I = FoldCache.insert({ID, S});
if (!I.second) {
// Remove FoldCacheUser entry for ID when replacing an existing FoldCache
@@ -1547,8 +1556,8 @@ static void insertFoldCacheEntry(
R.first->second.push_back(ID);
}
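
insertFoldCacheEntry keeps two maps in sync: FoldCache answers "was this (kind, operand, type) folded before?", and FoldCacheUser records, per result, which fold IDs point at it so invalidation can erase exactly those. A standalone analogue of the replace-then-record logic, with std::map standing in for DenseMap and strings for the LLVM types:

#include <algorithm>
#include <cassert>
#include <map>
#include <string>
#include <vector>

using FoldID = std::string; // stands in for ScalarEvolution::FoldID
using Expr = std::string;   // stands in for SCEVUse

void insertFoldCacheEntry(const FoldID &ID, const Expr &S,
                          std::map<FoldID, Expr> &FoldCache,
                          std::map<Expr, std::vector<FoldID>> &FoldCacheUser) {
  auto I = FoldCache.insert({ID, S});
  if (!I.second) {
    // Replacing an existing entry: drop ID from the old result's user
    // list first, or FoldCacheUser would keep a stale back-reference.
    auto &UserIDs = FoldCacheUser[I.first->second];
    auto It = std::find(UserIDs.begin(), UserIDs.end(), ID);
    assert(It != UserIDs.end() && "two-way mapping invariant violated");
    UserIDs.erase(It);
    I.first->second = S;
  }
  FoldCacheUser[S].push_back(ID);
}

int main() {
  std::map<FoldID, Expr> FoldCache;
  std::map<Expr, std::vector<FoldID>> FoldCacheUser;
  insertFoldCacheEntry("zext.i32.x", "S1", FoldCache, FoldCacheUser);
  insertFoldCacheEntry("zext.i32.x", "S2", FoldCache, FoldCacheUser); // replace
  return (FoldCacheUser["S1"].empty() && FoldCacheUser["S2"].size() == 1) ? 0
                                                                          : 1;
}
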
-const SCEV *
-ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
+SCEVUse ScalarEvolution::getZeroExtendExpr(SCEVUse Op, Type *Ty,
+ unsigned Depth) {
assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
"This is not an extending conversion!");
assert(isSCEVable(Ty) &&
@@ -1561,14 +1570,14 @@ ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
if (Iter != FoldCache.end())
return Iter->second;
- const SCEV *S = getZeroExtendExprImpl(Op, Ty, Depth);
+ SCEVUse S = getZeroExtendExprImpl(Op, Ty, Depth);
if (!isa<SCEVZeroExtendExpr>(S))
insertFoldCacheEntry(ID, S, FoldCache, FoldCacheUser);
return S;
}
-const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
- unsigned Depth) {
+SCEVUse ScalarEvolution::getZeroExtendExprImpl(SCEVUse Op, Type *Ty,
+ unsigned Depth) {
assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
"This is not an extending conversion!");
assert(isSCEVable(Ty) && "This is not a conversion to a SCEVable type!");
@@ -1589,7 +1598,8 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
ID.AddPointer(Op);
ID.AddPointer(Ty);
void *IP = nullptr;
- if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
+ if (SCEVUse S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
+ return S;
if (Depth > MaxCastDepth) {
SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
Op, Ty);
@@ -1602,7 +1612,7 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
// It's possible the bits taken off by the truncate were all zero bits. If
// so, we should be able to simplify this further.
- const SCEV *X = ST->getOperand();
+ SCEVUse X = ST->getOperand();
ConstantRange CR = getUnsignedRange(X);
unsigned TruncBits = getTypeSizeInBits(ST->getType());
unsigned NewBits = getTypeSizeInBits(Ty);
@@ -1617,8 +1627,8 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
// this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
if (AR->isAffine()) {
- const SCEV *Start = AR->getStart();
- const SCEV *Step = AR->getStepRecurrence(*this);
+ SCEVUse Start = AR->getStart();
+ SCEVUse Step = AR->getStepRecurrence(*this);
unsigned BitWidth = getTypeSizeInBits(AR->getType());
const Loop *L = AR->getLoop();
@@ -1639,34 +1649,33 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
// in infinite recursion. In the latter case, the analysis code will
// cope with a conservative value, and it will take care to purge
// that value once it has finished.
- const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
+ SCEVUse MaxBECount = getConstantMaxBackedgeTakenCount(L);
if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
// Manually compute the final value for AR, checking for overflow.
// Check whether the backedge-taken count can be losslessly cast to
// the addrec's type. The count is always unsigned.
- const SCEV *CastedMaxBECount =
+ SCEVUse CastedMaxBECount =
getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
- const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
+ SCEVUse RecastedMaxBECount = getTruncateOrZeroExtend(
CastedMaxBECount, MaxBECount->getType(), Depth);
if (MaxBECount == RecastedMaxBECount) {
Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
// Check whether Start+Step*MaxBECount has no unsigned overflow.
- const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
- SCEV::FlagAnyWrap, Depth + 1);
- const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
- SCEV::FlagAnyWrap,
- Depth + 1),
- WideTy, Depth + 1);
- const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
- const SCEV *WideMaxBECount =
- getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
- const SCEV *OperandExtendedAdd =
- getAddExpr(WideStart,
- getMulExpr(WideMaxBECount,
- getZeroExtendExpr(Step, WideTy, Depth + 1),
- SCEV::FlagAnyWrap, Depth + 1),
- SCEV::FlagAnyWrap, Depth + 1);
+ SCEVUse ZMul =
+ getMulExpr(CastedMaxBECount, Step, SCEV::FlagAnyWrap, Depth + 1);
+ SCEVUse ZAdd = getZeroExtendExpr(
+ getAddExpr(Start, ZMul, SCEV::FlagAnyWrap, Depth + 1), WideTy,
+ Depth + 1);
+ SCEVUse WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
+ SCEVUse WideMaxBECount =
+ getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
+ SCEVUse OperandExtendedAdd =
+ getAddExpr(WideStart,
+ getMulExpr(WideMaxBECount,
+ getZeroExtendExpr(Step, WideTy, Depth + 1),
+ SCEV::FlagAnyWrap, Depth + 1),
+ SCEV::FlagAnyWrap, Depth + 1);
if (ZAdd == OperandExtendedAdd) {
// Cache knowledge of AR NUW, which is propagated to this AddRec.
setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
@@ -1723,8 +1732,8 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
// For a negative step, we can extend the operands iff doing so only
// traverses values in the range zext([0,UINT_MAX]).
if (isKnownNegative(Step)) {
- const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
- getSignedRangeMin(Step));
+ SCEVUse N = getConstant(APInt::getMaxValue(BitWidth) -
+ getSignedRangeMin(Step));
if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) {
// Cache knowledge of AR NW, which is propagated to this
@@ -1747,10 +1756,10 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
const APInt &C = SC->getAPInt();
const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
if (D != 0) {
- const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
- const SCEV *SResidual =
+ SCEVUse SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
+ SCEVUse SResidual =
getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
- const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
+ SCEVUse SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
return getAddExpr(SZExtD, SZExtR,
(SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
Depth + 1);
@@ -1768,8 +1777,8 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
// zext(A % B) --> zext(A) % zext(B)
{
- const SCEV *LHS;
- const SCEV *RHS;
+ SCEVUse LHS;
+ SCEVUse RHS;
if (matchURem(Op, LHS, RHS))
return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1),
getZeroExtendExpr(RHS, Ty, Depth + 1));
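
The zext(A % B) --> zext(A) % zext(B) fold works because an unsigned remainder never exceeds its operands, so zero-extension commutes with it; the identity is easy to confirm exhaustively at a small width:

#include <cstdint>

int main() {
  // Exhaustive at i8: zext16(a urem b) == zext16(a) urem zext16(b).
  for (unsigned a = 0; a < 256; ++a)
    for (unsigned b = 1; b < 256; ++b) {
      uint16_t ZextOfRem = uint16_t(uint8_t(a % b));  // zext(A % B)
      uint16_t RemOfZext = uint16_t(a) % uint16_t(b); // zext(A) % zext(B)
      if (ZextOfRem != RemOfZext)
        return 1;
    }
  return 0; // identity holds for every pair with b != 0
}
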
@@ -1785,8 +1794,8 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
if (SA->hasNoUnsignedWrap()) {
// If the addition does not unsign overflow then we can, by definition,
// commute the zero extension with the addition operation.
- SmallVector<const SCEV *, 4> Ops;
- for (const auto *Op : SA->operands())
+ SmallVector<SCEVUse, 4> Ops;
+ for (const auto Op : SA->operands())
Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
}
@@ -1802,10 +1811,10 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
if (D != 0) {
- const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
- const SCEV *SResidual =
+ SCEVUse SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
+ SCEVUse SResidual =
getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
- const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
+ SCEVUse SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
return getAddExpr(SZExtD, SZExtR,
(SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
Depth + 1);
@@ -1818,8 +1827,8 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
if (SM->hasNoUnsignedWrap()) {
// If the multiply does not unsign overflow then we can, by definition,
// commute the zero extension with the multiply operation.
- SmallVector<const SCEV *, 4> Ops;
- for (const auto *Op : SM->operands())
+ SmallVector<SCEVUse, 4> Ops;
+ for (const auto Op : SM->operands())
Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1);
}
@@ -1855,8 +1864,8 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
// zext(umax(x, y)) -> umax(zext(x), zext(y))
if (isa<SCEVUMinExpr>(Op) || isa<SCEVUMaxExpr>(Op)) {
auto *MinMax = cast<SCEVMinMaxExpr>(Op);
- SmallVector<const SCEV *, 4> Operands;
- for (auto *Operand : MinMax->operands())
+ SmallVector<SCEVUse, 4> Operands;
+ for (auto Operand : MinMax->operands())
Operands.push_back(getZeroExtendExpr(Operand, Ty));
if (isa<SCEVUMinExpr>(MinMax))
return getUMinExpr(Operands);
@@ -1866,15 +1875,16 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
// zext(umin_seq(x, y)) -> umin_seq(zext(x), zext(y))
if (auto *MinMax = dyn_cast<SCEVSequentialMinMaxExpr>(Op)) {
assert(isa<SCEVSequentialUMinExpr>(MinMax) && "Not supported!");
- SmallVector<const SCEV *, 4> Operands;
- for (auto *Operand : MinMax->operands())
+ SmallVector<SCEVUse, 4> Operands;
+ for (auto Operand : MinMax->operands())
Operands.push_back(getZeroExtendExpr(Operand, Ty));
return getUMinExpr(Operands, /*Sequential*/ true);
}
// The cast wasn't folded; create an explicit cast node.
// Recompute the insert position, as it may have been invalidated.
- if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
+ if (SCEVUse S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
+ return S;
SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
Op, Ty);
UniqueSCEVs.InsertNode(S, IP);
@@ -1882,8 +1892,8 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
return S;
}
-const SCEV *
-ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
+SCEVUse ScalarEvolution::getSignExtendExpr(SCEVUse Op, Type *Ty,
+ unsigned Depth) {
assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
"This is not an extending conversion!");
assert(isSCEVable(Ty) &&
@@ -1896,14 +1906,14 @@ ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
if (Iter != FoldCache.end())
return Iter->second;
- const SCEV *S = getSignExtendExprImpl(Op, Ty, Depth);
+ SCEVUse S = getSignExtendExprImpl(Op, Ty, Depth);
if (!isa<SCEVSignExtendExpr>(S))
insertFoldCacheEntry(ID, S, FoldCache, FoldCacheUser);
return S;
}
-const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
- unsigned Depth) {
+SCEVUse ScalarEvolution::getSignExtendExprImpl(SCEVUse Op, Type *Ty,
+ unsigned Depth) {
assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
"This is not an extending conversion!");
assert(isSCEVable(Ty) && "This is not a conversion to a SCEVable type!");
@@ -1929,7 +1939,8 @@ const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
ID.AddPointer(Op);
ID.AddPointer(Ty);
void *IP = nullptr;
- if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
+ if (SCEVUse S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
+ return S;
// Limit recursion depth.
if (Depth > MaxCastDepth) {
SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
@@ -1943,7 +1954,7 @@ const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
// It's possible the bits taken off by the truncate were all sign bits. If
// so, we should be able to simplify this further.
- const SCEV *X = ST->getOperand();
+ SCEVUse X = ST->getOperand();
ConstantRange CR = getSignedRange(X);
unsigned TruncBits = getTypeSizeInBits(ST->getType());
unsigned NewBits = getTypeSizeInBits(Ty);
@@ -1957,8 +1968,8 @@ const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
if (SA->hasNoSignedWrap()) {
// If the addition does not sign overflow then we can, by definition,
// commute the sign extension with the addition operation.
- SmallVector<const SCEV *, 4> Ops;
- for (const auto *Op : SA->operands())
+ SmallVector<SCEVUse, 4> Ops;
+ for (const auto Op : SA->operands())
Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1));
return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1);
}
@@ -1975,10 +1986,10 @@ const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
if (D != 0) {
- const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
- const SCEV *SResidual =
+ SCEVUse SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
+ SCEVUse SResidual =
getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
- const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
+ SCEVUse SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
return getAddExpr(SSExtD, SSExtR,
(SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
Depth + 1);
@@ -1991,8 +2002,8 @@ const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
// this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
if (AR->isAffine()) {
- const SCEV *Start = AR->getStart();
- const SCEV *Step = AR->getStepRecurrence(*this);
+ SCEVUse Start = AR->getStart();
+ SCEVUse Step = AR->getStepRecurrence(*this);
unsigned BitWidth = getTypeSizeInBits(AR->getType());
const Loop *L = AR->getLoop();
@@ -2013,35 +2024,34 @@ const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
// in infinite recursion. In the latter case, the analysis code will
// cope with a conservative value, and it will take care to purge
// that value once it has finished.
- const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
+ SCEVUse MaxBECount = getConstantMaxBackedgeTakenCount(L);
if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
// Manually compute the final value for AR, checking for
// overflow.
// Check whether the backedge-taken count can be losslessly casted to
// the addrec's type. The count is always unsigned.
- const SCEV *CastedMaxBECount =
+ SCEVUse CastedMaxBECount =
getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
- const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
+ SCEVUse RecastedMaxBECount = getTruncateOrZeroExtend(
CastedMaxBECount, MaxBECount->getType(), Depth);
if (MaxBECount == RecastedMaxBECount) {
Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
// Check whether Start+Step*MaxBECount has no signed overflow.
- const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
- SCEV::FlagAnyWrap, Depth + 1);
- const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
- SCEV::FlagAnyWrap,
- Depth + 1),
- WideTy, Depth + 1);
- const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
- const SCEV *WideMaxBECount =
- getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
- const SCEV *OperandExtendedAdd =
- getAddExpr(WideStart,
- getMulExpr(WideMaxBECount,
- getSignExtendExpr(Step, WideTy, Depth + 1),
- SCEV::FlagAnyWrap, Depth + 1),
- SCEV::FlagAnyWrap, Depth + 1);
+ SCEVUse SMul =
+ getMulExpr(CastedMaxBECount, Step, SCEV::FlagAnyWrap, Depth + 1);
+ SCEVUse SAdd = getSignExtendExpr(
+ getAddExpr(Start, SMul, SCEV::FlagAnyWrap, Depth + 1), WideTy,
+ Depth + 1);
+ SCEVUse WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
+ SCEVUse WideMaxBECount =
+ getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
+ SCEVUse OperandExtendedAdd =
+ getAddExpr(WideStart,
+ getMulExpr(WideMaxBECount,
+ getSignExtendExpr(Step, WideTy, Depth + 1),
+ SCEV::FlagAnyWrap, Depth + 1),
+ SCEV::FlagAnyWrap, Depth + 1);
if (SAdd == OperandExtendedAdd) {
// Cache knowledge of AR NSW, which is propagated to this AddRec.
setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
@@ -2099,10 +2109,10 @@ const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
const APInt &C = SC->getAPInt();
const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
if (D != 0) {
- const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
- const SCEV *SResidual =
+ SCEVUse SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
+ SCEVUse SResidual =
getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
- const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
+ SCEVUse SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
return getAddExpr(SSExtD, SSExtR,
(SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
Depth + 1);
@@ -2127,8 +2137,8 @@ const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
// sext(smax(x, y)) -> smax(sext(x), sext(y))
if (isa<SCEVSMinExpr>(Op) || isa<SCEVSMaxExpr>(Op)) {
auto *MinMax = cast<SCEVMinMaxExpr>(Op);
- SmallVector<const SCEV *, 4> Operands;
- for (auto *Operand : MinMax->operands())
+ SmallVector<SCEVUse, 4> Operands;
+ for (auto Operand : MinMax->operands())
Operands.push_back(getSignExtendExpr(Operand, Ty));
if (isa<SCEVSMinExpr>(MinMax))
return getSMinExpr(Operands);
@@ -2137,7 +2147,8 @@ const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
// The cast wasn't folded; create an explicit cast node.
// Recompute the insert position, as it may have been invalidated.
- if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
+ if (SCEVUse S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
+ return S;
SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
Op, Ty);
UniqueSCEVs.InsertNode(S, IP);
@@ -2145,8 +2156,7 @@ const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
return S;
}
-const SCEV *ScalarEvolution::getCastExpr(SCEVTypes Kind, const SCEV *Op,
- Type *Ty) {
+SCEVUse ScalarEvolution::getCastExpr(SCEVTypes Kind, SCEVUse Op, Type *Ty) {
switch (Kind) {
case scTruncate:
return getTruncateExpr(Op, Ty);
@@ -2163,8 +2173,7 @@ const SCEV *ScalarEvolution::getCastExpr(SCEVTypes Kind, const SCEV *Op,
/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
-const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
- Type *Ty) {
+SCEVUse ScalarEvolution::getAnyExtendExpr(SCEVUse Op, Type *Ty) {
assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
"This is not an extending conversion!");
assert(isSCEVable(Ty) &&
@@ -2178,26 +2187,26 @@ const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
// Peel off a truncate cast.
if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
- const SCEV *NewOp = T->getOperand();
+ SCEVUse NewOp = T->getOperand();
if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
return getAnyExtendExpr(NewOp, Ty);
return getTruncateOrNoop(NewOp, Ty);
}
// Next try a zext cast. If the cast is folded, use it.
- const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
+ SCEVUse ZExt = getZeroExtendExpr(Op, Ty);
if (!isa<SCEVZeroExtendExpr>(ZExt))
return ZExt;
// Next try a sext cast. If the cast is folded, use it.
- const SCEV *SExt = getSignExtendExpr(Op, Ty);
+ SCEVUse SExt = getSignExtendExpr(Op, Ty);
if (!isa<SCEVSignExtendExpr>(SExt))
return SExt;
// Force the cast to be folded into the operands of an addrec.
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
- SmallVector<const SCEV *, 4> Ops;
- for (const SCEV *Op : AR->operands())
+ SmallVector<SCEVUse, 4> Ops;
+ for (SCEVUse Op : AR->operands())
Ops.push_back(getAnyExtendExpr(Op, Ty));
return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
}
@@ -2233,12 +2242,12 @@ const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
/// may be exposed. This helps getAddRecExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
-static bool
-CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
- SmallVectorImpl<const SCEV *> &NewOps,
- APInt &AccumulatedConstant,
- ArrayRef<const SCEV *> Ops, const APInt &Scale,
- ScalarEvolution &SE) {
+static bool CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
+ SmallVectorImpl<SCEVUse> &NewOps,
+ APInt &AccumulatedConstant,
+ ArrayRef<SCEVUse> Ops,
+ const APInt &Scale,
+ ScalarEvolution &SE) {
bool Interesting = false;
// Iterate over the add operands. They are sorted, with constants first.
@@ -2267,8 +2276,8 @@ CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
} else {
// A multiplication of a constant with some other value. Update
// the map.
- SmallVector<const SCEV *, 4> MulOps(drop_begin(Mul->operands()));
- const SCEV *Key = SE.getMulExpr(MulOps);
+ SmallVector<SCEVUse, 4> MulOps(drop_begin(Mul->operands()));
+ SCEVUse Key = SE.getMulExpr(MulOps);
auto Pair = M.insert({Key, NewScale});
if (Pair.second) {
NewOps.push_back(Pair.first->first);
@@ -2298,10 +2307,10 @@ CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
}
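
CollectAddOperandsWithScales pulls constant factors out of multiply operands and accumulates one scale per distinct non-constant term, so that 4*x + 2*x + 3 collapses to 6*x plus an accumulated constant of 3. A standalone analogue with strings standing in for SCEV keys:

#include <map>
#include <string>
#include <vector>

// Analogue of the (M, NewOps, AccumulatedConstant) triple: group add
// operands of the form Scale*Term by Term, summing the scales.
struct ScaledOperands {
  std::map<std::string, long long> M; // term -> accumulated scale
  std::vector<std::string> NewOps;    // terms in first-seen order
  long long AccumulatedConstant = 0;
};

void addOperand(ScaledOperands &SO, long long Scale, const std::string &Term) {
  if (Term.empty()) { // a constant operand
    SO.AccumulatedConstant += Scale;
    return;
  }
  auto Pair = SO.M.insert({Term, Scale});
  if (Pair.second)
    SO.NewOps.push_back(Term);    // first time we see this term
  else
    Pair.first->second += Scale;  // fold scales of repeated terms
}

int main() {
  ScaledOperands SO;
  addOperand(SO, 4, "x");
  addOperand(SO, 2, "x");
  addOperand(SO, 3, ""); // the constant 3
  // Result: M["x"] == 6 and AccumulatedConstant == 3, i.e. 6*x + 3.
  return (SO.M["x"] == 6 && SO.AccumulatedConstant == 3) ? 0 : 1;
}
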
bool ScalarEvolution::willNotOverflow(Instruction::BinaryOps BinOp, bool Signed,
- const SCEV *LHS, const SCEV *RHS,
+ SCEVUse LHS, SCEVUse RHS,
const Instruction *CtxI) {
- const SCEV *(ScalarEvolution::*Operation)(const SCEV *, const SCEV *,
- SCEV::NoWrapFlags, unsigned);
+ SCEVUse (ScalarEvolution::*Operation)(SCEVUse, SCEVUse, SCEV::NoWrapFlags,
+ unsigned);
switch (BinOp) {
default:
llvm_unreachable("Unsupported binary op");
@@ -2316,7 +2325,7 @@ bool ScalarEvolution::willNotOverflow(Instruction::BinaryOps BinOp, bool Signed,
break;
}
- const SCEV *(ScalarEvolution::*Extension)(const SCEV *, Type *, unsigned) =
+ SCEVUse (ScalarEvolution::*Extension)(SCEVUse, Type *, unsigned) =
Signed ? &ScalarEvolution::getSignExtendExpr
: &ScalarEvolution::getZeroExtendExpr;
@@ -2325,11 +2334,11 @@ bool ScalarEvolution::willNotOverflow(Instruction::BinaryOps BinOp, bool Signed,
auto *WideTy =
IntegerType::get(NarrowTy->getContext(), NarrowTy->getBitWidth() * 2);
- const SCEV *A = (this->*Extension)(
+ SCEVUse A = (this->*Extension)(
(this->*Operation)(LHS, RHS, SCEV::FlagAnyWrap, 0), WideTy, 0);
- const SCEV *LHSB = (this->*Extension)(LHS, WideTy, 0);
- const SCEV *RHSB = (this->*Extension)(RHS, WideTy, 0);
- const SCEV *B = (this->*Operation)(LHSB, RHSB, SCEV::FlagAnyWrap, 0);
+ SCEVUse LHSB = (this->*Extension)(LHS, WideTy, 0);
+ SCEVUse RHSB = (this->*Extension)(RHS, WideTy, 0);
+ SCEVUse B = (this->*Operation)(LHSB, RHSB, SCEV::FlagAnyWrap, 0);
if (A == B)
return true;
// Can we use context to prove the fact we need?
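
willNotOverflow's check is the classic widening argument: redo the operation at double the bit width and compare against the extension of the narrow result; equality proves the narrow operation cannot have wrapped. The same test at i8/i16, standalone:

#include <cstdint>

// Returns true iff signed i8 addition a + b does not overflow, proved by
// redoing the add in i16 (the analogue of comparing A and B above).
static bool willNotOverflowSignedAddI8(int8_t a, int8_t b) {
  int16_t Wide = int16_t(a) + int16_t(b); // B: op on extended operands
  int16_t NarrowThenExt =
      int8_t(uint8_t(uint8_t(a) + uint8_t(b)));          // A: extend(narrow op)
  return Wide == NarrowThenExt;
}

int main() {
  // 100 + 27 = 127 fits in i8; 100 + 28 = 128 does not.
  return (willNotOverflowSignedAddI8(100, 27) &&
          !willNotOverflowSignedAddI8(100, 28))
             ? 0 : 1;
}
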
@@ -2394,8 +2403,8 @@ ScalarEvolution::getStrengthenedNoWrapFlagsFromBinOp(
OBO->getOpcode() != Instruction::Mul)
return std::nullopt;
- const SCEV *LHS = getSCEV(OBO->getOperand(0));
- const SCEV *RHS = getSCEV(OBO->getOperand(1));
+ SCEVUse LHS = getSCEV(OBO->getOperand(0));
+ SCEVUse RHS = getSCEV(OBO->getOperand(1));
const Instruction *CtxI =
UseContextForNoWrapFlagInference ? dyn_cast<Instruction>(OBO) : nullptr;
@@ -2421,10 +2430,10 @@ ScalarEvolution::getStrengthenedNoWrapFlagsFromBinOp(
// We're trying to construct a SCEV of type `Type' with `Ops' as operands and
// `OldFlags' as can't-wrap behavior. Infer a more aggressive set of
// can't-overflow flags for the operation if possible.
-static SCEV::NoWrapFlags
-StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
- const ArrayRef<const SCEV *> Ops,
- SCEV::NoWrapFlags Flags) {
+static SCEV::NoWrapFlags StrengthenNoWrapFlags(ScalarEvolution *SE,
+ SCEVTypes Type,
+ const ArrayRef<SCEVUse> Ops,
+ SCEV::NoWrapFlags Flags) {
using namespace std::placeholders;
using OBO = OverflowingBinaryOperator;
@@ -2439,7 +2448,7 @@ StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
// If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
- auto IsKnownNonNegative = [&](const SCEV *S) {
+ auto IsKnownNonNegative = [&](SCEVUse S) {
return SE->isKnownNonNegative(S);
};
@@ -2504,14 +2513,20 @@ StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
return Flags;
}
-bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) {
+bool ScalarEvolution::isAvailableAtLoopEntry(SCEVUse S, const Loop *L) {
return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader());
}
+SCEVUse ScalarEvolution::getAddExpr(ArrayRef<const SCEV *> Ops,
+ SCEV::NoWrapFlags Flags, unsigned Depth) {
+ SmallVector<SCEVUse> Ops2(Ops.begin(), Ops.end());
+ return getAddExpr(Ops2, Flags, Depth);
+}
+
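
Note the pattern of the new overload: existing ArrayRef<const SCEV *> call sites keep working via an adapter that copies element-wise into a SmallVector<SCEVUse> (relying on the implicit SCEVUse construction) and forwards, at the cost of one O(n) copy per call. The idiom, distilled with hypothetical names and std::vector in place of the LLVM containers:

#include <vector>

struct Expr {};                    // stands in for SCEV
struct ExprUse {                   // stands in for SCEVUse
  const Expr *P = nullptr;
  ExprUse(const Expr *P) : P(P) {} // implicit: what makes the copy work
};

struct Builder {
  // The real worker takes the new value type.
  int build(std::vector<ExprUse> &Ops) { return int(Ops.size()); }

  // Adapter overload: convert element-wise, then forward. Mirrors
  // getAddExpr(ArrayRef<const SCEV *>) -> getAddExpr(SmallVector<SCEVUse>&).
  int build(const std::vector<const Expr *> &Ops) {
    std::vector<ExprUse> Ops2(Ops.begin(), Ops.end()); // one O(n) copy
    return build(Ops2);
  }
};
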
/// Get a canonical add expression, or something simpler if possible.
-const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
- SCEV::NoWrapFlags OrigFlags,
- unsigned Depth) {
+SCEVUse ScalarEvolution::getAddExpr(SmallVectorImpl<SCEVUse> &Ops,
+ SCEV::NoWrapFlags OrigFlags,
+ unsigned Depth) {
assert(!(OrigFlags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
"only nuw or nsw allowed");
assert(!Ops.empty() && "Cannot get empty add!");
@@ -2521,8 +2536,8 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
"SCEVAddExpr operand types don't match!");
- unsigned NumPtrs = count_if(
- Ops, [](const SCEV *Op) { return Op->getType()->isPointerTy(); });
+ unsigned NumPtrs =
+ count_if(Ops, [](SCEVUse Op) { return Op->getType()->isPointerTy(); });
assert(NumPtrs <= 1 && "add has at most one pointer operand");
#endif
@@ -2552,7 +2567,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
}
// Delay expensive flag strengthening until necessary.
- auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
+ auto ComputeFlags = [this, OrigFlags](const ArrayRef<SCEVUse> Ops) {
return StrengthenNoWrapFlags(this, scAddExpr, Ops, OrigFlags);
};
@@ -2580,8 +2595,8 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
while (i+Count != e && Ops[i+Count] == Ops[i])
++Count;
// Merge the values into a multiply.
- const SCEV *Scale = getConstant(Ty, Count);
- const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
+ SCEVUse Scale = getConstant(Ty, Count);
+ SCEVUse Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
if (Ops.size() == Count)
return Mul;
Ops[i] = Mul;
@@ -2604,14 +2619,14 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
return T->getOperand()->getType();
if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
- const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
+ const auto LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp))
return T->getOperand()->getType();
}
return nullptr;
};
if (auto *SrcType = FindTruncSrcType()) {
- SmallVector<const SCEV *, 8> LargeOps;
+ SmallVector<SCEVUse, 8> LargeOps;
bool Ok = true;
// Check all the operands to see if they can be represented in the
// source type of the truncate.
@@ -2625,7 +2640,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
} else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
LargeOps.push_back(getAnyExtendExpr(C, SrcType));
} else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
- SmallVector<const SCEV *, 8> LargeMulOps;
+ SmallVector<SCEVUse, 8> LargeMulOps;
for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
if (const SCEVTruncateExpr *T =
dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
@@ -2650,7 +2665,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
}
if (Ok) {
// Evaluate the expression in the larger type.
- const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1);
+ SCEVUse Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1);
// If it folds to something simple, use it. Otherwise, don't.
if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
return getTruncateExpr(Fold, Ty);
@@ -2661,8 +2676,8 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
// Check if we have an expression of the form ((X + C1) - C2), where C1 and
// C2 can be folded in a way that allows retaining wrapping flags of (X +
// C1).
- const SCEV *A = Ops[0];
- const SCEV *B = Ops[1];
+ SCEVUse A = Ops[0];
+ SCEVUse B = Ops[1];
auto *AddExpr = dyn_cast<SCEVAddExpr>(B);
auto *C = dyn_cast<SCEVConstant>(A);
if (AddExpr && C && isa<SCEVConstant>(AddExpr->getOperand(0))) {
@@ -2689,7 +2704,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
}
if (PreservedFlags != SCEV::FlagAnyWrap) {
- SmallVector<const SCEV *, 4> NewOps(AddExpr->operands());
+ SmallVector<SCEVUse, 4> NewOps(AddExpr->operands());
NewOps[0] = getConstant(ConstAdd);
return getAddExpr(NewOps, PreservedFlags);
}
@@ -2701,8 +2716,8 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[0]);
if (Mul && Mul->getNumOperands() == 2 &&
Mul->getOperand(0)->isAllOnesValue()) {
- const SCEV *X;
- const SCEV *Y;
+ SCEVUse X;
+ SCEVUse Y;
if (matchURem(Mul->getOperand(1), X, Y) && X == Ops[1]) {
return getMulExpr(Y, getUDivExpr(X, Y));
}
@@ -2748,7 +2763,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
uint64_t BitWidth = getTypeSizeInBits(Ty);
DenseMap<const SCEV *, APInt> M;
- SmallVector<const SCEV *, 8> NewOps;
+ SmallVector<SCEVUse, 8> NewOps;
APInt AccumulatedConstant(BitWidth, 0);
if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
Ops, APInt(BitWidth, 1), *this)) {
@@ -2761,8 +2776,8 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
// Some interesting folding opportunity is present, so it's worthwhile to
// re-generate the operands list. Group the operands by constant scale,
// to avoid multiplying by the same constant scale multiple times.
- std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
- for (const SCEV *NewOp : NewOps)
+ std::map<APInt, SmallVector<SCEVUse, 4>, APIntCompare> MulOpLists;
+ for (SCEVUse NewOp : NewOps)
MulOpLists[M.find(NewOp)->second].push_back(NewOp);
// Re-generate the operands list.
Ops.clear();
@@ -2792,25 +2807,24 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
- const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
+ SCEVUse MulOpSCEV = Mul->getOperand(MulOp);
if (isa<SCEVConstant>(MulOpSCEV))
continue;
for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
if (MulOpSCEV == Ops[AddOp]) {
// Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1))
- const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
+ SCEVUse InnerMul = Mul->getOperand(MulOp == 0);
if (Mul->getNumOperands() != 2) {
// If the multiply has more than two operands, we must get the
// Y*Z term.
- SmallVector<const SCEV *, 4> MulOps(
- Mul->operands().take_front(MulOp));
+ SmallVector<SCEVUse, 4> MulOps(Mul->operands().take_front(MulOp));
append_range(MulOps, Mul->operands().drop_front(MulOp + 1));
InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
}
- SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul};
- const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
- const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
- SCEV::FlagAnyWrap, Depth + 1);
+ SmallVector<SCEVUse, 2> TwoOps = {getOne(Ty), InnerMul};
+ SCEVUse AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
+ SCEVUse OuterMul =
+ getMulExpr(AddOne, MulOpSCEV, SCEV::FlagAnyWrap, Depth + 1);
if (Ops.size() == 2) return OuterMul;
if (AddOp < Idx) {
Ops.erase(Ops.begin()+AddOp);
@@ -2834,25 +2848,24 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
OMulOp != e; ++OMulOp)
if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
// Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
- const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
+ SCEVUse InnerMul1 = Mul->getOperand(MulOp == 0);
if (Mul->getNumOperands() != 2) {
- SmallVector<const SCEV *, 4> MulOps(
- Mul->operands().take_front(MulOp));
+ SmallVector<SCEVUse, 4> MulOps(Mul->operands().take_front(MulOp));
append_range(MulOps, Mul->operands().drop_front(MulOp+1));
InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
}
- const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
+ SCEVUse InnerMul2 = OtherMul->getOperand(OMulOp == 0);
if (OtherMul->getNumOperands() != 2) {
- SmallVector<const SCEV *, 4> MulOps(
+ SmallVector<SCEVUse, 4> MulOps(
OtherMul->operands().take_front(OMulOp));
append_range(MulOps, OtherMul->operands().drop_front(OMulOp+1));
InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
}
- SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2};
- const SCEV *InnerMulSum =
+ SmallVector<SCEVUse, 2> TwoOps = {InnerMul1, InnerMul2};
+ SCEVUse InnerMulSum =
getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
- const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum,
- SCEV::FlagAnyWrap, Depth + 1);
+ SCEVUse OuterMul = getMulExpr(MulOpSCEV, InnerMulSum,
+ SCEV::FlagAnyWrap, Depth + 1);
if (Ops.size() == 2) return OuterMul;
Ops.erase(Ops.begin()+Idx);
Ops.erase(Ops.begin()+OtherMulIdx-1);
@@ -2873,7 +2886,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
// Scan all of the other operands to this add and add them to the vector if
// they are loop invariant w.r.t. the recurrence.
- SmallVector<const SCEV *, 8> LIOps;
+ SmallVector<SCEVUse, 8> LIOps;
const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
const Loop *AddRecLoop = AddRec->getLoop();
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
@@ -2895,7 +2908,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
// NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step}
LIOps.push_back(AddRec->getStart());
- SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
+ SmallVector<SCEVUse, 4> AddRecOps(AddRec->operands());
// It is not in general safe to propagate flags valid on an add within
// the addrec scope to one outside it. We must prove that the inner
@@ -2920,7 +2933,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
// outer add and the inner addrec are guaranteed to have no overflow.
// Always propagate NW.
Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
- const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
+ SCEVUse NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
// If all of the other operands were loop invariant, we are done.
if (Ops.size() == 1) return NewRec;
@@ -2948,7 +2961,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
"AddRecExprs are not sorted in reverse dominance order?");
if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
// Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L>
- SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
+ SmallVector<SCEVUse, 4> AddRecOps(AddRec->operands());
for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
++OtherIdx) {
const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
@@ -2959,8 +2972,8 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
append_range(AddRecOps, OtherAddRec->operands().drop_front(i));
break;
}
- SmallVector<const SCEV *, 2> TwoOps = {
- AddRecOps[i], OtherAddRec->getOperand(i)};
+ SmallVector<SCEVUse, 2> TwoOps = {AddRecOps[i],
+ OtherAddRec->getOperand(i)};
AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
}
Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
@@ -2981,18 +2994,17 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
}
-const SCEV *
-ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
- SCEV::NoWrapFlags Flags) {
+SCEVUse ScalarEvolution::getOrCreateAddExpr(ArrayRef<SCEVUse> Ops,
+ SCEV::NoWrapFlags Flags) {
FoldingSetNodeID ID;
ID.AddInteger(scAddExpr);
- for (const SCEV *Op : Ops)
+ for (SCEVUse Op : Ops)
ID.AddPointer(Op);
void *IP = nullptr;
SCEVAddExpr *S =
static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
if (!S) {
- const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+ SCEVUse *O = SCEVAllocator.Allocate<SCEVUse>(Ops.size());
std::uninitialized_copy(Ops.begin(), Ops.end(), O);
S = new (SCEVAllocator)
SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
@@ -3003,19 +3015,19 @@ ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
return S;
}
-const SCEV *
-ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
- const Loop *L, SCEV::NoWrapFlags Flags) {
+SCEVUse ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<SCEVUse> Ops,
+ const Loop *L,
+ SCEV::NoWrapFlags Flags) {
FoldingSetNodeID ID;
ID.AddInteger(scAddRecExpr);
- for (const SCEV *Op : Ops)
+ for (SCEVUse Op : Ops)
ID.AddPointer(Op);
ID.AddPointer(L);
void *IP = nullptr;
SCEVAddRecExpr *S =
static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
if (!S) {
- const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+ SCEVUse *O = SCEVAllocator.Allocate<SCEVUse>(Ops.size());
std::uninitialized_copy(Ops.begin(), Ops.end(), O);
S = new (SCEVAllocator)
SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L);
@@ -3027,18 +3039,17 @@ ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
return S;
}
-const SCEV *
-ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
- SCEV::NoWrapFlags Flags) {
+SCEVUse ScalarEvolution::getOrCreateMulExpr(ArrayRef<SCEVUse> Ops,
+ SCEV::NoWrapFlags Flags) {
FoldingSetNodeID ID;
ID.AddInteger(scMulExpr);
- for (const SCEV *Op : Ops)
+ for (SCEVUse Op : Ops)
ID.AddPointer(Op);
void *IP = nullptr;
SCEVMulExpr *S =
static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
if (!S) {
- const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+ SCEVUse *O = SCEVAllocator.Allocate<SCEVUse>(Ops.size());
std::uninitialized_copy(Ops.begin(), Ops.end(), O);
S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
O, Ops.size());
@@ -3083,11 +3094,11 @@ static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
/// Determine if any of the operands in this SCEV are a constant or if
/// any of the add or multiply expressions in this SCEV contain a constant.
-static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
+static bool containsConstantInAddMulChain(SCEVUse StartExpr) {
struct FindConstantInAddMulChain {
bool FoundConstant = false;
- bool follow(const SCEV *S) {
+ bool follow(SCEVUse S) {
FoundConstant |= isa<SCEVConstant>(S);
return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
}
@@ -3104,9 +3115,16 @@ static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
}
/// Get a canonical multiply expression, or something simpler if possible.
-const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
- SCEV::NoWrapFlags OrigFlags,
- unsigned Depth) {
+SCEVUse ScalarEvolution::getMulExpr(ArrayRef<const SCEV *> Ops,
+ SCEV::NoWrapFlags OrigFlags,
+ unsigned Depth) {
+ SmallVector<SCEVUse> Ops2(Ops);
+ return getMulExpr(Ops2, OrigFlags, Depth);
+}
+
+SCEVUse ScalarEvolution::getMulExpr(SmallVectorImpl<SCEVUse> &Ops,
+ SCEV::NoWrapFlags OrigFlags,
+ unsigned Depth) {
assert(OrigFlags == maskFlags(OrigFlags, SCEV::FlagNUW | SCEV::FlagNSW) &&
"only nuw or nsw allowed");
assert(!Ops.empty() && "Cannot get empty mul!");
@@ -3150,7 +3168,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
}
// Delay expensive flag strengthening until necessary.
- auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
+ auto ComputeFlags = [this, OrigFlags](const ArrayRef<SCEVUse> Ops) {
return StrengthenNoWrapFlags(this, scMulExpr, Ops, OrigFlags);
};
@@ -3177,10 +3195,10 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
// profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of
// this transformation should be narrowed down.
if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add)) {
- const SCEV *LHS = getMulExpr(LHSC, Add->getOperand(0),
- SCEV::FlagAnyWrap, Depth + 1);
- const SCEV *RHS = getMulExpr(LHSC, Add->getOperand(1),
- SCEV::FlagAnyWrap, Depth + 1);
+ SCEVUse LHS = getMulExpr(LHSC, Add->getOperand(0), SCEV::FlagAnyWrap,
+ Depth + 1);
+ SCEVUse RHS = getMulExpr(LHSC, Add->getOperand(1), SCEV::FlagAnyWrap,
+ Depth + 1);
return getAddExpr(LHS, RHS, SCEV::FlagAnyWrap, Depth + 1);
}
@@ -3188,11 +3206,11 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
// If we have a mul by -1 of an add, try distributing the -1 among the
// add operands.
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
- SmallVector<const SCEV *, 4> NewOps;
+ SmallVector<SCEVUse, 4> NewOps;
bool AnyFolded = false;
- for (const SCEV *AddOp : Add->operands()) {
- const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
- Depth + 1);
+ for (SCEVUse AddOp : Add->operands()) {
+ SCEVUse Mul =
+ getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap, Depth + 1);
if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
NewOps.push_back(Mul);
}
@@ -3200,8 +3218,8 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
} else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
// Negation preserves a recurrence's no self-wrap property.
- SmallVector<const SCEV *, 4> Operands;
- for (const SCEV *AddRecOp : AddRec->operands())
+ SmallVector<SCEVUse, 4> Operands;
+ for (SCEVUse AddRecOp : AddRec->operands())
Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
Depth + 1));
// Let M be the minimum representable signed value. AddRec with nsw
@@ -3257,7 +3275,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
// Scan all of the other operands to this mul and add them to the vector
// if they are loop invariant w.r.t. the recurrence.
- SmallVector<const SCEV *, 8> LIOps;
+ SmallVector<SCEVUse, 8> LIOps;
const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
if (isAvailableAtLoopEntry(Ops[i], AddRec->getLoop())) {
@@ -3269,9 +3287,9 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
// If we found some loop invariants, fold them into the recurrence.
if (!LIOps.empty()) {
// NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
- SmallVector<const SCEV *, 4> NewOps;
+ SmallVector<SCEVUse, 4> NewOps;
NewOps.reserve(AddRec->getNumOperands());
- const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
+ SCEVUse Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
// If both the mul and addrec are nuw, we can preserve nuw.
// If both the mul and addrec are nsw, we can only preserve nsw if either
@@ -3293,7 +3311,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
}
}
- const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop(), Flags);
+ SCEVUse NewRec = getAddRecExpr(NewOps, AddRec->getLoop(), Flags);
// If all of the other operands were loop invariant, we are done.
if (Ops.size() == 1) return NewRec;
@@ -3339,10 +3357,10 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
bool Overflow = false;
Type *Ty = AddRec->getType();
bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
- SmallVector<const SCEV*, 7> AddRecOps;
+ SmallVector<SCEVUse, 7> AddRecOps;
for (int x = 0, xe = AddRec->getNumOperands() +
OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
- SmallVector <const SCEV *, 7> SumOps;
+ SmallVector<SCEVUse, 7> SumOps;
for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
@@ -3354,9 +3372,9 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
Coeff = umul_ov(Coeff1, Coeff2, Overflow);
else
Coeff = Coeff1*Coeff2;
- const SCEV *CoeffTerm = getConstant(Ty, Coeff);
- const SCEV *Term1 = AddRec->getOperand(y-z);
- const SCEV *Term2 = OtherAddRec->getOperand(z);
+ SCEVUse CoeffTerm = getConstant(Ty, Coeff);
+ SCEVUse Term1 = AddRec->getOperand(y - z);
+ SCEVUse Term2 = OtherAddRec->getOperand(z);
SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2,
SCEV::FlagAnyWrap, Depth + 1));
}
@@ -3366,8 +3384,8 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1));
}
if (!Overflow) {
- const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->getLoop(),
- SCEV::FlagAnyWrap);
+ SCEVUse NewAddRec =
+ getAddRecExpr(AddRecOps, AddRec->getLoop(), SCEV::FlagAnyWrap);
if (Ops.size() == 2) return NewAddRec;
Ops[Idx] = NewAddRec;
Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
@@ -3390,8 +3408,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
}
/// Represents an unsigned remainder expression based on unsigned division.
-const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
- const SCEV *RHS) {
+SCEVUse ScalarEvolution::getURemExpr(SCEVUse LHS, SCEVUse RHS) {
assert(getEffectiveSCEVType(LHS->getType()) ==
getEffectiveSCEVType(RHS->getType()) &&
"SCEVURemExpr operand types don't match!");
@@ -3412,15 +3429,14 @@ const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
}
// Fallback to %a == %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y)
- const SCEV *UDiv = getUDivExpr(LHS, RHS);
- const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
+ SCEVUse UDiv = getUDivExpr(LHS, RHS);
+ SCEVUse Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
}
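
The fallback encodes x urem y as x -<nuw> ((x udiv y) *<nuw> y): the product never exceeds x, so neither the multiply nor the subtract can wrap, and the identity is exact for all unsigned inputs with y != 0. Exhaustive at i8:

#include <cstdint>

int main() {
  for (unsigned x = 0; x < 256; ++x)
    for (unsigned y = 1; y < 256; ++y)
      if (x % y != x - (x / y) * y) // urem rebuilt from udiv/mul/sub
        return 1;
  return 0;
}
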
/// Get a canonical unsigned division expression, or something simpler if
/// possible.
-const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
- const SCEV *RHS) {
+SCEVUse ScalarEvolution::getUDivExpr(SCEVUse LHS, SCEVUse RHS) {
assert(!LHS->getType()->isPointerTy() &&
"SCEVUDivExpr operand can't be pointer!");
assert(LHS->getType() == RHS->getType() &&
@@ -3431,7 +3447,7 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
ID.AddPointer(LHS);
ID.AddPointer(RHS);
void *IP = nullptr;
- if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
+ if (SCEVUse S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
return S;
// 0 udiv Y == 0
@@ -3469,8 +3485,8 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
getZeroExtendExpr(Step, ExtTy),
AR->getLoop(), SCEV::FlagAnyWrap)) {
- SmallVector<const SCEV *, 4> Operands;
- for (const SCEV *Op : AR->operands())
+ SmallVector<SCEVUse, 4> Operands;
+ for (SCEVUse Op : AR->operands())
Operands.push_back(getUDivExpr(Op, RHS));
return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
}
@@ -3486,9 +3502,8 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
const APInt &StartInt = StartC->getAPInt();
const APInt &StartRem = StartInt.urem(StepInt);
if (StartRem != 0) {
- const SCEV *NewLHS =
- getAddRecExpr(getConstant(StartInt - StartRem), Step,
- AR->getLoop(), SCEV::FlagNW);
+ SCEVUse NewLHS = getAddRecExpr(getConstant(StartInt - StartRem),
+ Step, AR->getLoop(), SCEV::FlagNW);
if (LHS != NewLHS) {
LHS = NewLHS;
@@ -3499,7 +3514,7 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
ID.AddPointer(LHS);
ID.AddPointer(RHS);
IP = nullptr;
- if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
+ if (SCEVUse S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
return S;
}
}
@@ -3507,16 +3522,16 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
}
// (A*B)/C --> A*(B/C) if safe and B/C can be folded.
if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
- SmallVector<const SCEV *, 4> Operands;
- for (const SCEV *Op : M->operands())
+ SmallVector<SCEVUse, 4> Operands;
+ for (SCEVUse Op : M->operands())
Operands.push_back(getZeroExtendExpr(Op, ExtTy));
if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
// Find an operand that's safely divisible.
for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
- const SCEV *Op = M->getOperand(i);
- const SCEV *Div = getUDivExpr(Op, RHSC);
+ SCEVUse Op = M->getOperand(i);
+ SCEVUse Div = getUDivExpr(Op, RHSC);
if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
- Operands = SmallVector<const SCEV *, 4>(M->operands());
+ Operands = SmallVector<SCEVUse, 4>(M->operands());
Operands[i] = Div;
return getMulExpr(Operands);
}
@@ -3539,13 +3554,13 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
// (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
- SmallVector<const SCEV *, 4> Operands;
- for (const SCEV *Op : A->operands())
+ SmallVector<SCEVUse, 4> Operands;
+ for (SCEVUse Op : A->operands())
Operands.push_back(getZeroExtendExpr(Op, ExtTy));
if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
Operands.clear();
for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
- const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
+ SCEVUse Op = getUDivExpr(A->getOperand(i), RHS);
if (isa<SCEVUDivExpr>(Op) ||
getMulExpr(Op, RHS) != A->getOperand(i))
break;
@@ -3565,7 +3580,8 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
// The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs
// changes). Make sure we get a new one.
IP = nullptr;
- if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
+ if (SCEVUse S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
+ return S;
SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
LHS, RHS);
UniqueSCEVs.InsertNode(S, IP);
@@ -3591,8 +3607,7 @@ APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
/// possible. There is no representation for an exact udiv in SCEV IR, but we
/// can attempt to remove factors from the LHS and RHS. We can't do this when
/// it's not exact because the udiv may be clearing bits.
-const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
- const SCEV *RHS) {
+SCEVUse ScalarEvolution::getUDivExactExpr(SCEVUse LHS, SCEVUse RHS) {
// TODO: we could try to find factors in all sorts of things, but for now we
// just deal with u/exact (multiply, constant). See SCEVDivision towards the
// end of this file for inspiration.
@@ -3606,7 +3621,7 @@ const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
// first element of the mulexpr.
if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
if (LHSCst == RHSCst) {
- SmallVector<const SCEV *, 2> Operands(drop_begin(Mul->operands()));
+ SmallVector<SCEVUse, 2> Operands(drop_begin(Mul->operands()));
return getMulExpr(Operands);
}
@@ -3619,7 +3634,7 @@ const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
RHSCst =
cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
- SmallVector<const SCEV *, 2> Operands;
+ SmallVector<SCEVUse, 2> Operands;
Operands.push_back(LHSCst);
append_range(Operands, Mul->operands().drop_front());
LHS = getMulExpr(Operands);
@@ -3633,7 +3648,7 @@ const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
if (Mul->getOperand(i) == RHS) {
- SmallVector<const SCEV *, 2> Operands;
+ SmallVector<SCEVUse, 2> Operands;
append_range(Operands, Mul->operands().take_front(i));
append_range(Operands, Mul->operands().drop_front(i + 1));
return getMulExpr(Operands);
@@ -3645,10 +3660,9 @@ const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
/// Get an add recurrence expression for the specified loop. Simplify the
/// expression as much as possible.
-const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
- const Loop *L,
- SCEV::NoWrapFlags Flags) {
- SmallVector<const SCEV *, 4> Operands;
+SCEVUse ScalarEvolution::getAddRecExpr(SCEVUse Start, SCEVUse Step,
+ const Loop *L, SCEV::NoWrapFlags Flags) {
+ SmallVector<SCEVUse, 4> Operands;
Operands.push_back(Start);
if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
if (StepChrec->getLoop() == L) {
@@ -3660,11 +3674,16 @@ const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
return getAddRecExpr(Operands, L, Flags);
}
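
When Step is itself an add recurrence on the same loop, appending its operands builds a higher-order chrec: {Start,+,{A,+,B}} behaves exactly like {Start,+,A,+,B}, whose closed form is Start + A*n + B*n*(n-1)/2. A standalone check that the iterated recurrence matches that closed form:

#include <cstdint>

int main() {
  const int64_t Start = 7, A = 3, B = 2;
  int64_t X = Start, StepN = A; // {Start,+,{A,+,B}}: the step evolves too
  for (int64_t n = 0; n <= 20; ++n) {
    // Closed form of the folded chrec {Start,+,A,+,B}:
    //   Start*C(n,0) + A*C(n,1) + B*C(n,2)
    int64_t Folded = Start + A * n + B * (n * (n - 1) / 2);
    if (X != Folded)
      return 1;
    X += StepN;  // advance the outer recurrence
    StepN += B;  // advance the step recurrence
  }
  return 0; // the flattened and nested forms agree on every iteration
}
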
+SCEVUse ScalarEvolution::getAddRecExpr(ArrayRef<const SCEV *> Operands,
+ const Loop *L, SCEV::NoWrapFlags Flags) {
+ SmallVector<SCEVUse> Ops2(Operands);
+ return getAddRecExpr(Ops2, L, Flags);
+}
+
/// Get an add recurrence expression for the specified loop. Simplify the
/// expression as much as possible.
-const SCEV *
-ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
- const Loop *L, SCEV::NoWrapFlags Flags) {
+SCEVUse ScalarEvolution::getAddRecExpr(SmallVectorImpl<SCEVUse> &Operands,
+ const Loop *L, SCEV::NoWrapFlags Flags) {
if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
@@ -3698,13 +3717,13 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
? (L->getLoopDepth() < NestedLoop->getLoopDepth())
: (!NestedLoop->contains(L) &&
DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
- SmallVector<const SCEV *, 4> NestedOperands(NestedAR->operands());
+ SmallVector<SCEVUse, 4> NestedOperands(NestedAR->operands());
Operands[0] = NestedAR->getStart();
// AddRecs require their operands be loop-invariant with respect to their
// loops. Don't perform this transformation if it would break this
// requirement.
- bool AllInvariant = all_of(
- Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });
+ bool AllInvariant =
+ all_of(Operands, [&](SCEVUse Op) { return isLoopInvariant(Op, L); });
if (AllInvariant) {
// Create a recurrence for the outer loop with the same step size.
@@ -3715,7 +3734,7 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
- AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
+ AllInvariant = all_of(NestedOperands, [&](SCEVUse Op) {
return isLoopInvariant(Op, NestedLoop);
});
@@ -3739,10 +3758,15 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
return getOrCreateAddRecExpr(Operands, L, Flags);
}
-const SCEV *
+SCEVUse ScalarEvolution::getGEPExpr(GEPOperator *GEP,
+ ArrayRef<const SCEV *> IndexExprs) {
+ return getGEPExpr(GEP, SmallVector<SCEVUse>(IndexExprs));
+}
+
+SCEVUse
ScalarEvolution::getGEPExpr(GEPOperator *GEP,
- const SmallVectorImpl<const SCEV *> &IndexExprs) {
- const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
+ const SmallVectorImpl<SCEVUse> &IndexExprs) {
+ SCEVUse BaseExpr = getSCEV(GEP->getPointerOperand());
// getSCEV(Base)->getType() has the same address space as Base->getType()
// because SCEV::getType() preserves the address space.
Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType());
@@ -3764,14 +3788,14 @@ ScalarEvolution::getGEPExpr(GEPOperator *GEP,
Type *CurTy = GEP->getType();
bool FirstIter = true;
- SmallVector<const SCEV *, 4> Offsets;
- for (const SCEV *IndexExpr : IndexExprs) {
+ SmallVector<SCEVUse, 4> Offsets;
+ for (SCEVUse IndexExpr : IndexExprs) {
// Compute the (potentially symbolic) offset in bytes for this index.
if (StructType *STy = dyn_cast<StructType>(CurTy)) {
// For a struct, add the member offset.
ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
unsigned FieldNo = Index->getZExtValue();
- const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo);
+ SCEVUse FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo);
Offsets.push_back(FieldOffset);
// Update CurTy to the type of the field at Index.
@@ -3787,12 +3811,12 @@ ScalarEvolution::getGEPExpr(GEPOperator *GEP,
CurTy = GetElementPtrInst::getTypeAtIndex(CurTy, (uint64_t)0);
}
// For an array, add the element offset, explicitly scaled.
- const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy);
+ SCEVUse ElementSize = getSizeOfExpr(IntIdxTy, CurTy);
// Getelementptr indices are signed.
IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy);
// Multiply the index by the element size to compute the element offset.
- const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, OffsetWrap);
+ SCEVUse LocalOffset = getMulExpr(IndexExpr, ElementSize, OffsetWrap);
Offsets.push_back(LocalOffset);
}
}
@@ -3802,35 +3826,41 @@ ScalarEvolution::getGEPExpr(GEPOperator *GEP,
return BaseExpr;
// Add the offsets together, assuming nsw if inbounds.
- const SCEV *Offset = getAddExpr(Offsets, OffsetWrap);
+ SCEVUse Offset = getAddExpr(Offsets, OffsetWrap);
// Add the base address and the offset. We cannot use the nsw flag, as the
// base address is unsigned. However, if we know that the offset is
// non-negative, we can use nuw.
SCEV::NoWrapFlags BaseWrap = AssumeInBoundsFlags && isKnownNonNegative(Offset)
? SCEV::FlagNUW : SCEV::FlagAnyWrap;
- auto *GEPExpr = getAddExpr(BaseExpr, Offset, BaseWrap);
+ auto GEPExpr = getAddExpr(BaseExpr, Offset, BaseWrap);
assert(BaseExpr->getType() == GEPExpr->getType() &&
"GEP should not change type mid-flight.");
return GEPExpr;
}
SCEV *ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType,
- ArrayRef<const SCEV *> Ops) {
+ ArrayRef<SCEVUse> Ops) {
FoldingSetNodeID ID;
ID.AddInteger(SCEVType);
- for (const SCEV *Op : Ops)
+ for (SCEVUse Op : Ops)
ID.AddPointer(Op);
void *IP = nullptr;
return UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
}
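(For context, findExistingSCEVInCache relies on LLVM's FoldingSet uniquing: profile the (kind, operands) tuple, then probe for an existing node. A self-contained sketch of that pattern with a toy node type -- illustrative only, not SCEV's actual node layout:

#include "llvm/ADT/FoldingSet.h"
#include "llvm/Support/Allocator.h"
using namespace llvm;

struct ToyExpr : FoldingSetNode {
  unsigned Kind;
  const void *Op;
  ToyExpr(unsigned K, const void *O) : Kind(K), Op(O) {}
  void Profile(FoldingSetNodeID &ID) const {
    ID.AddInteger(Kind);
    ID.AddPointer(Op);
  }
};

static ToyExpr *getOrCreate(FoldingSet<ToyExpr> &Set, BumpPtrAllocator &Alloc,
                            unsigned Kind, const void *Op) {
  FoldingSetNodeID ID;
  ID.AddInteger(Kind);
  ID.AddPointer(Op);
  void *IP = nullptr;
  if (ToyExpr *E = Set.FindNodeOrInsertPos(ID, IP))
    return E; // an equivalent node already exists; reuse it
  ToyExpr *E = new (Alloc) ToyExpr(Kind, Op);
  Set.InsertNode(E, IP); // IP remembers the probe position
  return E;
}
)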
-const SCEV *ScalarEvolution::getAbsExpr(const SCEV *Op, bool IsNSW) {
+SCEVUse ScalarEvolution::getAbsExpr(SCEVUse Op, bool IsNSW) {
SCEV::NoWrapFlags Flags = IsNSW ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
return getSMaxExpr(Op, getNegativeSCEV(Op, Flags));
}
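(getAbsExpr is just the identity abs(x) == smax(x, -x); a quick plain-int check -- the x == INT_MIN overflow case is exactly what the IsNSW flag excludes:

#include <algorithm>
#include <cassert>

int main() {
  for (int X : {-5, 0, 7})
    assert(std::max(X, -X) == (X < 0 ? -X : X));
}
)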
-const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind,
- SmallVectorImpl<const SCEV *> &Ops) {
+SCEVUse ScalarEvolution::getMinMaxExpr(SCEVTypes Kind,
+ ArrayRef<const SCEV *> Ops) {
+ SmallVector<SCEVUse> Ops2(Ops);
+ return getMinMaxExpr(Kind, Ops2);
+}
+
+SCEVUse ScalarEvolution::getMinMaxExpr(SCEVTypes Kind,
+ SmallVectorImpl<SCEVUse> &Ops) {
assert(SCEVMinMaxExpr::isMinMaxType(Kind) && "Not a SCEVMinMaxExpr!");
assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
if (Ops.size() == 1) return Ops[0];
@@ -3852,7 +3882,7 @@ const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind,
GroupByComplexity(Ops, &LI, DT);
// Check if we have created the same expression before.
- if (const SCEV *S = findExistingSCEVInCache(Kind, Ops)) {
+ if (SCEVUse S = findExistingSCEVInCache(Kind, Ops)) {
return S;
}
@@ -3958,10 +3988,10 @@ const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind,
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
ID.AddPointer(Ops[i]);
void *IP = nullptr;
- const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
+ SCEVUse ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
if (ExistingSCEV)
return ExistingSCEV;
- const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+ SCEVUse *O = SCEVAllocator.Allocate<SCEVUse>(Ops.size());
std::uninitialized_copy(Ops.begin(), Ops.end(), O);
SCEV *S = new (SCEVAllocator)
SCEVMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size());
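(The interning step above copies the operand list into allocator-owned storage so the node can outlive the caller's SmallVector; the same pattern in isolation, with a toy element type and a hypothetical helper name:

#include "llvm/Support/Allocator.h"
#include <memory>

static int *internOps(llvm::BumpPtrAllocator &Alloc, const int *Begin,
                      const int *End) {
  int *O = Alloc.Allocate<int>(End - Begin);
  std::uninitialized_copy(Begin, End, O); // stable copy, lives as long as Alloc
  return O;
}
)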
@@ -3975,14 +4005,14 @@ namespace {
class SCEVSequentialMinMaxDeduplicatingVisitor final
: public SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor,
- std::optional<const SCEV *>> {
- using RetVal = std::optional<const SCEV *>;
+ std::optional<SCEVUse>> {
+ using RetVal = std::optional<SCEVUse>;
using Base = SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor, RetVal>;
ScalarEvolution &SE;
const SCEVTypes RootKind; // Must be a sequential min/max expression.
const SCEVTypes NonSequentialRootKind; // Non-sequential variant of RootKind.
- SmallPtrSet<const SCEV *, 16> SeenOps;
+ SmallPtrSet<SCEVUse, 16> SeenOps;
bool canRecurseInto(SCEVTypes Kind) const {
// We can only recurse into the SCEV expression of the same effective type
@@ -3990,7 +4020,7 @@ class SCEVSequentialMinMaxDeduplicatingVisitor final
return RootKind == Kind || NonSequentialRootKind == Kind;
};
- RetVal visitAnyMinMaxExpr(const SCEV *S) {
+ RetVal visitAnyMinMaxExpr(SCEVUse S) {
assert((isa<SCEVMinMaxExpr>(S) || isa<SCEVSequentialMinMaxExpr>(S)) &&
"Only for min/max expressions.");
SCEVTypes Kind = S->getSCEVType();
@@ -3999,7 +4029,7 @@ class SCEVSequentialMinMaxDeduplicatingVisitor final
return S;
auto *NAry = cast<SCEVNAryExpr>(S);
- SmallVector<const SCEV *> NewOps;
+ SmallVector<SCEVUse> NewOps;
bool Changed = visit(Kind, NAry->operands(), NewOps);
if (!Changed)
@@ -4012,7 +4042,7 @@ class SCEVSequentialMinMaxDeduplicatingVisitor final
: SE.getMinMaxExpr(Kind, NewOps);
}
- RetVal visit(const SCEV *S) {
+ RetVal visit(SCEVUse S) {
// Has the whole operand been seen already?
if (!SeenOps.insert(S).second)
return std::nullopt;
@@ -4027,13 +4057,13 @@ class SCEVSequentialMinMaxDeduplicatingVisitor final
SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(
RootKind)) {}
- bool /*Changed*/ visit(SCEVTypes Kind, ArrayRef<const SCEV *> OrigOps,
- SmallVectorImpl<const SCEV *> &NewOps) {
+ bool /*Changed*/ visit(SCEVTypes Kind, ArrayRef<SCEVUse> OrigOps,
+ SmallVectorImpl<SCEVUse> &NewOps) {
bool Changed = false;
- SmallVector<const SCEV *> Ops;
+ SmallVector<SCEVUse> Ops;
Ops.reserve(OrigOps.size());
- for (const SCEV *Op : OrigOps) {
+ for (SCEVUse Op : OrigOps) {
RetVal NewOp = visit(Op);
if (NewOp != Op)
Changed = true;
@@ -4136,7 +4166,7 @@ struct SCEVPoisonCollector {
SCEVPoisonCollector(bool LookThroughMaybePoisonBlocking)
: LookThroughMaybePoisonBlocking(LookThroughMaybePoisonBlocking) {}
- bool follow(const SCEV *S) {
+ bool follow(SCEVUse S) {
if (!LookThroughMaybePoisonBlocking &&
!scevUnconditionallyPropagatesPoisonFromOperands(S->getSCEVType()))
return false;
@@ -4152,7 +4182,7 @@ struct SCEVPoisonCollector {
} // namespace
/// Return true if V is poison given that AssumedPoison is already poison.
-static bool impliesPoison(const SCEV *AssumedPoison, const SCEV *S) {
+static bool impliesPoison(SCEVUse AssumedPoison, SCEVUse S) {
  // First collect all SCEVs that might cause AssumedPoison to be poison.

// We need to look through potentially poison-blocking operations here,
// because we want to find all SCEVs that *might* result in poison, not only
@@ -4179,7 +4209,7 @@ static bool impliesPoison(const SCEV *AssumedPoison, const SCEV *S) {
}
void ScalarEvolution::getPoisonGeneratingValues(
- SmallPtrSetImpl<const Value *> &Result, const SCEV *S) {
+ SmallPtrSetImpl<const Value *> &Result, SCEVUse S) {
SCEVPoisonCollector PC(/* LookThroughMaybePoisonBlocking */ false);
visitAll(S, PC);
for (const SCEVUnknown *SU : PC.MaybePoison)
@@ -4187,7 +4217,7 @@ void ScalarEvolution::getPoisonGeneratingValues(
}
bool ScalarEvolution::canReuseInstruction(
- const SCEV *S, Instruction *I,
+ SCEVUse S, Instruction *I,
SmallVectorImpl<Instruction *> &DropPoisonGeneratingInsts) {
// If the instruction cannot be poison, it's always safe to reuse.
if (programUndefinedIfPoison(I))
@@ -4248,9 +4278,9 @@ bool ScalarEvolution::canReuseInstruction(
return true;
}
-const SCEV *
+SCEVUse
ScalarEvolution::getSequentialMinMaxExpr(SCEVTypes Kind,
- SmallVectorImpl<const SCEV *> &Ops) {
+ SmallVectorImpl<SCEVUse> &Ops) {
assert(SCEVSequentialMinMaxExpr::isSequentialMinMaxType(Kind) &&
"Not a SCEVSequentialMinMaxExpr!");
assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
@@ -4271,7 +4301,7 @@ ScalarEvolution::getSequentialMinMaxExpr(SCEVTypes Kind,
// so we can *NOT* do any kind of sorting of the expressions!
// Check if we have created the same expression before.
- if (const SCEV *S = findExistingSCEVInCache(Kind, Ops))
+ if (SCEVUse S = findExistingSCEVInCache(Kind, Ops))
return S;
// FIXME: there are *some* simplifications that we can do here.
@@ -4305,7 +4335,7 @@ ScalarEvolution::getSequentialMinMaxExpr(SCEVTypes Kind,
return getSequentialMinMaxExpr(Kind, Ops);
}
- const SCEV *SaturationPoint;
+ SCEVUse SaturationPoint;
ICmpInst::Predicate Pred;
switch (Kind) {
case scSequentialUMinExpr:
@@ -4323,7 +4353,7 @@ ScalarEvolution::getSequentialMinMaxExpr(SCEVTypes Kind,
if (::impliesPoison(Ops[i], Ops[i - 1]) ||
isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, Ops[i - 1],
SaturationPoint)) {
- SmallVector<const SCEV *> SeqOps = {Ops[i - 1], Ops[i]};
+ SmallVector<SCEVUse> SeqOps = {Ops[i - 1], Ops[i]};
Ops[i - 1] = getMinMaxExpr(
SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(Kind),
SeqOps);
@@ -4345,11 +4375,11 @@ ScalarEvolution::getSequentialMinMaxExpr(SCEVTypes Kind,
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
ID.AddPointer(Ops[i]);
void *IP = nullptr;
- const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
+ SCEVUse ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
if (ExistingSCEV)
return ExistingSCEV;
- const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+ SCEVUse *O = SCEVAllocator.Allocate<SCEVUse>(Ops.size());
std::uninitialized_copy(Ops.begin(), Ops.end(), O);
SCEV *S = new (SCEVAllocator)
SCEVSequentialMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size());
@@ -4359,65 +4389,62 @@ ScalarEvolution::getSequentialMinMaxExpr(SCEVTypes Kind,
return S;
}
-const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) {
- SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
+SCEVUse ScalarEvolution::getSMaxExpr(SCEVUse LHS, SCEVUse RHS) {
+ SmallVector<SCEVUse, 2> Ops = {LHS, RHS};
return getSMaxExpr(Ops);
}
-const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
+SCEVUse ScalarEvolution::getSMaxExpr(SmallVectorImpl<SCEVUse> &Ops) {
return getMinMaxExpr(scSMaxExpr, Ops);
}
-const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) {
- SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
+SCEVUse ScalarEvolution::getUMaxExpr(SCEVUse LHS, SCEVUse RHS) {
+ SmallVector<SCEVUse, 2> Ops = {LHS, RHS};
return getUMaxExpr(Ops);
}
-const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
+SCEVUse ScalarEvolution::getUMaxExpr(SmallVectorImpl<SCEVUse> &Ops) {
return getMinMaxExpr(scUMaxExpr, Ops);
}
-const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
- const SCEV *RHS) {
- SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
+SCEVUse ScalarEvolution::getSMinExpr(SCEVUse LHS, SCEVUse RHS) {
+ SmallVector<SCEVUse, 2> Ops = {LHS, RHS};
return getSMinExpr(Ops);
}
-const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
+SCEVUse ScalarEvolution::getSMinExpr(SmallVectorImpl<SCEVUse> &Ops) {
return getMinMaxExpr(scSMinExpr, Ops);
}
-const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, const SCEV *RHS,
- bool Sequential) {
- SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
+SCEVUse ScalarEvolution::getUMinExpr(SCEVUse LHS, SCEVUse RHS,
+ bool Sequential) {
+ SmallVector<SCEVUse, 2> Ops = {LHS, RHS};
return getUMinExpr(Ops, Sequential);
}
-const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops,
- bool Sequential) {
+SCEVUse ScalarEvolution::getUMinExpr(SmallVectorImpl<SCEVUse> &Ops,
+ bool Sequential) {
return Sequential ? getSequentialMinMaxExpr(scSequentialUMinExpr, Ops)
: getMinMaxExpr(scUMinExpr, Ops);
}
-const SCEV *
-ScalarEvolution::getSizeOfExpr(Type *IntTy, TypeSize Size) {
- const SCEV *Res = getConstant(IntTy, Size.getKnownMinValue());
+SCEVUse ScalarEvolution::getSizeOfExpr(Type *IntTy, TypeSize Size) {
+ SCEVUse Res = getConstant(IntTy, Size.getKnownMinValue());
if (Size.isScalable())
Res = getMulExpr(Res, getVScale(IntTy));
return Res;
}
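(For scalable vector types the allocation size is symbolic, known-min-size times vscale; the arithmetic being modeled, with vscale as a stand-in runtime value:

#include <cstdint>

// SCEV keeps VScale symbolic; this toy just shows the formula.
uint64_t allocSize(uint64_t KnownMinSize, bool Scalable, uint64_t VScale) {
  return Scalable ? KnownMinSize * VScale : KnownMinSize;
}
)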
-const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
+SCEVUse ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
return getSizeOfExpr(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
}
-const SCEV *ScalarEvolution::getStoreSizeOfExpr(Type *IntTy, Type *StoreTy) {
+SCEVUse ScalarEvolution::getStoreSizeOfExpr(Type *IntTy, Type *StoreTy) {
return getSizeOfExpr(IntTy, getDataLayout().getTypeStoreSize(StoreTy));
}
-const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
- StructType *STy,
- unsigned FieldNo) {
+SCEVUse ScalarEvolution::getOffsetOfExpr(Type *IntTy, StructType *STy,
+ unsigned FieldNo) {
// We can bypass creating a target-independent constant expression and then
// folding it back into a ConstantInt. This is just a compile-time
// optimization.
@@ -4427,7 +4454,7 @@ const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
return getConstant(IntTy, SL->getElementOffset(FieldNo));
}
-const SCEV *ScalarEvolution::getUnknown(Value *V) {
+SCEVUse ScalarEvolution::getUnknown(Value *V) {
// Don't attempt to do anything other than create a SCEVUnknown object
// here. createSCEV only calls getUnknown after checking for all other
// interesting possibilities, and any other code that calls getUnknown
@@ -4489,8 +4516,7 @@ Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2;
}
-bool ScalarEvolution::instructionCouldExistWithOperands(const SCEV *A,
- const SCEV *B) {
+bool ScalarEvolution::instructionCouldExistWithOperands(SCEVUse A, SCEVUse B) {
/// For a valid use point to exist, the defining scope of one operand
/// must dominate the other.
bool PreciseA, PreciseB;
@@ -4503,12 +4529,10 @@ bool ScalarEvolution::instructionCouldExistWithOperands(const SCEV *A,
DT.dominates(ScopeB, ScopeA);
}
-const SCEV *ScalarEvolution::getCouldNotCompute() {
- return CouldNotCompute.get();
-}
+SCEVUse ScalarEvolution::getCouldNotCompute() { return CouldNotCompute.get(); }
-bool ScalarEvolution::checkValidity(const SCEV *S) const {
- bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) {
+bool ScalarEvolution::checkValidity(SCEVUse S) const {
+ bool ContainsNulls = SCEVExprContains(S, [](SCEVUse S) {
auto *SU = dyn_cast<SCEVUnknown>(S);
return SU && SU->getValue() == nullptr;
});
@@ -4516,20 +4540,20 @@ bool ScalarEvolution::checkValidity(const SCEV *S) const {
return !ContainsNulls;
}
-bool ScalarEvolution::containsAddRecurrence(const SCEV *S) {
+bool ScalarEvolution::containsAddRecurrence(SCEVUse S) {
HasRecMapType::iterator I = HasRecMap.find(S);
if (I != HasRecMap.end())
return I->second;
bool FoundAddRec =
- SCEVExprContains(S, [](const SCEV *S) { return isa<SCEVAddRecExpr>(S); });
+ SCEVExprContains(S, [](SCEVUse S) { return isa<SCEVAddRecExpr>(S); });
HasRecMap.insert({S, FoundAddRec});
return FoundAddRec;
}
/// Return the ValueOffsetPair set for \p S. \p S can be represented
/// by the value and offset from any ValueOffsetPair in the set.
-ArrayRef<Value *> ScalarEvolution::getSCEVValues(const SCEV *S) {
+ArrayRef<Value *> ScalarEvolution::getSCEVValues(SCEVUse S) {
ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
if (SI == ExprValueMap.end())
return std::nullopt;
@@ -4550,7 +4574,7 @@ void ScalarEvolution::eraseValueFromMap(Value *V) {
}
}
-void ScalarEvolution::insertValueToMap(Value *V, const SCEV *S) {
+void ScalarEvolution::insertValueToMap(Value *V, SCEVUse S) {
// A recursive query may have already computed the SCEV. It should be
// equivalent, but may not necessarily be exactly the same, e.g. due to lazily
// inferred nowrap flags.
@@ -4563,20 +4587,20 @@ void ScalarEvolution::insertValueToMap(Value *V, const SCEV *S) {
/// Return an existing SCEV if it exists, otherwise analyze the expression and
/// create a new one.
-const SCEV *ScalarEvolution::getSCEV(Value *V) {
+SCEVUse ScalarEvolution::getSCEV(Value *V) {
assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
- if (const SCEV *S = getExistingSCEV(V))
+ if (SCEVUse S = getExistingSCEV(V))
return S;
return createSCEVIter(V);
}
-const SCEV *ScalarEvolution::getExistingSCEV(Value *V) {
+SCEVUse ScalarEvolution::getExistingSCEV(Value *V) {
assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
ValueExprMapType::iterator I = ValueExprMap.find_as(V);
if (I != ValueExprMap.end()) {
- const SCEV *S = I->second;
+ SCEVUse S = I->second;
assert(checkValidity(S) &&
"existing SCEV has not been properly invalidated");
return S;
@@ -4585,8 +4609,7 @@ const SCEV *ScalarEvolution::getExistingSCEV(Value *V) {
}
/// Return a SCEV corresponding to -V = -1*V
-const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V,
- SCEV::NoWrapFlags Flags) {
+SCEVUse ScalarEvolution::getNegativeSCEV(SCEVUse V, SCEV::NoWrapFlags Flags) {
if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
return getConstant(
cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
@@ -4597,7 +4620,7 @@ const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V,
}
/// If Expr computes ~A, return A else return nullptr
-static const SCEV *MatchNotExpr(const SCEV *Expr) {
+static SCEVUse MatchNotExpr(SCEVUse Expr) {
const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr);
if (!Add || Add->getNumOperands() != 2 ||
!Add->getOperand(0)->isAllOnesValue())
@@ -4612,7 +4635,7 @@ static const SCEV *MatchNotExpr(const SCEV *Expr) {
}
/// Return a SCEV corresponding to ~V = -1-V
-const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
+SCEVUse ScalarEvolution::getNotSCEV(SCEVUse V) {
assert(!V->getType()->isPointerTy() && "Can't negate pointer");
if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
@@ -4622,17 +4645,17 @@ const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
// Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y)
if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) {
auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) {
- SmallVector<const SCEV *, 2> MatchedOperands;
- for (const SCEV *Operand : MME->operands()) {
- const SCEV *Matched = MatchNotExpr(Operand);
+ SmallVector<SCEVUse, 2> MatchedOperands;
+ for (SCEVUse Operand : MME->operands()) {
+ SCEVUse Matched = MatchNotExpr(Operand);
if (!Matched)
- return (const SCEV *)nullptr;
+ return (SCEVUse) nullptr;
MatchedOperands.push_back(Matched);
}
return getMinMaxExpr(SCEVMinMaxExpr::negate(MME->getSCEVType()),
MatchedOperands);
};
- if (const SCEV *Replaced = MatchMinMaxNegation(MME))
+ if (SCEVUse Replaced = MatchMinMaxNegation(MME))
return Replaced;
}
@@ -4641,12 +4664,12 @@ const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
return getMinusSCEV(getMinusOne(Ty), V);
}
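(Two identities carry getNotSCEV: ~x == -1 - x, and negating the operands flips min into max. Both can be sanity-checked on plain ints:

#include <algorithm>
#include <cassert>

int main() {
  for (int X : {-7, 0, 42})
    assert(~X == -1 - X);
  int A = 3, B = 9;
  assert(~std::min(~A, ~B) == std::max(A, B)); // ~smin(~a,~b) == smax(a,b)
}
)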
-const SCEV *ScalarEvolution::removePointerBase(const SCEV *P) {
+SCEVUse ScalarEvolution::removePointerBase(SCEVUse P) {
assert(P->getType()->isPointerTy());
if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(P)) {
// The base of an AddRec is the first operand.
- SmallVector<const SCEV *> Ops{AddRec->operands()};
+ SmallVector<SCEVUse> Ops{AddRec->operands()};
Ops[0] = removePointerBase(Ops[0]);
// Don't try to transfer nowrap flags for now. We could in some cases
// (for example, if pointer operand of the AddRec is a SCEVUnknown).
@@ -4654,9 +4677,9 @@ const SCEV *ScalarEvolution::removePointerBase(const SCEV *P) {
}
if (auto *Add = dyn_cast<SCEVAddExpr>(P)) {
// The base of an Add is the pointer operand.
- SmallVector<const SCEV *> Ops{Add->operands()};
- const SCEV **PtrOp = nullptr;
- for (const SCEV *&AddOp : Ops) {
+ SmallVector<SCEVUse> Ops{Add->operands()};
+ SCEVUse *PtrOp = nullptr;
+ for (SCEVUse &AddOp : Ops) {
if (AddOp->getType()->isPointerTy()) {
assert(!PtrOp && "Cannot have multiple pointer ops");
PtrOp = &AddOp;
@@ -4671,9 +4694,8 @@ const SCEV *ScalarEvolution::removePointerBase(const SCEV *P) {
return getZero(P->getType());
}
-const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
- SCEV::NoWrapFlags Flags,
- unsigned Depth) {
+SCEVUse ScalarEvolution::getMinusSCEV(SCEVUse LHS, SCEVUse RHS,
+ SCEV::NoWrapFlags Flags, unsigned Depth) {
// Fast path: X - X --> 0.
if (LHS == RHS)
return getZero(LHS->getType());
@@ -4721,8 +4743,8 @@ const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
}
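(getMinusSCEV lowers subtraction to addition of a negation, with a fast path for X - X; as a toy model that ignores the flag bookkeeping and assumes no overflow:

int minus(int LHS, int RHS) {
  if (LHS == RHS)
    return 0;            // fast path: X - X --> 0
  return LHS + -1 * RHS; // X - Y --> X + (-1 * Y)
}
)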
-const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
- unsigned Depth) {
+SCEVUse ScalarEvolution::getTruncateOrZeroExtend(SCEVUse V, Type *Ty,
+ unsigned Depth) {
Type *SrcTy = V->getType();
assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
"Cannot truncate or zero extend with non-integer arguments!");
@@ -4733,8 +4755,8 @@ const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
return getZeroExtendExpr(V, Ty, Depth);
}
-const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty,
- unsigned Depth) {
+SCEVUse ScalarEvolution::getTruncateOrSignExtend(SCEVUse V, Type *Ty,
+ unsigned Depth) {
Type *SrcTy = V->getType();
assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
"Cannot truncate or zero extend with non-integer arguments!");
@@ -4745,8 +4767,7 @@ const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty,
return getSignExtendExpr(V, Ty, Depth);
}
-const SCEV *
-ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
+SCEVUse ScalarEvolution::getNoopOrZeroExtend(SCEVUse V, Type *Ty) {
Type *SrcTy = V->getType();
assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
"Cannot noop or zero extend with non-integer arguments!");
@@ -4757,8 +4778,7 @@ ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
return getZeroExtendExpr(V, Ty);
}
-const SCEV *
-ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
+SCEVUse ScalarEvolution::getNoopOrSignExtend(SCEVUse V, Type *Ty) {
Type *SrcTy = V->getType();
assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
"Cannot noop or sign extend with non-integer arguments!");
@@ -4769,8 +4789,7 @@ ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
return getSignExtendExpr(V, Ty);
}
-const SCEV *
-ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
+SCEVUse ScalarEvolution::getNoopOrAnyExtend(SCEVUse V, Type *Ty) {
Type *SrcTy = V->getType();
assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
"Cannot noop or any extend with non-integer arguments!");
@@ -4781,8 +4800,7 @@ ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
return getAnyExtendExpr(V, Ty);
}
-const SCEV *
-ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
+SCEVUse ScalarEvolution::getTruncateOrNoop(SCEVUse V, Type *Ty) {
Type *SrcTy = V->getType();
assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
"Cannot truncate or noop with non-integer arguments!");
@@ -4793,10 +4811,9 @@ ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
return getTruncateExpr(V, Ty);
}
-const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
- const SCEV *RHS) {
- const SCEV *PromotedLHS = LHS;
- const SCEV *PromotedRHS = RHS;
+SCEVUse ScalarEvolution::getUMaxFromMismatchedTypes(SCEVUse LHS, SCEVUse RHS) {
+ SCEVUse PromotedLHS = LHS;
+ SCEVUse PromotedRHS = RHS;
if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
@@ -4806,15 +4823,20 @@ const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
return getUMaxExpr(PromotedLHS, PromotedRHS);
}
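(The mismatched-type helpers promote the narrower operand with a zero extend before taking the max; the same promotion on plain integers:

#include <algorithm>
#include <cstdint>

uint64_t umaxMismatched(uint32_t Narrow, uint64_t Wide) {
  // implicit zext of Narrow to 64 bits, then unsigned max at the common width
  return std::max(uint64_t(Narrow), Wide);
}
)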
-const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
- const SCEV *RHS,
- bool Sequential) {
- SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
+SCEVUse ScalarEvolution::getUMinFromMismatchedTypes(SCEVUse LHS, SCEVUse RHS,
+ bool Sequential) {
+ SmallVector<SCEVUse, 2> Ops = {LHS, RHS};
return getUMinFromMismatchedTypes(Ops, Sequential);
}
-const SCEV *
-ScalarEvolution::getUMinFromMismatchedTypes(SmallVectorImpl<const SCEV *> &Ops,
+SCEVUse ScalarEvolution::getUMinFromMismatchedTypes(ArrayRef<const SCEV *> Ops,
+ bool Sequential) {
+ SmallVector<SCEVUse> Ops2(Ops);
+ return getUMinFromMismatchedTypes(Ops2, Sequential);
+}
+
+SCEVUse
+ScalarEvolution::getUMinFromMismatchedTypes(SmallVectorImpl<SCEVUse> &Ops,
bool Sequential) {
assert(!Ops.empty() && "At least one operand must be!");
// Trivial case.
@@ -4823,7 +4845,7 @@ ScalarEvolution::getUMinFromMismatchedTypes(SmallVectorImpl<const SCEV *> &Ops,
// Find the max type first.
Type *MaxType = nullptr;
- for (const auto *S : Ops)
+ for (const auto S : Ops)
if (MaxType)
MaxType = getWiderType(MaxType, S->getType());
else
@@ -4831,15 +4853,15 @@ ScalarEvolution::getUMinFromMismatchedTypes(SmallVectorImpl<const SCEV *> &Ops,
assert(MaxType && "Failed to find maximum type!");
// Extend all ops to max type.
- SmallVector<const SCEV *, 2> PromotedOps;
- for (const auto *S : Ops)
+ SmallVector<SCEVUse, 2> PromotedOps;
+ for (const auto S : Ops)
PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType));
// Generate umin.
return getUMinExpr(PromotedOps, Sequential);
}
-const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
+SCEVUse ScalarEvolution::getPointerBase(SCEVUse V) {
// A pointer operand may evaluate to a nonpointer expression, such as null.
if (!V->getType()->isPointerTy())
return V;
@@ -4848,8 +4870,8 @@ const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
V = AddRec->getStart();
} else if (auto *Add = dyn_cast<SCEVAddExpr>(V)) {
- const SCEV *PtrOp = nullptr;
- for (const SCEV *AddOp : Add->operands()) {
+ SCEVUse PtrOp = nullptr;
+ for (SCEVUse AddOp : Add->operands()) {
if (AddOp->getType()->isPointerTy()) {
assert(!PtrOp && "Cannot have multiple pointer ops");
PtrOp = AddOp;
@@ -4883,10 +4905,10 @@ namespace {
/// If the SCEV contains a non-invariant SCEVUnknown, the rewrite cannot be done.
class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
public:
- static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
- bool IgnoreOtherLoops = true) {
+ static SCEVUse rewrite(SCEVUse S, const Loop *L, ScalarEvolution &SE,
+ bool IgnoreOtherLoops = true) {
SCEVInitRewriter Rewriter(L, SE);
- const SCEV *Result = Rewriter.visit(S);
+ SCEVUse Result = Rewriter.visit(S);
if (Rewriter.hasSeenLoopVariantSCEVUnknown())
return SE.getCouldNotCompute();
return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
@@ -4894,13 +4916,13 @@ class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
: Result;
}
- const SCEV *visitUnknown(const SCEVUnknown *Expr) {
+ SCEVUse visitUnknown(const SCEVUnknown *Expr) {
if (!SE.isLoopInvariant(Expr, L))
SeenLoopVariantSCEVUnknown = true;
return Expr;
}
- const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
+ SCEVUse visitAddRecExpr(const SCEVAddRecExpr *Expr) {
// Only re-write AddRecExprs for this loop.
if (Expr->getLoop() == L)
return Expr->getStart();
@@ -4927,21 +4949,21 @@ class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
/// If the SCEV contains a non-invariant SCEVUnknown, the rewrite cannot be done.
class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> {
public:
- static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) {
+ static SCEVUse rewrite(SCEVUse S, const Loop *L, ScalarEvolution &SE) {
SCEVPostIncRewriter Rewriter(L, SE);
- const SCEV *Result = Rewriter.visit(S);
+ SCEVUse Result = Rewriter.visit(S);
return Rewriter.hasSeenLoopVariantSCEVUnknown()
? SE.getCouldNotCompute()
: Result;
}
- const SCEV *visitUnknown(const SCEVUnknown *Expr) {
+ SCEVUse visitUnknown(const SCEVUnknown *Expr) {
if (!SE.isLoopInvariant(Expr, L))
SeenLoopVariantSCEVUnknown = true;
return Expr;
}
- const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
+ SCEVUse visitAddRecExpr(const SCEVAddRecExpr *Expr) {
// Only re-write AddRecExprs for this loop.
if (Expr->getLoop() == L)
return Expr->getPostIncExpr(SE);
@@ -4968,8 +4990,7 @@ class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> {
class SCEVBackedgeConditionFolder
: public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> {
public:
- static const SCEV *rewrite(const SCEV *S, const Loop *L,
- ScalarEvolution &SE) {
+ static SCEVUse rewrite(SCEVUse S, const Loop *L, ScalarEvolution &SE) {
bool IsPosBECond = false;
Value *BECond = nullptr;
if (BasicBlock *Latch = L->getLoopLatch()) {
@@ -4987,8 +5008,8 @@ class SCEVBackedgeConditionFolder
return Rewriter.visit(S);
}
- const SCEV *visitUnknown(const SCEVUnknown *Expr) {
- const SCEV *Result = Expr;
+ SCEVUse visitUnknown(const SCEVUnknown *Expr) {
+ SCEVUse Result = Expr;
bool InvariantF = SE.isLoopInvariant(Expr, L);
if (!InvariantF) {
@@ -4996,7 +5017,7 @@ class SCEVBackedgeConditionFolder
switch (I->getOpcode()) {
case Instruction::Select: {
SelectInst *SI = cast<SelectInst>(I);
- std::optional<const SCEV *> Res =
+ std::optional<SCEVUse> Res =
compareWithBackedgeCondition(SI->getCondition());
if (Res) {
bool IsOne = cast<SCEVConstant>(*Res)->getValue()->isOne();
@@ -5005,7 +5026,7 @@ class SCEVBackedgeConditionFolder
break;
}
default: {
- std::optional<const SCEV *> Res = compareWithBackedgeCondition(I);
+ std::optional<SCEVUse> Res = compareWithBackedgeCondition(I);
if (Res)
Result = *Res;
break;
@@ -5021,7 +5042,7 @@ class SCEVBackedgeConditionFolder
: SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond),
IsPositiveBECond(IsPosBECond) {}
- std::optional<const SCEV *> compareWithBackedgeCondition(Value *IC);
+ std::optional<SCEVUse> compareWithBackedgeCondition(Value *IC);
const Loop *L;
/// Loop back condition.
@@ -5030,7 +5051,7 @@ class SCEVBackedgeConditionFolder
bool IsPositiveBECond;
};
-std::optional<const SCEV *>
+std::optional<SCEVUse>
SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) {
// If value matches the backedge condition for loop latch,
@@ -5044,21 +5065,20 @@ SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) {
class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
public:
- static const SCEV *rewrite(const SCEV *S, const Loop *L,
- ScalarEvolution &SE) {
+ static SCEVUse rewrite(SCEVUse S, const Loop *L, ScalarEvolution &SE) {
SCEVShiftRewriter Rewriter(L, SE);
- const SCEV *Result = Rewriter.visit(S);
+ SCEVUse Result = Rewriter.visit(S);
return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
}
- const SCEV *visitUnknown(const SCEVUnknown *Expr) {
+ SCEVUse visitUnknown(const SCEVUnknown *Expr) {
// Only allow AddRecExprs for this loop.
if (!SE.isLoopInvariant(Expr, L))
Valid = false;
return Expr;
}
- const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
+ SCEVUse visitAddRecExpr(const SCEVAddRecExpr *Expr) {
if (Expr->getLoop() == L && Expr->isAffine())
return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE));
Valid = false;
@@ -5087,7 +5107,7 @@ ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap;
if (!AR->hasNoSelfWrap()) {
- const SCEV *BECount = getConstantMaxBackedgeTakenCount(AR->getLoop());
+ SCEVUse BECount = getConstantMaxBackedgeTakenCount(AR->getLoop());
if (const SCEVConstant *BECountMax = dyn_cast<SCEVConstant>(BECount)) {
ConstantRange StepCR = getSignedRange(AR->getStepRecurrence(*this));
const APInt &BECountAP = BECountMax->getAPInt();
@@ -5135,7 +5155,7 @@ ScalarEvolution::proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR) {
if (!SignedWrapViaInductionTried.insert(AR).second)
return Result;
- const SCEV *Step = AR->getStepRecurrence(*this);
+ SCEVUse Step = AR->getStepRecurrence(*this);
const Loop *L = AR->getLoop();
// Check whether the backedge-taken count is SCEVCouldNotCompute.
@@ -5146,7 +5166,7 @@ ScalarEvolution::proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR) {
// in infinite recursion. In the latter case, the analysis code will
// cope with a conservative value, and it will take care to purge
// that value once it has finished.
- const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
+ SCEVUse MaxBECount = getConstantMaxBackedgeTakenCount(L);
// Normally, in the cases we can prove no-overflow via a
// backedge guarding condition, we can also compute a backedge
@@ -5165,8 +5185,7 @@ ScalarEvolution::proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR) {
// start value and the backedge is guarded by a comparison with the post-inc
// value, the addrec is safe.
ICmpInst::Predicate Pred;
- const SCEV *OverflowLimit =
- getSignedOverflowLimitForStep(Step, &Pred, this);
+ SCEVUse OverflowLimit = getSignedOverflowLimitForStep(Step, &Pred, this);
if (OverflowLimit &&
(isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
isKnownOnEveryIteration(Pred, AR, OverflowLimit))) {
@@ -5188,7 +5207,7 @@ ScalarEvolution::proveNoUnsignedWrapViaInduction(const SCEVAddRecExpr *AR) {
if (!UnsignedWrapViaInductionTried.insert(AR).second)
return Result;
- const SCEV *Step = AR->getStepRecurrence(*this);
+ SCEVUse Step = AR->getStepRecurrence(*this);
unsigned BitWidth = getTypeSizeInBits(AR->getType());
const Loop *L = AR->getLoop();
@@ -5200,7 +5219,7 @@ ScalarEvolution::proveNoUnsignedWrapViaInduction(const SCEVAddRecExpr *AR) {
// in infinite recursion. In the latter case, the analysis code will
// cope with a conservative value, and it will take care to purge
// that value once it has finished.
- const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
+ SCEVUse MaxBECount = getConstantMaxBackedgeTakenCount(L);
// Normally, in the cases we can prove no-overflow via a
// backedge guarding condition, we can also compute a backedge
@@ -5219,8 +5238,8 @@ ScalarEvolution::proveNoUnsignedWrapViaInduction(const SCEVAddRecExpr *AR) {
// start value and the backedge is guarded by a comparison with the post-inc
// value, the addrec is safe.
if (isKnownPositive(Step)) {
- const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
- getUnsignedRangeMax(Step));
+ SCEVUse N =
+ getConstant(APInt::getMinValue(BitWidth) - getUnsignedRangeMax(Step));
if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) {
Result = setFlags(Result, SCEV::FlagNUW);
@@ -5369,7 +5388,7 @@ static std::optional<BinaryOp> MatchBinaryOp(Value *V, const DataLayout &DL,
/// we return the type of the truncation operation, and indicate whether the
/// truncated type should be treated as signed/unsigned by setting
/// \p Signed to true/false, respectively.
-static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI,
+static Type *isSimpleCastedPHI(SCEVUse Op, const SCEVUnknown *SymbolicPHI,
bool &Signed, ScalarEvolution &SE) {
// The case where Op == SymbolicPHI (that is, with no type conversions on
// the way) is handled by the regular add recurrence creating logic and
@@ -5398,7 +5417,7 @@ static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI,
: dyn_cast<SCEVTruncateExpr>(ZExt->getOperand());
if (!Trunc)
return nullptr;
- const SCEV *X = Trunc->getOperand();
+ SCEVUse X = Trunc->getOperand();
if (X != SymbolicPHI)
return nullptr;
Signed = SExt != nullptr;
@@ -5467,8 +5486,9 @@ static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) {
// which correspond to a phi->trunc->add->sext/zext->phi update chain.
//
// 3) Outline common code with createAddRecFromPHI to avoid duplication.
-std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
-ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) {
+std::optional<std::pair<SCEVUse, SmallVector<const SCEVPredicate *, 3>>>
+ScalarEvolution::createAddRecFromPHIWithCastsImpl(
+ const SCEVUnknown *SymbolicPHI) {
SmallVector<const SCEVPredicate *, 3> Predicates;
// *** Part1: Analyze if we have a phi-with-cast pattern for which we can
@@ -5501,7 +5521,7 @@ ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI
if (!BEValueV || !StartValueV)
return std::nullopt;
- const SCEV *BEValue = getSCEV(BEValueV);
+ SCEVUse BEValue = getSCEV(BEValueV);
// If the value coming around the backedge is an add with the symbolic
// value we just inserted, possibly with casts that we can ignore under
@@ -5527,11 +5547,11 @@ ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI
return std::nullopt;
// Create an add with everything but the specified operand.
- SmallVector<const SCEV *, 8> Ops;
+ SmallVector<SCEVUse, 8> Ops;
for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
if (i != FoundIndex)
Ops.push_back(Add->getOperand(i));
- const SCEV *Accum = getAddExpr(Ops);
+ SCEVUse Accum = getAddExpr(Ops);
// The runtime checks will not be valid if the step amount is
// varying inside the loop.
@@ -5589,8 +5609,8 @@ ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI
//
// Create a truncated addrec for which we will add a no overflow check (P1).
- const SCEV *StartVal = getSCEV(StartValueV);
- const SCEV *PHISCEV =
+ SCEVUse StartVal = getSCEV(StartValueV);
+ SCEVUse PHISCEV =
getAddRecExpr(getTruncateExpr(StartVal, TruncTy),
getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap);
@@ -5617,11 +5637,10 @@ ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI
// Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy)
// for each of StartVal and Accum
- auto getExtendedExpr = [&](const SCEV *Expr,
- bool CreateSignExtend) -> const SCEV * {
+ auto getExtendedExpr = [&](SCEVUse Expr, bool CreateSignExtend) -> SCEVUse {
assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant");
- const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy);
- const SCEV *ExtendedExpr =
+ SCEVUse TruncatedExpr = getTruncateExpr(Expr, TruncTy);
+ SCEVUse ExtendedExpr =
CreateSignExtend ? getSignExtendExpr(TruncatedExpr, Expr->getType())
: getZeroExtendExpr(TruncatedExpr, Expr->getType());
return ExtendedExpr;
@@ -5632,13 +5651,12 @@ ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI
// = getExtendedExpr(Expr)
// Determine whether the predicate P: Expr == ExtendedExpr
// is known to be false at compile time
- auto PredIsKnownFalse = [&](const SCEV *Expr,
- const SCEV *ExtendedExpr) -> bool {
+ auto PredIsKnownFalse = [&](SCEVUse Expr, SCEVUse ExtendedExpr) -> bool {
return Expr != ExtendedExpr &&
isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
};
- const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
+ SCEVUse StartExtended = getExtendedExpr(StartVal, Signed);
if (PredIsKnownFalse(StartVal, StartExtended)) {
LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";);
return std::nullopt;
@@ -5646,14 +5664,13 @@ ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI
// The Step is always Signed (because the overflow checks are either
// NSSW or NUSW)
- const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
+ SCEVUse AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
if (PredIsKnownFalse(Accum, AccumExtended)) {
LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";);
return std::nullopt;
}
- auto AppendPredicate = [&](const SCEV *Expr,
- const SCEV *ExtendedExpr) -> void {
+ auto AppendPredicate = [&](SCEVUse Expr, SCEVUse ExtendedExpr) -> void {
if (Expr != ExtendedExpr &&
!isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
@@ -5669,16 +5686,16 @@ ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI
// which the casts had been folded away. The caller can rewrite SymbolicPHI
// into NewAR if it will also add the runtime overflow checks specified in
// Predicates.
- auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);
+ auto NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);
- std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
+ std::pair<SCEVUse, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
std::make_pair(NewAR, Predicates);
// Remember the result of the analysis for this SCEV at this location.
PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
return PredRewrite;
}
-std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
+std::optional<std::pair<SCEVUse, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
auto *PN = cast<PHINode>(SymbolicPHI->getValue());
const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
@@ -5688,7 +5705,7 @@ ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
// Check to see if we already analyzed this PHI.
auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
if (I != PredicatedSCEVRewrites.end()) {
- std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
+ std::pair<SCEVUse, SmallVector<const SCEVPredicate *, 3>> Rewrite =
I->second;
// Analysis was done before and failed to create an AddRec:
if (Rewrite.first == SymbolicPHI)
@@ -5700,8 +5717,8 @@ ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
return Rewrite;
}
- std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
- Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);
+ std::optional<std::pair<SCEVUse, SmallVector<const SCEVPredicate *, 3>>>
+ Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);
// Record in the cache that the analysis failed
if (!Rewrite) {
@@ -5724,7 +5741,7 @@ bool PredicatedScalarEvolution::areAddRecsEqualWithPreds(
if (AR1 == AR2)
return true;
- auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool {
+ auto areExprsEqual = [&](SCEVUse Expr1, SCEVUse Expr2) -> bool {
if (Expr1 != Expr2 && !Preds->implies(SE.getEqualPredicate(Expr1, Expr2)) &&
!Preds->implies(SE.getEqualPredicate(Expr2, Expr1)))
return false;
@@ -5743,9 +5760,8 @@ bool PredicatedScalarEvolution::areAddRecsEqualWithPreds(
/// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
/// If it fails, createAddRecFromPHI will use a more general, but slow,
/// technique for finding the AddRec expression.
-const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
- Value *BEValueV,
- Value *StartValueV) {
+SCEVUse ScalarEvolution::createSimpleAffineAddRec(PHINode *PN, Value *BEValueV,
+ Value *StartValueV) {
const Loop *L = LI.getLoopFor(PN->getParent());
assert(L && L->getHeader() == PN->getParent());
assert(BEValueV && StartValueV);
@@ -5757,7 +5773,7 @@ const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
if (BO->Opcode != Instruction::Add)
return nullptr;
- const SCEV *Accum = nullptr;
+ SCEVUse Accum = nullptr;
if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
Accum = getSCEV(BO->RHS);
else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
@@ -5772,8 +5788,8 @@ const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
if (BO->IsNSW)
Flags = setFlags(Flags, SCEV::FlagNSW);
- const SCEV *StartVal = getSCEV(StartValueV);
- const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
+ SCEVUse StartVal = getSCEV(StartValueV);
+ SCEVUse PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
insertValueToMap(PN, PHISCEV);
if (auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
@@ -5795,7 +5811,7 @@ const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
return PHISCEV;
}
-const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
+SCEVUse ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
const Loop *L = LI.getLoopFor(PN->getParent());
if (!L || L->getHeader() != PN->getParent())
return nullptr;
@@ -5828,16 +5844,16 @@ const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
// First, try to find an AddRec expression without creating a fictitious symbolic
// value for PN.
- if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
+ if (auto S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
return S;
// Handle PHI node value symbolically.
- const SCEV *SymbolicName = getUnknown(PN);
+ SCEVUse SymbolicName = getUnknown(PN);
insertValueToMap(PN, SymbolicName);
// Using this symbolic name for the PHI, analyze the value coming around
// the back-edge.
- const SCEV *BEValue = getSCEV(BEValueV);
+ SCEVUse BEValue = getSCEV(BEValueV);
// NOTE: If BEValue is loop invariant, we know that the PHI node just
// has a special value for the first iteration of the loop.
@@ -5857,12 +5873,12 @@ const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
if (FoundIndex != Add->getNumOperands()) {
// Create an add with everything but the specified operand.
- SmallVector<const SCEV *, 8> Ops;
+ SmallVector<SCEVUse, 8> Ops;
for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
if (i != FoundIndex)
Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
L, *this));
- const SCEV *Accum = getAddExpr(Ops);
+ SCEVUse Accum = getAddExpr(Ops);
// This is not a valid addrec if the step amount is varying each
// loop iteration, but is not itself an addrec in this loop.
@@ -5896,8 +5912,8 @@ const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
// for instance.
}
- const SCEV *StartVal = getSCEV(StartValueV);
- const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
+ SCEVUse StartVal = getSCEV(StartValueV);
+ SCEVUse PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
// Okay, for the entire analysis of this edge we assumed the PHI
// to be symbolic. We now need to go back and purge all of the
@@ -5931,11 +5947,11 @@ const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
// We can generalize this saying that i is the shifted value of BEValue
// by one iteration:
// PHI(f(0), f({1,+,1})) --> f({0,+,1})
- const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
- const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
+ SCEVUse Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
+ SCEVUse Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
if (Shifted != getCouldNotCompute() &&
Start != getCouldNotCompute()) {
- const SCEV *StartVal = getSCEV(StartValueV);
+ SCEVUse StartVal = getSCEV(StartValueV);
if (Start == StartVal) {
// Okay, for the entire analysis of this edge we assumed the PHI
// to be symbolic. We now need to go back and purge all of the
@@ -5989,7 +6005,7 @@ static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge,
return false;
}
-const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) {
+SCEVUse ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) {
auto IsReachable =
[&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); };
if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) {
@@ -6021,24 +6037,24 @@ const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) {
return nullptr;
}
-const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
- if (const SCEV *S = createAddRecFromPHI(PN))
+SCEVUse ScalarEvolution::createNodeForPHI(PHINode *PN) {
+ if (SCEVUse S = createAddRecFromPHI(PN))
return S;
if (Value *V = simplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC}))
return getSCEV(V);
- if (const SCEV *S = createNodeFromSelectLikePHI(PN))
+ if (SCEVUse S = createNodeFromSelectLikePHI(PN))
return S;
// If it's not a loop phi, we can't handle it yet.
return getUnknown(PN);
}
-bool SCEVMinMaxExprContains(const SCEV *Root, const SCEV *OperandToFind,
+bool SCEVMinMaxExprContains(SCEVUse Root, SCEVUse OperandToFind,
SCEVTypes RootKind) {
struct FindClosure {
- const SCEV *OperandToFind;
+ SCEVUse OperandToFind;
const SCEVTypes RootKind; // Must be a sequential min/max expression.
const SCEVTypes NonSequentialRootKind; // Non-seq variant of RootKind.
@@ -6051,13 +6067,13 @@ bool SCEVMinMaxExprContains(const SCEV *Root, const SCEV *OperandToFind,
scZeroExtend == Kind;
};
- FindClosure(const SCEV *OperandToFind, SCEVTypes RootKind)
+ FindClosure(SCEVUse OperandToFind, SCEVTypes RootKind)
: OperandToFind(OperandToFind), RootKind(RootKind),
NonSequentialRootKind(
SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(
RootKind)) {}
- bool follow(const SCEV *S) {
+ bool follow(SCEVUse S) {
Found = S == OperandToFind;
return !isDone() && canRecurseInto(S->getSCEVType());
@@ -6071,7 +6087,7 @@ bool SCEVMinMaxExprContains(const SCEV *Root, const SCEV *OperandToFind,
return FC.Found;
}
-std::optional<const SCEV *>
+std::optional<SCEVUse>
ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond(Type *Ty,
ICmpInst *Cond,
Value *TrueVal,
@@ -6097,10 +6113,10 @@ ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond(Type *Ty,
// a > b ? b+x : a+x -> min(a, b)+x
if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(Ty)) {
bool Signed = ICI->isSigned();
- const SCEV *LA = getSCEV(TrueVal);
- const SCEV *RA = getSCEV(FalseVal);
- const SCEV *LS = getSCEV(LHS);
- const SCEV *RS = getSCEV(RHS);
+ SCEVUse LA = getSCEV(TrueVal);
+ SCEVUse RA = getSCEV(FalseVal);
+ SCEVUse LS = getSCEV(LHS);
+ SCEVUse RS = getSCEV(RHS);
if (LA->getType()->isPointerTy()) {
// FIXME: Handle cases where LS/RS are pointers not equal to LA/RA.
// Need to make sure we can't produce weird expressions involving
@@ -6110,7 +6126,7 @@ ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond(Type *Ty,
if (LA == RS && RA == LS)
return Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS);
}
- auto CoerceOperand = [&](const SCEV *Op) -> const SCEV * {
+ auto CoerceOperand = [&](SCEVUse Op) -> SCEVUse {
if (Op->getType()->isPointerTy()) {
Op = getLosslessPtrToIntExpr(Op);
if (isa<SCEVCouldNotCompute>(Op))
@@ -6126,8 +6142,8 @@ ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond(Type *Ty,
RS = CoerceOperand(RS);
if (isa<SCEVCouldNotCompute>(LS) || isa<SCEVCouldNotCompute>(RS))
break;
- const SCEV *LDiff = getMinusSCEV(LA, LS);
- const SCEV *RDiff = getMinusSCEV(RA, RS);
+ SCEVUse LDiff = getMinusSCEV(LA, LS);
+ SCEVUse RDiff = getMinusSCEV(RA, RS);
if (LDiff == RDiff)
return getAddExpr(Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS),
LDiff);
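(One of the four shapes listed above, checked on plain ints; the LDiff == RDiff test generalizes this to arms that share an additive offset x:

#include <algorithm>
#include <cassert>

int main() {
  int A = 4, B = 9, X = 100;
  assert((A > B ? A + X : B + X) == std::max(A, B) + X);
}
)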
@@ -6146,11 +6162,11 @@ ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond(Type *Ty,
// x == 0 ? C+y : x+y -> umax(x, C)+y iff C u<= 1
if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(Ty) &&
isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
- const SCEV *X = getNoopOrZeroExtend(getSCEV(LHS), Ty);
- const SCEV *TrueValExpr = getSCEV(TrueVal); // C+y
- const SCEV *FalseValExpr = getSCEV(FalseVal); // x+y
- const SCEV *Y = getMinusSCEV(FalseValExpr, X); // y = (x+y)-x
- const SCEV *C = getMinusSCEV(TrueValExpr, Y); // C = (C+y)-y
+ SCEVUse X = getNoopOrZeroExtend(getSCEV(LHS), Ty);
+ SCEVUse TrueValExpr = getSCEV(TrueVal); // C+y
+ SCEVUse FalseValExpr = getSCEV(FalseVal); // x+y
+ SCEVUse Y = getMinusSCEV(FalseValExpr, X); // y = (x+y)-x
+ SCEVUse C = getMinusSCEV(TrueValExpr, Y); // C = (C+y)-y
if (isa<SCEVConstant>(C) && cast<SCEVConstant>(C)->getAPInt().ule(1))
return getAddExpr(getUMaxExpr(X, C), Y);
}
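(That fold is easy to mistrust, so here is an exhaustive check of it for small unsigned values; it holds precisely because C u<= 1 forces umax(x, C) == x whenever x != 0:

#include <algorithm>
#include <cassert>

int main() {
  for (unsigned C : {0u, 1u})
    for (unsigned X = 0; X < 16; ++X)
      for (unsigned Y = 0; Y < 16; ++Y)
        assert((X == 0 ? C + Y : X + Y) == std::max(X, C) + Y);
}
)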
@@ -6160,11 +6176,11 @@ ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond(Type *Ty,
// -> umin_seq(x, umin (..., umin_seq(...), ...))
if (isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero() &&
isa<ConstantInt>(TrueVal) && cast<ConstantInt>(TrueVal)->isZero()) {
- const SCEV *X = getSCEV(LHS);
+ SCEVUse X = getSCEV(LHS);
while (auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(X))
X = ZExt->getOperand();
if (getTypeSizeInBits(X->getType()) <= getTypeSizeInBits(Ty)) {
- const SCEV *FalseValExpr = getSCEV(FalseVal);
+ SCEVUse FalseValExpr = getSCEV(FalseVal);
if (SCEVMinMaxExprContains(FalseValExpr, X, scSequentialUMinExpr))
return getUMinExpr(getNoopOrZeroExtend(X, Ty), FalseValExpr,
/*Sequential=*/true);
@@ -6178,9 +6194,10 @@ ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond(Type *Ty,
return std::nullopt;
}
-static std::optional<const SCEV *>
-createNodeForSelectViaUMinSeq(ScalarEvolution *SE, const SCEV *CondExpr,
- const SCEV *TrueExpr, const SCEV *FalseExpr) {
+static std::optional<SCEVUse> createNodeForSelectViaUMinSeq(ScalarEvolution *SE,
+ SCEVUse CondExpr,
+ SCEVUse TrueExpr,
+ SCEVUse FalseExpr) {
assert(CondExpr->getType()->isIntegerTy(1) &&
TrueExpr->getType() == FalseExpr->getType() &&
TrueExpr->getType()->isIntegerTy(1) &&
@@ -6198,7 +6215,7 @@ createNodeForSelectViaUMinSeq(ScalarEvolution *SE, const SCEV *CondExpr,
if (!isa<SCEVConstant>(TrueExpr) && !isa<SCEVConstant>(FalseExpr))
return std::nullopt;
- const SCEV *X, *C;
+ SCEVUse X, C;
if (isa<SCEVConstant>(TrueExpr)) {
CondExpr = SE->getNotSCEV(CondExpr);
X = FalseExpr;
@@ -6211,20 +6228,23 @@ createNodeForSelectViaUMinSeq(ScalarEvolution *SE, const SCEV *CondExpr,
/*Sequential=*/true));
}
-static std::optional<const SCEV *>
-createNodeForSelectViaUMinSeq(ScalarEvolution *SE, Value *Cond, Value *TrueVal,
- Value *FalseVal) {
+static std::optional<SCEVUse> createNodeForSelectViaUMinSeq(ScalarEvolution *SE,
+ Value *Cond,
+ Value *TrueVal,
+ Value *FalseVal) {
if (!isa<ConstantInt>(TrueVal) && !isa<ConstantInt>(FalseVal))
return std::nullopt;
- const auto *SECond = SE->getSCEV(Cond);
- const auto *SETrue = SE->getSCEV(TrueVal);
- const auto *SEFalse = SE->getSCEV(FalseVal);
+ const auto SECond = SE->getSCEV(Cond);
+ const auto SETrue = SE->getSCEV(TrueVal);
+ const auto SEFalse = SE->getSCEV(FalseVal);
return createNodeForSelectViaUMinSeq(SE, SECond, SETrue, SEFalse);
}
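(The i1 shape handled here is short-circuiting boolean arithmetic: select(C, X, false) is C && X, which SCEV spells umin_seq(C, X) to keep the must-not-evaluate-X-when-C-is-false poison semantics. The truth-table half of that claim on plain bools:

#include <cassert>

int main() {
  for (bool C : {false, true})
    for (bool X : {false, true})
      assert((C ? X : false) == (C && X)); // sequential umin on i1 is AND
}
)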
-const SCEV *ScalarEvolution::createNodeForSelectOrPHIViaUMinSeq(
- Value *V, Value *Cond, Value *TrueVal, Value *FalseVal) {
+SCEVUse ScalarEvolution::createNodeForSelectOrPHIViaUMinSeq(Value *V,
+ Value *Cond,
+ Value *TrueVal,
+ Value *FalseVal) {
assert(Cond->getType()->isIntegerTy(1) && "Select condition is not an i1?");
assert(TrueVal->getType() == FalseVal->getType() &&
V->getType() == TrueVal->getType() &&
@@ -6234,16 +6254,16 @@ const SCEV *ScalarEvolution::createNodeForSelectOrPHIViaUMinSeq(
if (!V->getType()->isIntegerTy(1))
return getUnknown(V);
- if (std::optional<const SCEV *> S =
+ if (std::optional<SCEVUse> S =
createNodeForSelectViaUMinSeq(this, Cond, TrueVal, FalseVal))
return *S;
return getUnknown(V);
}
-const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Value *V, Value *Cond,
- Value *TrueVal,
- Value *FalseVal) {
+SCEVUse ScalarEvolution::createNodeForSelectOrPHI(Value *V, Value *Cond,
+ Value *TrueVal,
+ Value *FalseVal) {
// Handle "constant" branch or select. This can occur for instance when a
// loop pass transforms an inner loop and moves on to process the outer loop.
if (auto *CI = dyn_cast<ConstantInt>(Cond))
@@ -6251,7 +6271,7 @@ const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Value *V, Value *Cond,
if (auto *I = dyn_cast<Instruction>(V)) {
if (auto *ICI = dyn_cast<ICmpInst>(Cond)) {
- if (std::optional<const SCEV *> S =
+ if (std::optional<SCEVUse> S =
createNodeForSelectOrPHIInstWithICmpInstCond(I->getType(), ICI,
TrueVal, FalseVal))
return *S;
@@ -6263,17 +6283,17 @@ const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Value *V, Value *Cond,
/// Expand GEP instructions into add and multiply operations. This allows them
/// to be analyzed by regular SCEV code.
-const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
+SCEVUse ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
assert(GEP->getSourceElementType()->isSized() &&
"GEP source element type must be sized");
- SmallVector<const SCEV *, 4> IndexExprs;
+ SmallVector<SCEVUse, 4> IndexExprs;
for (Value *Index : GEP->indices())
IndexExprs.push_back(getSCEV(Index));
return getGEPExpr(GEP, IndexExprs);
}
-APInt ScalarEvolution::getConstantMultipleImpl(const SCEV *S) {
+APInt ScalarEvolution::getConstantMultipleImpl(SCEVUse S) {
uint64_t BitWidth = getTypeSizeInBits(S->getType());
auto GetShiftedByZeros = [BitWidth](uint32_t TrailingZeros) {
return TrailingZeros >= BitWidth
@@ -6316,7 +6336,7 @@ APInt ScalarEvolution::getConstantMultipleImpl(const SCEV *S) {
if (M->hasNoUnsignedWrap()) {
// The result is the product of all operand results.
APInt Res = getConstantMultiple(M->getOperand(0));
- for (const SCEV *Operand : M->operands().drop_front())
+ for (SCEVUse Operand : M->operands().drop_front())
Res = Res * getConstantMultiple(Operand);
return Res;
}
@@ -6324,7 +6344,7 @@ APInt ScalarEvolution::getConstantMultipleImpl(const SCEV *S) {
// If there are no wrap guarantees, find the trailing zeros, which is the
// sum of trailing zeros for all its operands.
uint32_t TZ = 0;
- for (const SCEV *Operand : M->operands())
+ for (SCEVUse Operand : M->operands())
TZ += getMinTrailingZeros(Operand);
return GetShiftedByZeros(TZ);
}
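(The trailing-zero bookkeeping above rests on: the trailing zeros of a product are at least the sum of the operands' trailing zeros. A small concrete instance:

#include <cassert>

int main() {
  unsigned A = 12; // 0b1100: 2 trailing zero bits
  unsigned B = 6;  // 0b0110: 1 trailing zero bit
  assert((A * B) % 8 == 0); // product has at least 2 + 1 = 3
}
)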
@@ -6335,7 +6355,7 @@ APInt ScalarEvolution::getConstantMultipleImpl(const SCEV *S) {
return GetGCDMultiple(N);
// Find the trailing zeros, which is the minimum over its operands.
uint32_t TZ = getMinTrailingZeros(N->getOperand(0));
- for (const SCEV *Operand : N->operands().drop_front())
+ for (SCEVUse Operand : N->operands().drop_front())
TZ = std::min(TZ, getMinTrailingZeros(Operand));
return GetShiftedByZeros(TZ);
}
@@ -6359,7 +6379,7 @@ APInt ScalarEvolution::getConstantMultipleImpl(const SCEV *S) {
llvm_unreachable("Unknown SCEV kind!");
}
-APInt ScalarEvolution::getConstantMultiple(const SCEV *S) {
+APInt ScalarEvolution::getConstantMultiple(SCEVUse S) {
auto I = ConstantMultipleCache.find(S);
if (I != ConstantMultipleCache.end())
return I->second;
@@ -6370,12 +6390,12 @@ APInt ScalarEvolution::getConstantMultiple(const SCEV *S) {
return InsertPair.first->second;
}
-APInt ScalarEvolution::getNonZeroConstantMultiple(const SCEV *S) {
+APInt ScalarEvolution::getNonZeroConstantMultiple(SCEVUse S) {
APInt Multiple = getConstantMultiple(S);
return Multiple == 0 ? APInt(Multiple.getBitWidth(), 1) : Multiple;
}
-uint32_t ScalarEvolution::getMinTrailingZeros(const SCEV *S) {
+uint32_t ScalarEvolution::getMinTrailingZeros(SCEVUse S) {
return std::min(getConstantMultiple(S).countTrailingZeros(),
(unsigned)getTypeSizeInBits(S->getType()));
}
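
The two helpers above lean on a simple invariant: if every value an expression can take is a multiple of M, the expression has at least countr_zero(M) trailing zero bits, clamped to the type width. A standalone sketch with plain uint64_t in place of APInt (names illustrative):

#include <algorithm>
#include <cstdint>

static unsigned countTrailingZeros64(uint64_t V) {
  if (V == 0)
    return 64; // convention for the multiple-of-zero case
  unsigned N = 0;
  for (; (V & 1) == 0; V >>= 1)
    ++N;
  return N;
}

static unsigned minTrailingZeros(uint64_t ConstantMultiple,
                                 unsigned BitWidth) {
  // Mirrors the clamp above: never report more zero bits than the type has.
  return std::min(countTrailingZeros64(ConstantMultiple), BitWidth);
}

// E.g. a value known to be a multiple of 24 (0b11000) has at least
// three trailing zeros: minTrailingZeros(24, 32) == 3.
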
@@ -6526,17 +6546,17 @@ getRangeForUnknownRecurrence(const SCEVUnknown *U) {
}
const ConstantRange &
-ScalarEvolution::getRangeRefIter(const SCEV *S,
+ScalarEvolution::getRangeRefIter(SCEVUse S,
ScalarEvolution::RangeSignHint SignHint) {
- DenseMap<const SCEV *, ConstantRange> &Cache =
+ DenseMap<SCEVUse, ConstantRange> &Cache =
SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
: SignedRanges;
- SmallVector<const SCEV *> WorkList;
- SmallPtrSet<const SCEV *, 8> Seen;
+ SmallVector<SCEVUse> WorkList;
+ SmallPtrSet<SCEVUse, 8> Seen;
// Add Expr to the worklist, if Expr is either an N-ary expression or a
// SCEVUnknown PHI node.
- auto AddToWorklist = [&WorkList, &Seen, &Cache](const SCEV *Expr) {
+ auto AddToWorklist = [&WorkList, &Seen, &Cache](SCEVUse Expr) {
if (!Seen.insert(Expr).second)
return;
if (Cache.contains(Expr))
@@ -6571,11 +6591,11 @@ ScalarEvolution::getRangeRefIter(const SCEV *S,
// Build worklist by queuing operands of N-ary expressions and phi nodes.
for (unsigned I = 0; I != WorkList.size(); ++I) {
- const SCEV *P = WorkList[I];
+ SCEVUse P = WorkList[I];
auto *UnknownS = dyn_cast<SCEVUnknown>(P);
// If it is not a `SCEVUnknown`, just recurse into operands.
if (!UnknownS) {
- for (const SCEV *Op : P->operands())
+ for (SCEVUse Op : P->operands())
AddToWorklist(Op);
continue;
}
@@ -6592,7 +6612,7 @@ ScalarEvolution::getRangeRefIter(const SCEV *S,
// Use getRangeRef to compute ranges for items in the worklist in reverse
// order. This will force ranges for earlier operands to be computed before
// their users in most cases.
- for (const SCEV *P : reverse(drop_begin(WorkList))) {
+ for (SCEVUse P : reverse(drop_begin(WorkList))) {
getRangeRef(P, SignHint);
if (auto *UnknownS = dyn_cast<SCEVUnknown>(P))
@@ -6607,9 +6627,10 @@ ScalarEvolution::getRangeRefIter(const SCEV *S,
/// Determine the range for a particular SCEV. If SignHint is
/// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges
/// with a "cleaner" unsigned (resp. signed) representation.
-const ConstantRange &ScalarEvolution::getRangeRef(
- const SCEV *S, ScalarEvolution::RangeSignHint SignHint, unsigned Depth) {
- DenseMap<const SCEV *, ConstantRange> &Cache =
+const ConstantRange &
+ScalarEvolution::getRangeRef(SCEVUse S, ScalarEvolution::RangeSignHint SignHint,
+ unsigned Depth) {
+ DenseMap<SCEVUse, ConstantRange> &Cache =
SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
: SignedRanges;
ConstantRange::PreferredRangeType RangeType =
@@ -6617,7 +6638,7 @@ const ConstantRange &ScalarEvolution::getRangeRef(
: ConstantRange::Signed;
// See if we've computed this range already.
- DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S);
+ DenseMap<SCEVUse, ConstantRange>::iterator I = Cache.find(S);
if (I != Cache.end())
return I->second;
@@ -6752,8 +6773,7 @@ const ConstantRange &ScalarEvolution::getRangeRef(
// TODO: non-affine addrec
if (AddRec->isAffine()) {
- const SCEV *MaxBEScev =
- getConstantMaxBackedgeTakenCount(AddRec->getLoop());
+ SCEVUse MaxBEScev = getConstantMaxBackedgeTakenCount(AddRec->getLoop());
if (!isa<SCEVCouldNotCompute>(MaxBEScev)) {
APInt MaxBECount = cast<SCEVConstant>(MaxBEScev)->getAPInt();
@@ -6780,7 +6800,7 @@ const ConstantRange &ScalarEvolution::getRangeRef(
// Now try symbolic BE count and more powerful methods.
if (UseExpensiveRangeSharpening) {
- const SCEV *SymbolicMaxBECount =
+ SCEVUse SymbolicMaxBECount =
getSymbolicMaxBackedgeTakenCount(AddRec->getLoop());
if (!isa<SCEVCouldNotCompute>(SymbolicMaxBECount) &&
getTypeSizeInBits(MaxBEScev->getType()) <= BitWidth &&
@@ -7011,8 +7031,7 @@ static ConstantRange getRangeForAffineARHelper(APInt Step,
return ConstantRange::getNonEmpty(std::move(NewLower), std::move(NewUpper));
}
-ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
- const SCEV *Step,
+ConstantRange ScalarEvolution::getRangeForAffineAR(SCEVUse Start, SCEVUse Step,
const APInt &MaxBECount) {
assert(getTypeSizeInBits(Start->getType()) ==
getTypeSizeInBits(Step->getType()) &&
@@ -7041,13 +7060,13 @@ ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
}
ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
- const SCEVAddRecExpr *AddRec, const SCEV *MaxBECount, unsigned BitWidth,
+ const SCEVAddRecExpr *AddRec, SCEVUse MaxBECount, unsigned BitWidth,
ScalarEvolution::RangeSignHint SignHint) {
assert(AddRec->isAffine() && "Non-affine AddRecs are not supported!\n");
assert(AddRec->hasNoSelfWrap() &&
"This only works for non-self-wrapping AddRecs!");
const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
- const SCEV *Step = AddRec->getStepRecurrence(*this);
+ SCEVUse Step = AddRec->getStepRecurrence(*this);
// Only deal with constant step to save compile time.
if (!isa<SCEVConstant>(Step))
return ConstantRange::getFull(BitWidth);
@@ -7060,9 +7079,9 @@ ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
getTypeSizeInBits(AddRec->getType()))
return ConstantRange::getFull(BitWidth);
MaxBECount = getNoopOrZeroExtend(MaxBECount, AddRec->getType());
- const SCEV *RangeWidth = getMinusOne(AddRec->getType());
- const SCEV *StepAbs = getUMinExpr(Step, getNegativeSCEV(Step));
- const SCEV *MaxItersWithoutWrap = getUDivExpr(RangeWidth, StepAbs);
+ SCEVUse RangeWidth = getMinusOne(AddRec->getType());
+ SCEVUse StepAbs = getUMinExpr(Step, getNegativeSCEV(Step));
+ SCEVUse MaxItersWithoutWrap = getUDivExpr(RangeWidth, StepAbs);
if (!isKnownPredicateViaConstantRanges(ICmpInst::ICMP_ULE, MaxBECount,
MaxItersWithoutWrap))
return ConstantRange::getFull(BitWidth);
@@ -7071,7 +7090,7 @@ ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
ICmpInst::Predicate GEPred =
IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
- const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);
+ SCEVUse End = AddRec->evaluateAtIteration(MaxBECount, *this);
// We know that there is no self-wrap. Let's take Start and End values and
// look at all intermediate values V1, V2, ..., Vn that IndVar takes during
@@ -7085,7 +7104,7 @@ ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
// outside and inside the range [Min(Start, End), Max(Start, End)]. Using that
// knowledge, let's try to prove that we are dealing with Case 1. It is so if
// Start <= End and step is positive, or Start >= End and step is negative.
- const SCEV *Start = applyLoopGuards(AddRec->getStart(), AddRec->getLoop());
+ SCEVUse Start = applyLoopGuards(AddRec->getStart(), AddRec->getLoop());
ConstantRange StartRange = getRangeRef(Start, SignHint);
ConstantRange EndRange = getRangeRef(End, SignHint);
ConstantRange RangeBetween = StartRange.unionWith(EndRange);
@@ -7108,8 +7127,7 @@ ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
return ConstantRange::getFull(BitWidth);
}
-ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start,
- const SCEV *Step,
+ConstantRange ScalarEvolution::getRangeViaFactoring(SCEVUse Start, SCEVUse Step,
const APInt &MaxBECount) {
// RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q})
// == RangeOf({A,+,P}) union RangeOf({B,+,Q})
@@ -7124,8 +7142,7 @@ ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start,
APInt TrueValue;
APInt FalseValue;
- explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth,
- const SCEV *S) {
+ explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth, SCEVUse S) {
std::optional<unsigned> CastOp;
APInt Offset(BitWidth, 0);
@@ -7214,10 +7231,10 @@ ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start,
// FIXME: without the explicit `this` receiver below, MSVC errors out with
// C2352 and C2512 (otherwise it isn't needed).
- const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue);
- const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue);
- const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue);
- const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue);
+ SCEVUse TrueStart = this->getConstant(StartPattern.TrueValue);
+ SCEVUse TrueStep = this->getConstant(StepPattern.TrueValue);
+ SCEVUse FalseStart = this->getConstant(StartPattern.FalseValue);
+ SCEVUse FalseStep = this->getConstant(StepPattern.FalseValue);
ConstantRange TrueRange =
this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount);
@@ -7243,8 +7260,7 @@ SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) {
return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap;
}
-const Instruction *
-ScalarEvolution::getNonTrivialDefiningScopeBound(const SCEV *S) {
+const Instruction *ScalarEvolution::getNonTrivialDefiningScopeBound(SCEVUse S) {
if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(S))
return &*AddRec->getLoop()->getHeader()->begin();
if (auto *U = dyn_cast<SCEVUnknown>(S))
@@ -7253,14 +7269,13 @@ ScalarEvolution::getNonTrivialDefiningScopeBound(const SCEV *S) {
return nullptr;
}
-const Instruction *
-ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops,
- bool &Precise) {
+const Instruction *ScalarEvolution::getDefiningScopeBound(ArrayRef<SCEVUse> Ops,
+ bool &Precise) {
Precise = true;
// Do a bounded search of the def relation of the requested SCEVs.
- SmallSet<const SCEV *, 16> Visited;
- SmallVector<const SCEV *> Worklist;
- auto pushOp = [&](const SCEV *S) {
+ SmallSet<SCEVUse, 16> Visited;
+ SmallVector<SCEVUse> Worklist;
+ auto pushOp = [&](SCEVUse S) {
if (!Visited.insert(S).second)
return;
// Threshold of 30 here is arbitrary.
@@ -7271,17 +7286,17 @@ ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops,
Worklist.push_back(S);
};
- for (const auto *S : Ops)
+ for (const auto S : Ops)
pushOp(S);
const Instruction *Bound = nullptr;
while (!Worklist.empty()) {
- auto *S = Worklist.pop_back_val();
+ auto S = Worklist.pop_back_val();
if (auto *DefI = getNonTrivialDefiningScopeBound(S)) {
if (!Bound || DT.dominates(Bound, DefI))
Bound = DefI;
} else {
- for (const auto *Op : S->operands())
+ for (const auto Op : S->operands())
pushOp(Op);
}
}
@@ -7289,7 +7304,7 @@ ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops,
}
const Instruction *
-ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops) {
+ScalarEvolution::getDefiningScopeBound(ArrayRef<SCEVUse> Ops) {
bool Discard;
return getDefiningScopeBound(Ops, Discard);
}
@@ -7329,7 +7344,7 @@ bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) {
// executed every time we enter that scope. When the bounding scope is a
// loop (the common case), this is equivalent to proving I executes on every
// iteration of that loop.
- SmallVector<const SCEV *> SCEVOps;
+ SmallVector<SCEVUse> SCEVOps;
for (const Use &Op : I->operands()) {
// I could be an extractvalue from a call to an overflow intrinsic.
// TODO: We can do better here in some cases.
@@ -7425,7 +7440,7 @@ bool ScalarEvolution::loopIsFiniteByAssumption(const Loop *L) {
return isFinite(L) || (isMustProgress(L) && loopHasNoSideEffects(L));
}
-const SCEV *ScalarEvolution::createSCEVIter(Value *V) {
+SCEVUse ScalarEvolution::createSCEVIter(Value *V) {
// Worklist item with a Value and a bool indicating whether all operands have
// been visited already.
using PointerTy = PointerIntPair<Value *, 1, bool>;
@@ -7441,7 +7456,7 @@ const SCEV *ScalarEvolution::createSCEVIter(Value *V) {
continue;
SmallVector<Value *> Ops;
- const SCEV *CreatedSCEV = nullptr;
+ SCEVUse CreatedSCEV = nullptr;
// If all operands have been visited already, create the SCEV.
if (E.getInt()) {
CreatedSCEV = createSCEV(CurV);
@@ -7466,8 +7481,8 @@ const SCEV *ScalarEvolution::createSCEVIter(Value *V) {
return getExistingSCEV(V);
}
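
The construction above follows a standard recursion-free post-order pattern: worklist items pair a node with an all-operands-visited bit, matching the PointerIntPair<Value *, 1, bool> items. A standalone sketch of that shape, assuming an acyclic graph (the real code deals with PHIs separately); toy nodes stand in for Values:

#include <utility>
#include <vector>

struct ToyNode {
  std::vector<ToyNode *> Ops;
  bool Built = false; // plays the role of "SCEV already created"
};

static void buildIteratively(ToyNode *Root) {
  // (node, all-operands-visited) pairs, like the PointerIntPair items.
  std::vector<std::pair<ToyNode *, bool>> Worklist{{Root, false}};
  while (!Worklist.empty()) {
    auto [N, Expanded] = Worklist.back();
    Worklist.pop_back();
    if (N->Built)
      continue;
    if (Expanded) {
      N->Built = true; // all operands done: "createSCEV" would run here
      continue;
    }
    Worklist.push_back({N, true}); // revisit once the operands are done
    for (ToyNode *Op : N->Ops)
      Worklist.push_back({Op, false});
  }
}
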
-const SCEV *
-ScalarEvolution::getOperandsToCreate(Value *V, SmallVectorImpl<Value *> &Ops) {
+SCEVUse ScalarEvolution::getOperandsToCreate(Value *V,
+ SmallVectorImpl<Value *> &Ops) {
if (!isSCEVable(V->getType()))
return getUnknown(V);
@@ -7653,7 +7668,7 @@ ScalarEvolution::getOperandsToCreate(Value *V, SmallVectorImpl<Value *> &Ops) {
return nullptr;
}
-const SCEV *ScalarEvolution::createSCEV(Value *V) {
+SCEVUse ScalarEvolution::createSCEV(Value *V) {
if (!isSCEVable(V->getType()))
return getUnknown(V);
@@ -7671,8 +7686,8 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
else if (!isa<ConstantExpr>(V))
return getUnknown(V);
- const SCEV *LHS;
- const SCEV *RHS;
+ SCEVUse LHS;
+ SCEVUse RHS;
Operator *U = cast<Operator>(V);
if (auto BO =
@@ -7685,10 +7700,10 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
// because it leads to N-1 getAddExpr calls for N ultimate operands.
// Instead, gather up all the operands and make a single getAddExpr call.
// LLVM IR canonical form means we need only traverse the left operands.
- SmallVector<const SCEV *, 4> AddOps;
+ SmallVector<SCEVUse, 4> AddOps;
do {
if (BO->Op) {
- if (auto *OpSCEV = getExistingSCEV(BO->Op)) {
+ if (auto OpSCEV = getExistingSCEV(BO->Op)) {
AddOps.push_back(OpSCEV);
break;
}
@@ -7700,10 +7715,10 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
// since the flags are only known to apply to this particular
// addition - they may not apply to other additions that can be
// formed with operands from AddOps.
- const SCEV *RHS = getSCEV(BO->RHS);
+ SCEVUse RHS = getSCEV(BO->RHS);
SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op);
if (Flags != SCEV::FlagAnyWrap) {
- const SCEV *LHS = getSCEV(BO->LHS);
+ SCEVUse LHS = getSCEV(BO->LHS);
if (BO->Opcode == Instruction::Sub)
AddOps.push_back(getMinusSCEV(LHS, RHS, Flags));
else
@@ -7731,10 +7746,10 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
}
case Instruction::Mul: {
- SmallVector<const SCEV *, 4> MulOps;
+ SmallVector<SCEVUse, 4> MulOps;
do {
if (BO->Op) {
- if (auto *OpSCEV = getExistingSCEV(BO->Op)) {
+ if (auto OpSCEV = getExistingSCEV(BO->Op)) {
MulOps.push_back(OpSCEV);
break;
}
@@ -7800,19 +7815,19 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
APInt EffectiveMask =
APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) {
- const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ));
- const SCEV *LHS = getSCEV(BO->LHS);
- const SCEV *ShiftedLHS = nullptr;
+ SCEVUse MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ));
+ SCEVUse LHS = getSCEV(BO->LHS);
+ SCEVUse ShiftedLHS = nullptr;
if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) {
if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) {
// For an expression like (x * 8) & 8, simplify the multiply.
unsigned MulZeros = OpC->getAPInt().countr_zero();
unsigned GCD = std::min(MulZeros, TZ);
APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD);
- SmallVector<const SCEV*, 4> MulOps;
+ SmallVector<SCEVUse, 4> MulOps;
MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD)));
append_range(MulOps, LHSMul->operands().drop_front());
- auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags());
+ auto NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags());
ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt));
}
}
@@ -7860,7 +7875,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
if (const SCEVZeroExtendExpr *Z =
dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) {
Type *UTy = BO->LHS->getType();
- const SCEV *Z0 = Z->getOperand();
+ SCEVUse Z0 = Z->getOperand();
Type *Z0Ty = Z0->getType();
unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
@@ -7936,9 +7951,9 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt);
Operator *L = dyn_cast<Operator>(BO->LHS);
- const SCEV *AddTruncateExpr = nullptr;
+ SCEVUse AddTruncateExpr = nullptr;
ConstantInt *ShlAmtCI = nullptr;
- const SCEV *AddConstant = nullptr;
+ SCEVUse AddConstant = nullptr;
if (L && L->getOpcode() == Instruction::Add) {
// X = Shl A, n
@@ -7950,7 +7965,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
ConstantInt *AddOperandCI = dyn_cast<ConstantInt>(L->getOperand(1));
if (LShift && LShift->getOpcode() == Instruction::Shl) {
if (AddOperandCI) {
- const SCEV *ShlOp0SCEV = getSCEV(LShift->getOperand(0));
+ SCEVUse ShlOp0SCEV = getSCEV(LShift->getOperand(0));
ShlAmtCI = dyn_cast<ConstantInt>(LShift->getOperand(1));
// since we truncate to TruncTy, the AddConstant should be of the
// same type, so create a new Constant whose type matches TruncTy.
@@ -7968,7 +7983,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
// Y = AShr X, m
// Both n and m are constant.
- const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0));
+ SCEVUse ShlOp0SCEV = getSCEV(L->getOperand(0));
ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
AddTruncateExpr = getTruncateExpr(ShlOp0SCEV, TruncTy);
}
@@ -7989,8 +8004,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
if (ShlAmt.ult(BitWidth) && ShlAmt.uge(AShrAmt)) {
APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
ShlAmtCI->getZExtValue() - AShrAmt);
- const SCEV *CompositeExpr =
- getMulExpr(AddTruncateExpr, getConstant(Mul));
+ SCEVUse CompositeExpr = getMulExpr(AddTruncateExpr, getConstant(Mul));
if (L->getOpcode() != Instruction::Shl)
CompositeExpr = getAddExpr(CompositeExpr, AddConstant);
@@ -8020,8 +8034,8 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
// but by that point the NSW information has potentially been lost.
if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
Type *Ty = U->getType();
- auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty);
- auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty);
+ auto V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty);
+ auto V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty);
return getMinusSCEV(V1, V2, SCEV::FlagNSW);
}
}
@@ -8035,11 +8049,11 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
case Instruction::PtrToInt: {
// Pointer to integer cast is straight-forward, so do model it.
- const SCEV *Op = getSCEV(U->getOperand(0));
+ SCEVUse Op = getSCEV(U->getOperand(0));
Type *DstIntTy = U->getType();
// But only if effective SCEV (integer) type is wide enough to represent
// all possible pointer values.
- const SCEV *IntOp = getPtrToIntExpr(Op, DstIntTy);
+ SCEVUse IntOp = getPtrToIntExpr(Op, DstIntTy);
if (isa<SCEVCouldNotCompute>(IntOp))
return getUnknown(V);
return IntOp;
@@ -8100,15 +8114,15 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
RHS = getSCEV(II->getArgOperand(1));
return getSMinExpr(LHS, RHS);
case Intrinsic::usub_sat: {
- const SCEV *X = getSCEV(II->getArgOperand(0));
- const SCEV *Y = getSCEV(II->getArgOperand(1));
- const SCEV *ClampedY = getUMinExpr(X, Y);
+ SCEVUse X = getSCEV(II->getArgOperand(0));
+ SCEVUse Y = getSCEV(II->getArgOperand(1));
+ SCEVUse ClampedY = getUMinExpr(X, Y);
return getMinusSCEV(X, ClampedY, SCEV::FlagNUW);
}
case Intrinsic::uadd_sat: {
- const SCEV *X = getSCEV(II->getArgOperand(0));
- const SCEV *Y = getSCEV(II->getArgOperand(1));
- const SCEV *ClampedX = getUMinExpr(X, getNotSCEV(Y));
+ SCEVUse X = getSCEV(II->getArgOperand(0));
+ SCEVUse Y = getSCEV(II->getArgOperand(1));
+ SCEVUse ClampedX = getUMinExpr(X, getNotSCEV(Y));
return getAddExpr(ClampedX, Y, SCEV::FlagNUW);
}
case Intrinsic::start_loop_iterations:
@@ -8133,7 +8147,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
// Iteration Count Computation Code
//
-const SCEV *ScalarEvolution::getTripCountFromExitCount(const SCEV *ExitCount) {
+SCEVUse ScalarEvolution::getTripCountFromExitCount(SCEVUse ExitCount) {
if (isa<SCEVCouldNotCompute>(ExitCount))
return getCouldNotCompute();
@@ -8144,9 +8158,9 @@ const SCEV *ScalarEvolution::getTripCountFromExitCount(const SCEV *ExitCount) {
return getTripCountFromExitCount(ExitCount, EvalTy, nullptr);
}
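
The conversion above is just exit count plus one, but evaluated in a type wide enough that the +1 cannot wrap. A fixed-width standalone illustration:

#include <cstdint>

// At 8 bits, an exit count of 255 must become a trip count of 256, so
// the addition has to happen after widening (here to 16 bits).
static uint16_t tripCountFromExitCount8(uint8_t ExitCount) {
  return uint16_t(ExitCount) + 1; // zero-extend first, then add one
}
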
-const SCEV *ScalarEvolution::getTripCountFromExitCount(const SCEV *ExitCount,
- Type *EvalTy,
- const Loop *L) {
+SCEVUse ScalarEvolution::getTripCountFromExitCount(SCEVUse ExitCount,
+ Type *EvalTy,
+ const Loop *L) {
if (isa<SCEVCouldNotCompute>(ExitCount))
return getCouldNotCompute();
@@ -8225,12 +8239,12 @@ unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
}
unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
- const SCEV *ExitCount) {
+ SCEVUse ExitCount) {
if (ExitCount == getCouldNotCompute())
return 1;
// Get the trip count
- const SCEV *TCExpr = getTripCountFromExitCount(applyLoopGuards(ExitCount, L));
+ SCEVUse TCExpr = getTripCountFromExitCount(applyLoopGuards(ExitCount, L));
APInt Multiple = getNonZeroConstantMultiple(TCExpr);
// If a trip multiple is huge (>=2^32), the trip count is still divisible by
@@ -8258,13 +8272,13 @@ ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
assert(ExitingBlock && "Must pass a non-null exiting block!");
assert(L->isLoopExiting(ExitingBlock) &&
"Exiting block must actually branch out of the loop!");
- const SCEV *ExitCount = getExitCount(L, ExitingBlock);
+ SCEVUse ExitCount = getExitCount(L, ExitingBlock);
return getSmallConstantTripMultiple(L, ExitCount);
}
-const SCEV *ScalarEvolution::getExitCount(const Loop *L,
- const BasicBlock *ExitingBlock,
- ExitCountKind Kind) {
+SCEVUse ScalarEvolution::getExitCount(const Loop *L,
+ const BasicBlock *ExitingBlock,
+ ExitCountKind Kind) {
switch (Kind) {
case Exact:
return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
@@ -8276,14 +8290,13 @@ const SCEV *ScalarEvolution::getExitCount(const Loop *L,
llvm_unreachable("Invalid ExitCountKind!");
}
-const SCEV *
-ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L,
- SmallVector<const SCEVPredicate *, 4> &Preds) {
+SCEVUse ScalarEvolution::getPredicatedBackedgeTakenCount(
+ const Loop *L, SmallVector<const SCEVPredicate *, 4> &Preds) {
return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds);
}
-const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L,
- ExitCountKind Kind) {
+SCEVUse ScalarEvolution::getBackedgeTakenCount(const Loop *L,
+ ExitCountKind Kind) {
switch (Kind) {
case Exact:
return getBackedgeTakenInfo(L).getExact(L, this);
@@ -8352,7 +8365,7 @@ ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
// only done to produce more precise results.
if (Result.hasAnyInfo()) {
// Invalidate any expression using an addrec in this loop.
- SmallVector<const SCEV *, 8> ToForget;
+ SmallVector<SCEVUse, 8> ToForget;
auto LoopUsersIt = LoopUsers.find(L);
if (LoopUsersIt != LoopUsers.end())
append_range(ToForget, LoopUsersIt->second);
@@ -8399,7 +8412,7 @@ void ScalarEvolution::forgetAllLoops() {
void ScalarEvolution::visitAndClearUsers(
SmallVectorImpl<Instruction *> &Worklist,
SmallPtrSetImpl<Instruction *> &Visited,
- SmallVectorImpl<const SCEV *> &ToForget) {
+ SmallVectorImpl<SCEVUse> &ToForget) {
while (!Worklist.empty()) {
Instruction *I = Worklist.pop_back_val();
if (!isSCEVable(I->getType()))
@@ -8422,7 +8435,7 @@ void ScalarEvolution::forgetLoop(const Loop *L) {
SmallVector<const Loop *, 16> LoopWorklist(1, L);
SmallVector<Instruction *, 32> Worklist;
SmallPtrSet<Instruction *, 16> Visited;
- SmallVector<const SCEV *, 16> ToForget;
+ SmallVector<SCEVUse, 16> ToForget;
// Iterate over all the loops and sub-loops to drop SCEV information.
while (!LoopWorklist.empty()) {
@@ -8435,7 +8448,7 @@ void ScalarEvolution::forgetLoop(const Loop *L) {
// Drop information about predicated SCEV rewrites for this loop.
for (auto I = PredicatedSCEVRewrites.begin();
I != PredicatedSCEVRewrites.end();) {
- std::pair<const SCEV *, const Loop *> Entry = I->first;
+ std::pair<SCEVUse, const Loop *> Entry = I->first;
if (Entry.second == CurrL)
PredicatedSCEVRewrites.erase(I++);
else
@@ -8471,7 +8484,7 @@ void ScalarEvolution::forgetValue(Value *V) {
// Drop information about expressions based on loop-header PHIs.
SmallVector<Instruction *, 16> Worklist;
SmallPtrSet<Instruction *, 8> Visited;
- SmallVector<const SCEV *, 8> ToForget;
+ SmallVector<SCEVUse, 8> ToForget;
Worklist.push_back(I);
Visited.insert(I);
visitAndClearUsers(Worklist, Visited, ToForget);
@@ -8487,14 +8500,14 @@ void ScalarEvolution::forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V) {
// directly using a SCEVUnknown/SCEVAddRec defined in the loop. After an
// extra predecessor is added, this is no longer valid. Find all Unknowns and
// AddRecs defined in the loop and invalidate any SCEVs making use of them.
- if (const SCEV *S = getExistingSCEV(V)) {
+ if (SCEVUse S = getExistingSCEV(V)) {
struct InvalidationRootCollector {
Loop *L;
- SmallVector<const SCEV *, 8> Roots;
+ SmallVector<SCEVUse, 8> Roots;
InvalidationRootCollector(Loop *L) : L(L) {}
- bool follow(const SCEV *S) {
+ bool follow(SCEVUse S) {
if (auto *SU = dyn_cast<SCEVUnknown>(S)) {
if (auto *I = dyn_cast<Instruction>(SU->getValue()))
if (L->contains(I))
@@ -8531,7 +8544,7 @@ void ScalarEvolution::forgetBlockAndLoopDispositions(Value *V) {
if (!isSCEVable(V->getType()))
return;
- const SCEV *S = getExistingSCEV(V);
+ SCEVUse S = getExistingSCEV(V);
if (!S)
return;
@@ -8539,17 +8552,17 @@ void ScalarEvolution::forgetBlockAndLoopDispositions(Value *V) {
// S's users may change if S's disposition changes (i.e. a user may change to
// loop-invariant, if S changes to loop invariant), so also invalidate
// dispositions of S's users recursively.
- SmallVector<const SCEV *, 8> Worklist = {S};
- SmallPtrSet<const SCEV *, 8> Seen = {S};
+ SmallVector<SCEVUse, 8> Worklist = {S};
+ SmallPtrSet<SCEVUse, 8> Seen = {S};
while (!Worklist.empty()) {
- const SCEV *Curr = Worklist.pop_back_val();
+ SCEVUse Curr = Worklist.pop_back_val();
bool LoopDispoRemoved = LoopDispositions.erase(Curr);
bool BlockDispoRemoved = BlockDispositions.erase(Curr);
if (!LoopDispoRemoved && !BlockDispoRemoved)
continue;
auto Users = SCEVUsers.find(Curr);
if (Users != SCEVUsers.end())
- for (const auto *User : Users->second)
+ for (const auto User : Users->second)
if (Seen.insert(User).second)
Worklist.push_back(User);
}
@@ -8561,9 +8574,9 @@ void ScalarEvolution::forgetBlockAndLoopDispositions(Value *V) {
/// is never skipped. This is a valid assumption as long as the loop exits via
/// that test. For precise results, it is the caller's responsibility to specify
/// the relevant loop exiting block using getExact(ExitingBlock, SE).
-const SCEV *
-ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE,
- SmallVector<const SCEVPredicate *, 4> *Preds) const {
+SCEVUse ScalarEvolution::BackedgeTakenInfo::getExact(
+ const Loop *L, ScalarEvolution *SE,
+ SmallVector<const SCEVPredicate *, 4> *Preds) const {
// If any exits were not computable, the loop is not computable.
if (!isComplete() || ExitNotTaken.empty())
return SE->getCouldNotCompute();
@@ -8575,9 +8588,9 @@ ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE,
// All exiting blocks we have gathered dominate the loop's latch, so the
// exact trip count is simply the minimum of all the calculated exit counts.
- SmallVector<const SCEV *, 2> Ops;
+ SmallVector<SCEVUse, 2> Ops;
for (const auto &ENT : ExitNotTaken) {
- const SCEV *BECount = ENT.ExactNotTaken;
+ SCEVUse BECount = ENT.ExactNotTaken;
assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!");
assert(SE->DT.dominates(ENT.ExitingBlock, Latch) &&
"We should only have known counts for exiting blocks that dominate "
@@ -8600,7 +8613,7 @@ ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE,
}
/// Get the exact not taken count for this loop exit.
-const SCEV *
+SCEVUse
ScalarEvolution::BackedgeTakenInfo::getExact(const BasicBlock *ExitingBlock,
ScalarEvolution *SE) const {
for (const auto &ENT : ExitNotTaken)
@@ -8610,7 +8623,7 @@ ScalarEvolution::BackedgeTakenInfo::getExact(const BasicBlock *ExitingBlock,
return SE->getCouldNotCompute();
}
-const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax(
+SCEVUse ScalarEvolution::BackedgeTakenInfo::getConstantMax(
const BasicBlock *ExitingBlock, ScalarEvolution *SE) const {
for (const auto &ENT : ExitNotTaken)
if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
@@ -8619,7 +8632,7 @@ const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax(
return SE->getCouldNotCompute();
}
-const SCEV *ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(
+SCEVUse ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(
const BasicBlock *ExitingBlock, ScalarEvolution *SE) const {
for (const auto &ENT : ExitNotTaken)
if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
@@ -8629,7 +8642,7 @@ const SCEV *ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(
}
/// getConstantMax - Get the constant max backedge taken count for the loop.
-const SCEV *
+SCEVUse
ScalarEvolution::BackedgeTakenInfo::getConstantMax(ScalarEvolution *SE) const {
auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) {
return !ENT.hasAlwaysTruePredicate();
@@ -8644,7 +8657,7 @@ ScalarEvolution::BackedgeTakenInfo::getConstantMax(ScalarEvolution *SE) const {
return getConstantMax();
}
-const SCEV *
+SCEVUse
ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(const Loop *L,
ScalarEvolution *SE) {
if (!SymbolicMax)
@@ -8660,12 +8673,12 @@ bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero(
return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue);
}
-ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E)
+ScalarEvolution::ExitLimit::ExitLimit(SCEVUse E)
: ExitLimit(E, E, E, false, std::nullopt) {}
ScalarEvolution::ExitLimit::ExitLimit(
- const SCEV *E, const SCEV *ConstantMaxNotTaken,
- const SCEV *SymbolicMaxNotTaken, bool MaxOrZero,
+ SCEVUse E, SCEVUse ConstantMaxNotTaken, SCEVUse SymbolicMaxNotTaken,
+ bool MaxOrZero,
ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList)
: ExactNotTaken(E), ConstantMaxNotTaken(ConstantMaxNotTaken),
SymbolicMaxNotTaken(SymbolicMaxNotTaken), MaxOrZero(MaxOrZero) {
@@ -8700,17 +8713,16 @@ ScalarEvolution::ExitLimit::ExitLimit(
}
ScalarEvolution::ExitLimit::ExitLimit(
- const SCEV *E, const SCEV *ConstantMaxNotTaken,
- const SCEV *SymbolicMaxNotTaken, bool MaxOrZero,
- const SmallPtrSetImpl<const SCEVPredicate *> &PredSet)
+ SCEVUse E, SCEVUse ConstantMaxNotTaken, SCEVUse SymbolicMaxNotTaken,
+ bool MaxOrZero, const SmallPtrSetImpl<const SCEVPredicate *> &PredSet)
: ExitLimit(E, ConstantMaxNotTaken, SymbolicMaxNotTaken, MaxOrZero,
- { &PredSet }) {}
+ {&PredSet}) {}
/// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each
/// computable exit into a persistent ExitNotTakenInfo array.
ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
ArrayRef<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> ExitCounts,
- bool IsComplete, const SCEV *ConstantMax, bool MaxOrZero)
+ bool IsComplete, SCEVUse ConstantMax, bool MaxOrZero)
: ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) {
using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
@@ -8741,8 +8753,8 @@ ScalarEvolution::computeBackedgeTakenCount(const Loop *L,
SmallVector<EdgeExitInfo, 4> ExitCounts;
bool CouldComputeBECount = true;
BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
- const SCEV *MustExitMaxBECount = nullptr;
- const SCEV *MayExitMaxBECount = nullptr;
+ SCEVUse MustExitMaxBECount = nullptr;
+ SCEVUse MayExitMaxBECount = nullptr;
bool MustExitMaxOrZero = false;
// Compute the ExitLimit for each loop exit. Use this to populate ExitCounts
@@ -8813,8 +8825,10 @@ ScalarEvolution::computeBackedgeTakenCount(const Loop *L,
}
}
}
- const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
- (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
+ SCEVUse MaxBECount =
+ MustExitMaxBECount
+ ? MustExitMaxBECount
+ : (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
// The loop backedge will be taken the maximum or zero times if there's
// a single exit that must be taken the maximum or zero times.
bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
@@ -8978,7 +8992,7 @@ ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl(
NWR.getEquivalentICmp(Pred, NewRHSC, Offset);
if (!ExitIfTrue)
Pred = ICmpInst::getInversePredicate(Pred);
- auto *LHS = getSCEV(WO->getLHS());
+ auto LHS = getSCEV(WO->getLHS());
if (Offset != 0)
LHS = getAddExpr(LHS, getConstant(Offset));
auto EL = computeExitLimitFromICmp(L, Pred, LHS, getConstant(NewRHSC),
@@ -9023,9 +9037,9 @@ ScalarEvolution::computeExitLimitFromCondFromBinOp(
if (isa<ConstantInt>(Op0))
return Op0 == NeutralElement ? EL1 : EL0;
- const SCEV *BECount = getCouldNotCompute();
- const SCEV *ConstantMaxBECount = getCouldNotCompute();
- const SCEV *SymbolicMaxBECount = getCouldNotCompute();
+ SCEVUse BECount = getCouldNotCompute();
+ SCEVUse ConstantMaxBECount = getCouldNotCompute();
+ SCEVUse SymbolicMaxBECount = getCouldNotCompute();
if (EitherMayExit) {
bool UseSequentialUMin = !isa<BinaryOperator>(ExitCond);
// Both conditions must be same for the loop to continue executing.
@@ -9083,16 +9097,15 @@ ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
Pred = ExitCond->getInversePredicate();
const ICmpInst::Predicate OriginalPred = Pred;
- const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
- const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
+ SCEVUse LHS = getSCEV(ExitCond->getOperand(0));
+ SCEVUse RHS = getSCEV(ExitCond->getOperand(1));
ExitLimit EL = computeExitLimitFromICmp(L, Pred, LHS, RHS, ControlsOnlyExit,
AllowPredicates);
if (EL.hasAnyInfo())
return EL;
- auto *ExhaustiveCount =
- computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
+ auto ExhaustiveCount = computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
if (!isa<SCEVCouldNotCompute>(ExhaustiveCount))
return ExhaustiveCount;
@@ -9101,7 +9114,7 @@ ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
ExitCond->getOperand(1), L, OriginalPred);
}
ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
- const Loop *L, ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
+ const Loop *L, ICmpInst::Predicate Pred, SCEVUse LHS, SCEVUse RHS,
bool ControlsOnlyExit, bool AllowPredicates) {
// Try to evaluate any dependencies out of the loop.
@@ -9130,7 +9143,7 @@ ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
ConstantRange CompRange =
ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt());
- const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
+ SCEVUse Ret = AddRec->getNumIterationsInRange(CompRange, *this);
if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
}
@@ -9142,7 +9155,7 @@ ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
if (ControllingFiniteLoop && isLoopInvariant(RHS, L)) {
// TODO: We can peel off any functions which are invertible *in L*. Loop
// invariant terms are effectively constants for our purposes here.
- auto *InnerLHS = LHS;
+ auto InnerLHS = LHS;
if (auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS))
InnerLHS = ZExt->getOperand();
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(InnerLHS)) {
@@ -9151,7 +9164,7 @@ ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
StrideC && StrideC->getAPInt().isPowerOf2()) {
auto Flags = AR->getNoWrapFlags();
Flags = setFlags(Flags, SCEV::FlagNW);
- SmallVector<const SCEV*> Operands{AR->operands()};
+ SmallVector<SCEVUse> Operands{AR->operands()};
Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), Flags);
}
@@ -9249,8 +9262,8 @@ ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L,
assert(L->contains(Switch->getDefaultDest()) &&
"Default case must not exit the loop!");
- const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L);
- const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock));
+ SCEVUse LHS = getSCEVAtScope(Switch->getCondition(), L);
+ SCEVUse RHS = getConstant(Switch->findCaseDest(ExitingBlock));
// while (X != Y) --> while (X-Y != 0)
ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsOnlyExit);
@@ -9263,8 +9276,8 @@ ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L,
static ConstantInt *
EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
ScalarEvolution &SE) {
- const SCEV *InVal = SE.getConstant(C);
- const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
+ SCEVUse InVal = SE.getConstant(C);
+ SCEVUse Val = AddRec->evaluateAtIteration(InVal, SE);
assert(isa<SCEVConstant>(Val) &&
"Evaluation of SCEV at constant didn't fold correctly?");
return cast<SCEVConstant>(Val)->getValue();
@@ -9405,7 +9418,7 @@ ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
if (Result->isZeroValue()) {
unsigned BitWidth = getTypeSizeInBits(RHS->getType());
- const SCEV *UpperBound =
+ SCEVUse UpperBound =
getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth);
return ExitLimit(getCouldNotCompute(), UpperBound, UpperBound, false);
}
@@ -9655,9 +9668,9 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
}
}
-const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
- Value *Cond,
- bool ExitWhen) {
+SCEVUse ScalarEvolution::computeExitCountExhaustively(const Loop *L,
+ Value *Cond,
+ bool ExitWhen) {
PHINode *PN = getConstantEvolvingPHI(Cond, L);
if (!PN) return getCouldNotCompute();
@@ -9722,9 +9735,8 @@ const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
return getCouldNotCompute();
}
-const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
- SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values =
- ValuesAtScopes[V];
+SCEVUse ScalarEvolution::getSCEVAtScope(SCEVUse V, const Loop *L) {
+ SmallVector<std::pair<const Loop *, SCEVUse>, 2> &Values = ValuesAtScopes[V];
// Check to see if we've folded this expression at this loop before.
for (auto &LS : Values)
if (LS.first == L)
@@ -9733,7 +9745,7 @@ const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
Values.emplace_back(L, nullptr);
// Otherwise compute it.
- const SCEV *C = computeSCEVAtScope(V, L);
+ SCEVUse C = computeSCEVAtScope(V, L);
for (auto &LS : reverse(ValuesAtScopes[V]))
if (LS.first == L) {
LS.second = C;
@@ -9748,7 +9760,7 @@ const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
/// will return Constants for objects which aren't represented by a
/// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
/// Returns NULL if the SCEV isn't representable as a Constant.
-static Constant *BuildConstantFromSCEV(const SCEV *V) {
+static Constant *BuildConstantFromSCEV(SCEVUse V) {
switch (V->getSCEVType()) {
case scCouldNotCompute:
case scAddRecExpr:
@@ -9774,7 +9786,7 @@ static Constant *BuildConstantFromSCEV(const SCEV *V) {
case scAddExpr: {
const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
Constant *C = nullptr;
- for (const SCEV *Op : SA->operands()) {
+ for (SCEVUse Op : SA->operands()) {
Constant *OpC = BuildConstantFromSCEV(Op);
if (!OpC)
return nullptr;
@@ -9809,9 +9821,8 @@ static Constant *BuildConstantFromSCEV(const SCEV *V) {
llvm_unreachable("Unknown SCEV kind!");
}
-const SCEV *
-ScalarEvolution::getWithOperands(const SCEV *S,
- SmallVectorImpl<const SCEV *> &NewOps) {
+SCEVUse ScalarEvolution::getWithOperands(SCEVUse S,
+ SmallVectorImpl<SCEVUse> &NewOps) {
switch (S->getSCEVType()) {
case scTruncate:
case scZeroExtend:
@@ -9845,7 +9856,7 @@ ScalarEvolution::getWithOperands(const SCEV *S,
llvm_unreachable("Unknown SCEV kind!");
}
-const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
+SCEVUse ScalarEvolution::computeSCEVAtScope(SCEVUse V, const Loop *L) {
switch (V->getSCEVType()) {
case scConstant:
case scVScale:
@@ -9858,21 +9869,21 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
// Avoid performing the look-up in the common case where the specified
// expression has no loop-variant portions.
for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
- const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
+ SCEVUse OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
if (OpAtScope == AddRec->getOperand(i))
continue;
// Okay, at least one of these operands is loop variant but might be
// foldable. Build a new instance of the folded commutative expression.
- SmallVector<const SCEV *, 8> NewOps;
+ SmallVector<SCEVUse, 8> NewOps;
NewOps.reserve(AddRec->getNumOperands());
append_range(NewOps, AddRec->operands().take_front(i));
NewOps.push_back(OpAtScope);
for (++i; i != e; ++i)
NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));
- const SCEV *FoldedRec = getAddRecExpr(
- NewOps, AddRec->getLoop(), AddRec->getNoWrapFlags(SCEV::FlagNW));
+ SCEVUse FoldedRec = getAddRecExpr(NewOps, AddRec->getLoop(),
+ AddRec->getNoWrapFlags(SCEV::FlagNW));
AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
// The addrec may be folded to a nonrecurrence, for example, if the
// induction variable is multiplied by zero after constant folding. Go
@@ -9887,7 +9898,7 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
if (!AddRec->getLoop()->contains(L)) {
// To evaluate this recurrence, we need to know how many times the AddRec
// loop iterates. Compute this now.
- const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
+ SCEVUse BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
if (BackedgeTakenCount == getCouldNotCompute())
return AddRec;
@@ -9909,15 +9920,15 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
case scUMinExpr:
case scSMinExpr:
case scSequentialUMinExpr: {
- ArrayRef<const SCEV *> Ops = V->operands();
+ ArrayRef<SCEVUse> Ops = V->operands();
// Avoid performing the look-up in the common case where the specified
// expression has no loop-variant portions.
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
- const SCEV *OpAtScope = getSCEVAtScope(Ops[i], L);
+ SCEVUse OpAtScope = getSCEVAtScope(Ops[i], L);
if (OpAtScope != Ops[i]) {
// Okay, at least one of these operands is loop variant but might be
// foldable. Build a new instance of the folded commutative expression.
- SmallVector<const SCEV *, 8> NewOps;
+ SmallVector<SCEVUse, 8> NewOps;
NewOps.reserve(Ops.size());
append_range(NewOps, Ops.take_front(i));
NewOps.push_back(OpAtScope);
@@ -9950,7 +9961,7 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
// to see if the loop that contains it has a known backedge-taken
// count. If so, we may be able to force computation of the exit
// value.
- const SCEV *BackedgeTakenCount = getBackedgeTakenCount(CurrLoop);
+ SCEVUse BackedgeTakenCount = getBackedgeTakenCount(CurrLoop);
// This trivial case can show up in some degenerate cases where
// the incoming IR has not yet been fully simplified.
if (BackedgeTakenCount->isZero()) {
@@ -10015,8 +10026,8 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
if (!isSCEVable(Op->getType()))
return V;
- const SCEV *OrigV = getSCEV(Op);
- const SCEV *OpV = getSCEVAtScope(OrigV, L);
+ SCEVUse OrigV = getSCEV(Op);
+ SCEVUse OpV = getSCEVAtScope(OrigV, L);
MadeImprovement |= OrigV != OpV;
Constant *C = BuildConstantFromSCEV(OpV);
@@ -10044,11 +10055,11 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
llvm_unreachable("Unknown SCEV type!");
}
-const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
+SCEVUse ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
return getSCEVAtScope(getSCEV(V), L);
}
-const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const {
+SCEVUse ScalarEvolution::stripInjectiveFunctions(SCEVUse S) const {
if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S))
return stripInjectiveFunctions(ZExt->getOperand());
if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S))
@@ -10064,8 +10075,8 @@ const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const {
/// A and B isn't important.
///
/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
-static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B,
- ScalarEvolution &SE) {
+static SCEVUse SolveLinEquationWithOverflow(const APInt &A, SCEVUse B,
+ ScalarEvolution &SE) {
uint32_t BW = A.getBitWidth();
assert(BW == SE.getTypeSizeInBits(B->getType()));
assert(A != 0 && "A must be non-zero.");
@@ -10097,7 +10108,7 @@ static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B,
// I * (B / D) mod (N / D)
// To simplify the computation, we factor out the divide by D:
// (I * B mod N) / D
- const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2));
+ SCEVUse D = SE.getConstant(APInt::getOneBitSet(BW, Mult2));
return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D);
}
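
The comments above describe the underlying modular arithmetic; a standalone rendering at a fixed 64-bit width, with plain integers in place of APInt/SCEV, looks as follows. This is one standard formulation of the same computation, not the exact expression tree the function builds.

#include <cassert>
#include <cstdint>

// For odd A, the multiplicative inverse mod 2^64 exists; Newton's
// iteration doubles the number of correct low bits per step, so five
// steps suffice for 64 bits.
static uint64_t inverseMod2_64(uint64_t A) {
  assert((A & 1) && "only odd numbers are invertible mod 2^64");
  uint64_t X = A; // A*A == 1 (mod 8), so X starts correct to 3 bits
  for (int i = 0; i < 5; ++i)
    X *= 2 - A * X; // Newton step: X' = X * (2 - A*X)
  return X;
}

// Solve A * X == B (mod 2^64). With A = 2^TZ * A' (A' odd), a solution
// exists iff 2^TZ divides B, and X = (B >> TZ) * inverse(A') is one
// such solution (unique only modulo 2^(64-TZ)).
static bool solveLinEq(uint64_t A, uint64_t B, uint64_t &X) {
  assert(A != 0 && "A must be non-zero");
  unsigned TZ = __builtin_ctzll(A); // GCC/Clang builtin
  if (B & ((uint64_t(1) << TZ) - 1))
    return false; // 2^TZ does not divide B: no solution
  X = (B >> TZ) * inverseMod2_64(A >> TZ);
  return true;
}
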
@@ -10374,7 +10385,7 @@ SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec,
return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth);
}
-ScalarEvolution::ExitLimit ScalarEvolution::howFarToZero(const SCEV *V,
+ScalarEvolution::ExitLimit ScalarEvolution::howFarToZero(SCEVUse V,
const Loop *L,
bool ControlsOnlyExit,
bool AllowPredicates) {
@@ -10388,7 +10399,8 @@ ScalarEvolution::ExitLimit ScalarEvolution::howFarToZero(const SCEV *V,
// If the value is a constant
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
// If the value is already zero, the branch will execute zero times.
- if (C->getValue()->isZero()) return C;
+ if (C->getValue()->isZero())
+ return SCEVUse(C);
return getCouldNotCompute(); // Otherwise it will loop infinitely.
}
@@ -10433,8 +10445,8 @@ ScalarEvolution::ExitLimit ScalarEvolution::howFarToZero(const SCEV *V,
// where BW is the common bit width of Start and Step.
// Get the initial value for the loop.
- const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
- const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());
+ SCEVUse Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
+ SCEVUse Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());
// For now we handle only constant steps.
//
@@ -10452,7 +10464,7 @@ ScalarEvolution::ExitLimit ScalarEvolution::howFarToZero(const SCEV *V,
// N = Start/-Step
// First compute the unsigned distance from zero in the direction of Step.
bool CountDown = StepC->getAPInt().isNegative();
- const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);
+ SCEVUse Distance = CountDown ? Start : getNegativeSCEV(Start);
// Handle unitary steps, which cannot wrap around.
// 1*N = -Start; -1*N = Start (mod 2^BW), so:
@@ -10468,9 +10480,9 @@ ScalarEvolution::ExitLimit ScalarEvolution::howFarToZero(const SCEV *V,
// Explicitly handling this here is necessary because getUnsignedRange
// isn't context-sensitive; it doesn't know that we only care about the
// range inside the loop.
- const SCEV *Zero = getZero(Distance->getType());
- const SCEV *One = getOne(Distance->getType());
- const SCEV *DistancePlusOne = getAddExpr(Distance, One);
+ SCEVUse Zero = getZero(Distance->getType());
+ SCEVUse One = getOne(Distance->getType());
+ SCEVUse DistancePlusOne = getAddExpr(Distance, One);
if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) {
// If Distance + 1 doesn't overflow, we can compute the maximum distance
// as "unsigned_max(Distance + 1) - 1".
@@ -10488,34 +10500,33 @@ ScalarEvolution::ExitLimit ScalarEvolution::howFarToZero(const SCEV *V,
// will have undefined behavior due to wrapping.
if (ControlsOnlyExit && AddRec->hasNoSelfWrap() &&
loopHasNoAbnormalExits(AddRec->getLoop())) {
- const SCEV *Exact =
+ SCEVUse Exact =
getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
- const SCEV *ConstantMax = getCouldNotCompute();
+ SCEVUse ConstantMax = getCouldNotCompute();
if (Exact != getCouldNotCompute()) {
APInt MaxInt = getUnsignedRangeMax(applyLoopGuards(Exact, L));
ConstantMax =
getConstant(APIntOps::umin(MaxInt, getUnsignedRangeMax(Exact)));
}
- const SCEV *SymbolicMax =
- isa<SCEVCouldNotCompute>(Exact) ? ConstantMax : Exact;
+ SCEVUse SymbolicMax = isa<SCEVCouldNotCompute>(Exact) ? ConstantMax : Exact;
return ExitLimit(Exact, ConstantMax, SymbolicMax, false, Predicates);
}
// Solve the general equation.
- const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(),
- getNegativeSCEV(Start), *this);
+ SCEVUse E = SolveLinEquationWithOverflow(StepC->getAPInt(),
+ getNegativeSCEV(Start), *this);
- const SCEV *M = E;
+ SCEVUse M = E;
if (E != getCouldNotCompute()) {
APInt MaxWithGuards = getUnsignedRangeMax(applyLoopGuards(E, L));
M = getConstant(APIntOps::umin(MaxWithGuards, getUnsignedRangeMax(E)));
}
- auto *S = isa<SCEVCouldNotCompute>(E) ? M : E;
+ auto S = isa<SCEVCouldNotCompute>(E) ? M : E;
return ExitLimit(E, M, S, false, Predicates);
}
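
The unitary-step fast path above has a compact arithmetic core: for step +1 the distance to zero is -Start (mod 2^BW), and for step -1 it is Start itself. A concrete 8-bit standalone example:

#include <cstdint>

// 8-bit model: how many backedges does {Start,+,±1} take to reach 0?
static uint8_t howFarToZeroUnitStep(uint8_t Start, bool CountDown) {
  // Matches `Distance = CountDown ? Start : getNegativeSCEV(Start)`:
  // counting down, the distance is Start itself; counting up it is
  // -Start (mod 2^8), i.e. the wrap-around distance to zero.
  return CountDown ? Start : uint8_t(-Start);
}

// E.g. {250,+,1} reaches zero after 6 iterations (250..255, then 0):
// howFarToZeroUnitStep(250, /*CountDown=*/false) == 6.
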
-ScalarEvolution::ExitLimit
-ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) {
+ScalarEvolution::ExitLimit ScalarEvolution::howFarToNonZero(SCEVUse V,
+ const Loop *L) {
// Loops that look like: while (X == 0) are very strange indeed. We don't
// handle them yet except for the trivial case. This could be expanded in the
// future as needed.
@@ -10555,7 +10566,7 @@ ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(const BasicBlock *BB)
/// expressions are equal; however, for the purposes of looking for a condition
/// guarding a loop, it can be useful to be a little more general, since a
/// front-end may have replicated the controlling expression.
-static bool HasSameValue(const SCEV *A, const SCEV *B) {
+static bool HasSameValue(SCEVUse A, SCEVUse B) {
// Quick check to see if they are the same SCEV.
if (A == B) return true;
@@ -10579,7 +10590,7 @@ static bool HasSameValue(const SCEV *A, const SCEV *B) {
return false;
}
-static bool MatchBinarySub(const SCEV *S, const SCEV *&LHS, const SCEV *&RHS) {
+static bool MatchBinarySub(SCEVUse S, SCEVUse &LHS, SCEVUse &RHS) {
const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S);
if (!Add || Add->getNumOperands() != 2)
return false;
@@ -10599,7 +10610,7 @@ static bool MatchBinarySub(const SCEV *S, const SCEV *&LHS, const SCEV *&RHS) {
}
bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
- const SCEV *&LHS, const SCEV *&RHS,
+ SCEVUse &LHS, SCEVUse &RHS,
unsigned Depth) {
bool Changed = false;
// Simplifies ICMP to trivial true or false by turning it into '0 == 0' or
@@ -10783,23 +10794,23 @@ bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
return Changed;
}
-bool ScalarEvolution::isKnownNegative(const SCEV *S) {
+bool ScalarEvolution::isKnownNegative(SCEVUse S) {
return getSignedRangeMax(S).isNegative();
}
-bool ScalarEvolution::isKnownPositive(const SCEV *S) {
+bool ScalarEvolution::isKnownPositive(SCEVUse S) {
return getSignedRangeMin(S).isStrictlyPositive();
}
-bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
+bool ScalarEvolution::isKnownNonNegative(SCEVUse S) {
return !getSignedRangeMin(S).isNegative();
}
-bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
+bool ScalarEvolution::isKnownNonPositive(SCEVUse S) {
return !getSignedRangeMax(S).isStrictlyPositive();
}
-bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
+bool ScalarEvolution::isKnownNonZero(SCEVUse S) {
// Query push down for cases where the unsigned range is
// less than sufficient.
if (const auto *SExt = dyn_cast<SCEVSignExtendExpr>(S))
@@ -10807,20 +10818,20 @@ bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
return getUnsignedRangeMin(S) != 0;
}
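
All of the isKnown* predicates above bottom out in range queries. A toy version with inclusive [Min, Max] unsigned intervals shows the idea (the real ConstantRange is half-open and may wrap; names are illustrative):

#include <cstdint>

struct ToyRange {
  uint64_t Min, Max; // inclusive endpoints
};

static bool toyIsKnownNonZero(ToyRange R) { return R.Min != 0; }

// A predicate is "known" when it holds for every pair of values, e.g.
// ULT is known if the largest LHS value is below the smallest RHS value.
static bool toyIsKnownULT(ToyRange L, ToyRange R) { return L.Max < R.Min; }
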
-std::pair<const SCEV *, const SCEV *>
-ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) {
+std::pair<SCEVUse, SCEVUse>
+ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, SCEVUse S) {
// Compute SCEV on entry of loop L.
- const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this);
+ SCEVUse Start = SCEVInitRewriter::rewrite(S, L, *this);
if (Start == getCouldNotCompute())
return { Start, Start };
// Compute post increment SCEV for loop L.
- const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this);
+ SCEVUse PostInc = SCEVPostIncRewriter::rewrite(S, L, *this);
assert(PostInc != getCouldNotCompute() && "Unexpected could not compute");
return { Start, PostInc };
}
-bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS) {
+bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred, SCEVUse LHS,
+ SCEVUse RHS) {
// First collect all loops.
SmallPtrSet<const Loop *, 8> LoopsUsed;
getUsedLoops(LHS, LoopsUsed);
@@ -10869,8 +10880,8 @@ bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred,
isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first);
}
-bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS) {
+bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred, SCEVUse LHS,
+ SCEVUse RHS) {
// Canonicalize the inputs first.
(void)SimplifyICmpOperands(Pred, LHS, RHS);
@@ -10885,8 +10896,8 @@ bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
}
std::optional<bool> ScalarEvolution::evaluatePredicate(ICmpInst::Predicate Pred,
- const SCEV *LHS,
- const SCEV *RHS) {
+ SCEVUse LHS,
+ SCEVUse RHS) {
if (isKnownPredicate(Pred, LHS, RHS))
return true;
if (isKnownPredicate(ICmpInst::getInversePredicate(Pred), LHS, RHS))
@@ -10894,17 +10905,16 @@ std::optional<bool> ScalarEvolution::evaluatePredicate(ICmpInst::Predicate Pred,
return std::nullopt;
}
-bool ScalarEvolution::isKnownPredicateAt(ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS,
- const Instruction *CtxI) {
+bool ScalarEvolution::isKnownPredicateAt(ICmpInst::Predicate Pred, SCEVUse LHS,
+ SCEVUse RHS, const Instruction *CtxI) {
// TODO: Analyze guards and assumes from Context's block.
return isKnownPredicate(Pred, LHS, RHS) ||
isBasicBlockEntryGuardedByCond(CtxI->getParent(), Pred, LHS, RHS);
}
std::optional<bool>
-ScalarEvolution::evaluatePredicateAt(ICmpInst::Predicate Pred, const SCEV *LHS,
- const SCEV *RHS, const Instruction *CtxI) {
+ScalarEvolution::evaluatePredicateAt(ICmpInst::Predicate Pred, SCEVUse LHS,
+ SCEVUse RHS, const Instruction *CtxI) {
std::optional<bool> KnownWithoutContext = evaluatePredicate(Pred, LHS, RHS);
if (KnownWithoutContext)
return KnownWithoutContext;
@@ -10920,7 +10930,7 @@ ScalarEvolution::evaluatePredicateAt(ICmpInst::Predicate Pred, const SCEV *LHS,
bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred,
const SCEVAddRecExpr *LHS,
- const SCEV *RHS) {
+ SCEVUse RHS) {
const Loop *L = LHS->getLoop();
return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) &&
isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS);
@@ -10978,7 +10988,7 @@ ScalarEvolution::getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS,
if (!LHS->hasNoSignedWrap())
return std::nullopt;
- const SCEV *Step = LHS->getStepRecurrence(*this);
+ SCEVUse Step = LHS->getStepRecurrence(*this);
if (isKnownNonNegative(Step))
return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;
@@ -10991,7 +11001,7 @@ ScalarEvolution::getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS,
std::optional<ScalarEvolution::LoopInvariantPredicate>
ScalarEvolution::getLoopInvariantPredicate(ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS,
+ SCEVUse LHS, SCEVUse RHS,
const Loop *L,
const Instruction *CtxI) {
// If there is a loop-invariant, force it into the RHS, otherwise bail out.
@@ -11077,8 +11087,8 @@ ScalarEvolution::getLoopInvariantPredicate(ICmpInst::Predicate Pred,
std::optional<ScalarEvolution::LoopInvariantPredicate>
ScalarEvolution::getLoopInvariantExitCondDuringFirstIterations(
- ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
- const Instruction *CtxI, const SCEV *MaxIter) {
+ ICmpInst::Predicate Pred, SCEVUse LHS, SCEVUse RHS, const Loop *L,
+ const Instruction *CtxI, SCEVUse MaxIter) {
if (auto LIP = getLoopInvariantExitCondDuringFirstIterationsImpl(
Pred, LHS, RHS, L, CtxI, MaxIter))
return LIP;
@@ -11088,7 +11098,7 @@ ScalarEvolution::getLoopInvariantExitCondDuringFirstIterations(
// work, try the following trick: if a predicate is invariant for X, it
// is also invariant for umin(X, ...). So try to find something that works
// among subexpressions of MaxIter expressed as umin.
- for (auto *Op : UMin->operands())
+ for (auto Op : UMin->operands())
if (auto LIP = getLoopInvariantExitCondDuringFirstIterationsImpl(
Pred, LHS, RHS, L, CtxI, Op))
return LIP;
@@ -11097,8 +11107,8 @@ ScalarEvolution::getLoopInvariantExitCondDuringFirstIterations(
std::optional<ScalarEvolution::LoopInvariantPredicate>
ScalarEvolution::getLoopInvariantExitCondDuringFirstIterationsImpl(
- ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
- const Instruction *CtxI, const SCEV *MaxIter) {
+ ICmpInst::Predicate Pred, SCEVUse LHS, SCEVUse RHS, const Loop *L,
+ const Instruction *CtxI, SCEVUse MaxIter) {
// Try to prove the following set of facts:
// - The predicate is monotonic in the iteration space.
// - If the check does not fail on the 1st iteration:
@@ -11125,9 +11135,9 @@ ScalarEvolution::getLoopInvariantExitCondDuringFirstIterationsImpl(
return std::nullopt;
// TODO: Support steps other than +/- 1.
- const SCEV *Step = AR->getStepRecurrence(*this);
- auto *One = getOne(Step->getType());
- auto *MinusOne = getNegativeSCEV(One);
+ SCEVUse Step = AR->getStepRecurrence(*this);
+ auto One = getOne(Step->getType());
+ auto MinusOne = getNegativeSCEV(One);
if (Step != One && Step != MinusOne)
return std::nullopt;
@@ -11138,7 +11148,7 @@ ScalarEvolution::getLoopInvariantExitCondDuringFirstIterationsImpl(
return std::nullopt;
// Value of IV on suggested last iteration.
- const SCEV *Last = AR->evaluateAtIteration(MaxIter, *this);
+ SCEVUse Last = AR->evaluateAtIteration(MaxIter, *this);
// Does it still meet the requirement?
if (!isLoopBackedgeGuardedByCond(L, Pred, Last, RHS))
return std::nullopt;
@@ -11151,7 +11161,7 @@ ScalarEvolution::getLoopInvariantExitCondDuringFirstIterationsImpl(
CmpInst::isSigned(Pred) ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
if (Step == MinusOne)
NoOverflowPred = CmpInst::getSwappedPredicate(NoOverflowPred);
- const SCEV *Start = AR->getStart();
+ SCEVUse Start = AR->getStart();
if (!isKnownPredicateAt(NoOverflowPred, Start, Last, CtxI))
return std::nullopt;
@@ -11160,7 +11170,7 @@ ScalarEvolution::getLoopInvariantExitCondDuringFirstIterationsImpl(
}
bool ScalarEvolution::isKnownPredicateViaConstantRanges(
- ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) {
+ ICmpInst::Predicate Pred, SCEVUse LHS, SCEVUse RHS) {
if (HasSameValue(LHS, RHS))
return ICmpInst::isTrueWhenEqual(Pred);
@@ -11186,7 +11196,7 @@ bool ScalarEvolution::isKnownPredicateViaConstantRanges(
auto UR = getUnsignedRange(RHS);
if (CheckRanges(UL, UR))
return true;
- auto *Diff = getMinusSCEV(LHS, RHS);
+ auto Diff = getMinusSCEV(LHS, RHS);
return !isa<SCEVCouldNotCompute>(Diff) && isKnownNonZero(Diff);
}
@@ -11202,17 +11212,16 @@ bool ScalarEvolution::isKnownPredicateViaConstantRanges(
}
bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
- const SCEV *LHS,
- const SCEV *RHS) {
+ SCEVUse LHS, SCEVUse RHS) {
// Match X to (A + C1)<ExpectedFlags> and Y to (A + C2)<ExpectedFlags>, where
// C1 and C2 are constant integers. If either X or Y is not an add expression,
// consider it as X + 0 or Y + 0 respectively. C1 and C2 are returned via
// OutC1 and OutC2.
- auto MatchBinaryAddToConst = [this](const SCEV *X, const SCEV *Y,
- APInt &OutC1, APInt &OutC2,
+ auto MatchBinaryAddToConst = [this](SCEVUse X, SCEVUse Y, APInt &OutC1,
+ APInt &OutC2,
SCEV::NoWrapFlags ExpectedFlags) {
- const SCEV *XNonConstOp, *XConstOp;
- const SCEV *YNonConstOp, *YConstOp;
+ SCEVUse XNonConstOp, XConstOp;
+ SCEVUse YNonConstOp, YConstOp;
SCEV::NoWrapFlags XFlagsPresent;
SCEV::NoWrapFlags YFlagsPresent;
@@ -11295,8 +11304,7 @@ bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
}
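
The fact MatchBinaryAddToConst feeds into these checks is easy to model outside SCEV (a sketch, not part of the patch, using plain int in place of nsw add expressions):

#include <cassert>

int main() {
  // X = A + C1, Y = A + C2, with no wrapping: X <= Y holds exactly when
  // C1 <= C2, independent of the shared addend A.
  for (int A = -100; A <= 100; ++A)
    for (int C1 = -5; C1 <= 5; ++C1)
      for (int C2 = -5; C2 <= 5; ++C2) {
        int X = A + C1, Y = A + C2; // small values, so no overflow here
        assert((X <= Y) == (C1 <= C2));
      }
  return 0;
}
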
bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred,
- const SCEV *LHS,
- const SCEV *RHS) {
+ SCEVUse LHS, SCEVUse RHS) {
if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate)
return false;
@@ -11317,8 +11325,8 @@ bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred,
}
bool ScalarEvolution::isImpliedViaGuard(const BasicBlock *BB,
- ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS) {
+ ICmpInst::Predicate Pred, SCEVUse LHS,
+ SCEVUse RHS) {
// No need to even try if we know the module has no guards.
if (!HasGuards)
return false;
@@ -11336,10 +11344,9 @@ bool ScalarEvolution::isImpliedViaGuard(const BasicBlock *BB,
/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
/// protected by a conditional between LHS and RHS. This is used
/// to eliminate casts.
-bool
-ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
- ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS) {
+bool ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
+ ICmpInst::Predicate Pred,
+ SCEVUse LHS, SCEVUse RHS) {
// Interpret a null as meaning no loop, where there is obviously no guard
// (interprocedural conditions notwithstanding). Do not bother about
// unreachable loops.
@@ -11375,15 +11382,15 @@ ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
// See if we can exploit a trip count to prove the predicate.
const auto &BETakenInfo = getBackedgeTakenInfo(L);
- const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
+ SCEVUse LatchBECount = BETakenInfo.getExact(Latch, this);
if (LatchBECount != getCouldNotCompute()) {
// We know that Latch branches back to the loop header exactly
// LatchBECount times. This means the backedge condition at Latch is
// equivalent to "{0,+,1} u< LatchBECount".
Type *Ty = LatchBECount->getType();
auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
- const SCEV *LoopCounter =
- getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
+ SCEVUse LoopCounter =
+ getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
LatchBECount))
return true;
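
The canonical-IV encoding above can be modeled in a few lines (illustrative only, not part of the patch): the IV {0,+,1} makes "IV u< LatchBECount" true on exactly LatchBECount iterations.

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t LatchBECount = 5;
  uint32_t TrueCount = 0;
  // The canonical IV {0,+,1} visits 0, 1, 2, ...; the backedge condition
  // "IV u< LatchBECount" holds on exactly LatchBECount of those values.
  for (uint32_t IV = 0; IV < LatchBECount; ++IV)
    ++TrueCount;
  assert(TrueCount == LatchBECount);
  return 0;
}
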
@@ -11444,8 +11451,7 @@ ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
bool ScalarEvolution::isBasicBlockEntryGuardedByCond(const BasicBlock *BB,
ICmpInst::Predicate Pred,
- const SCEV *LHS,
- const SCEV *RHS) {
+ SCEVUse LHS, SCEVUse RHS) {
// Do not bother proving facts for unreachable code.
if (!DT.isReachableFromEntry(BB))
return true;
@@ -11544,8 +11550,7 @@ bool ScalarEvolution::isBasicBlockEntryGuardedByCond(const BasicBlock *BB,
bool ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
ICmpInst::Predicate Pred,
- const SCEV *LHS,
- const SCEV *RHS) {
+ SCEVUse LHS, SCEVUse RHS) {
// Interpret a null as meaning no loop, where there is obviously no guard
// (interprocedural conditions notwithstanding).
if (!L)
@@ -11563,10 +11568,9 @@ bool ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
return isBasicBlockEntryGuardedByCond(L->getHeader(), Pred, LHS, RHS);
}
-bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
- const SCEV *RHS,
- const Value *FoundCondValue, bool Inverse,
- const Instruction *CtxI) {
+bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, SCEVUse LHS,
+ SCEVUse RHS, const Value *FoundCondValue,
+ bool Inverse, const Instruction *CtxI) {
// False conditions implies anything. Do not bother analyzing it further.
if (FoundCondValue ==
ConstantInt::getBool(FoundCondValue->getContext(), Inverse))
@@ -11601,16 +11605,15 @@ bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
else
FoundPred = ICI->getPredicate();
- const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
- const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
+ SCEVUse FoundLHS = getSCEV(ICI->getOperand(0));
+ SCEVUse FoundRHS = getSCEV(ICI->getOperand(1));
return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS, CtxI);
}
-bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
- const SCEV *RHS,
- ICmpInst::Predicate FoundPred,
- const SCEV *FoundLHS, const SCEV *FoundRHS,
+bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, SCEVUse LHS,
+ SCEVUse RHS, ICmpInst::Predicate FoundPred,
+ SCEVUse FoundLHS, SCEVUse FoundRHS,
const Instruction *CtxI) {
// Balance the types.
if (getTypeSizeInBits(LHS->getType()) <
@@ -11623,14 +11626,14 @@ bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
auto *NarrowType = LHS->getType();
auto *WideType = FoundLHS->getType();
auto BitWidth = getTypeSizeInBits(NarrowType);
- const SCEV *MaxValue = getZeroExtendExpr(
+ SCEVUse MaxValue = getZeroExtendExpr(
getConstant(APInt::getMaxValue(BitWidth)), WideType);
if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, FoundLHS,
MaxValue) &&
isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, FoundRHS,
MaxValue)) {
- const SCEV *TruncFoundLHS = getTruncateExpr(FoundLHS, NarrowType);
- const SCEV *TruncFoundRHS = getTruncateExpr(FoundRHS, NarrowType);
+ SCEVUse TruncFoundLHS = getTruncateExpr(FoundLHS, NarrowType);
+ SCEVUse TruncFoundRHS = getTruncateExpr(FoundRHS, NarrowType);
if (isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, TruncFoundLHS,
TruncFoundRHS, CtxI))
return true;
@@ -11662,10 +11665,12 @@ bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
FoundRHS, CtxI);
}
-bool ScalarEvolution::isImpliedCondBalancedTypes(
- ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
- ICmpInst::Predicate FoundPred, const SCEV *FoundLHS, const SCEV *FoundRHS,
- const Instruction *CtxI) {
+bool ScalarEvolution::isImpliedCondBalancedTypes(ICmpInst::Predicate Pred,
+ SCEVUse LHS, SCEVUse RHS,
+ ICmpInst::Predicate FoundPred,
+ SCEVUse FoundLHS,
+ SCEVUse FoundRHS,
+ const Instruction *CtxI) {
assert(getTypeSizeInBits(LHS->getType()) ==
getTypeSizeInBits(FoundLHS->getType()) &&
"Types should be balanced!");
@@ -11743,8 +11748,8 @@ bool ScalarEvolution::isImpliedCondBalancedTypes(
// Create local copies that we can freely swap and canonicalize our
// conditions to "le/lt".
ICmpInst::Predicate CanonicalPred = Pred, CanonicalFoundPred = FoundPred;
- const SCEV *CanonicalLHS = LHS, *CanonicalRHS = RHS,
- *CanonicalFoundLHS = FoundLHS, *CanonicalFoundRHS = FoundRHS;
+ SCEVUse CanonicalLHS = LHS, CanonicalRHS = RHS,
+ CanonicalFoundLHS = FoundLHS, CanonicalFoundRHS = FoundRHS;
if (ICmpInst::isGT(CanonicalPred) || ICmpInst::isGE(CanonicalPred)) {
CanonicalPred = ICmpInst::getSwappedPredicate(CanonicalPred);
CanonicalFoundPred = ICmpInst::getSwappedPredicate(CanonicalFoundPred);
@@ -11777,7 +11782,7 @@ bool ScalarEvolution::isImpliedCondBalancedTypes(
(isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {
const SCEVConstant *C = nullptr;
- const SCEV *V = nullptr;
+ SCEVUse V = nullptr;
if (isa<SCEVConstant>(FoundLHS)) {
C = cast<SCEVConstant>(FoundLHS);
@@ -11866,8 +11871,7 @@ bool ScalarEvolution::isImpliedCondBalancedTypes(
return false;
}
-bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr,
- const SCEV *&L, const SCEV *&R,
+bool ScalarEvolution::splitBinaryAdd(SCEVUse Expr, SCEVUse &L, SCEVUse &R,
SCEV::NoWrapFlags &Flags) {
const auto *AE = dyn_cast<SCEVAddExpr>(Expr);
if (!AE || AE->getNumOperands() != 2)
@@ -11879,8 +11883,8 @@ bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr,
return true;
}
-std::optional<APInt>
-ScalarEvolution::computeConstantDifference(const SCEV *More, const SCEV *Less) {
+std::optional<APInt> ScalarEvolution::computeConstantDifference(SCEVUse More,
+ SCEVUse Less) {
// We avoid subtracting expressions here because this function is usually
// fairly deep in the call stack (i.e. is called many times).
@@ -11916,8 +11920,8 @@ ScalarEvolution::computeConstantDifference(const SCEV *More, const SCEV *Less) {
}
SCEV::NoWrapFlags Flags;
- const SCEV *LLess = nullptr, *RLess = nullptr;
- const SCEV *LMore = nullptr, *RMore = nullptr;
+ SCEVUse LLess = nullptr, RLess = nullptr;
+ SCEVUse LMore = nullptr, RMore = nullptr;
const SCEVConstant *C1 = nullptr, *C2 = nullptr;
// Compare (X + C1) vs X.
if (splitBinaryAdd(Less, LLess, RLess, Flags))
@@ -11939,8 +11943,8 @@ ScalarEvolution::computeConstantDifference(const SCEV *More, const SCEV *Less) {
}
bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart(
- ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
- const SCEV *FoundLHS, const SCEV *FoundRHS, const Instruction *CtxI) {
+ ICmpInst::Predicate Pred, SCEVUse LHS, SCEVUse RHS, SCEVUse FoundLHS,
+ SCEVUse FoundRHS, const Instruction *CtxI) {
// Try to recognize the following pattern:
//
// FoundRHS = ...
@@ -11984,8 +11988,8 @@ bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart(
}
bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(
- ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
- const SCEV *FoundLHS, const SCEV *FoundRHS) {
+ ICmpInst::Predicate Pred, SCEVUse LHS, SCEVUse RHS, SCEVUse FoundLHS,
+ SCEVUse FoundRHS) {
if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT)
return false;
@@ -12060,10 +12064,9 @@ bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(
getConstant(FoundRHSLimit));
}
-bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS,
- const SCEV *FoundLHS,
- const SCEV *FoundRHS, unsigned Depth) {
+bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred, SCEVUse LHS,
+ SCEVUse RHS, SCEVUse FoundLHS,
+ SCEVUse FoundRHS, unsigned Depth) {
const PHINode *LPhi = nullptr, *RPhi = nullptr;
auto ClearOnExit = make_scope_exit([&]() {
@@ -12117,7 +12120,7 @@ bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
const BasicBlock *LBB = LPhi->getParent();
const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
- auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) {
+ auto ProvedEasily = [&](SCEVUse S1, SCEVUse S2) {
return isKnownViaNonRecursiveReasoning(Pred, S1, S2) ||
isImpliedCondOperandsViaRanges(Pred, S1, S2, Pred, FoundLHS, FoundRHS) ||
isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth);
@@ -12129,8 +12132,8 @@ bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
// the predicate is true for incoming values from this block, then the
// predicate is also true for the Phis.
for (const BasicBlock *IncBB : predecessors(LBB)) {
- const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
- const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB));
+ SCEVUse L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
+ SCEVUse R = getSCEV(RPhi->getIncomingValueForBlock(IncBB));
if (!ProvedEasily(L, R))
return false;
}
@@ -12145,12 +12148,12 @@ bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
auto *RLoop = RAR->getLoop();
auto *Predecessor = RLoop->getLoopPredecessor();
assert(Predecessor && "Loop with AddRec with no predecessor?");
- const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor));
+ SCEVUse L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor));
if (!ProvedEasily(L1, RAR->getStart()))
return false;
auto *Latch = RLoop->getLoopLatch();
assert(Latch && "Loop with AddRec with no latch?");
- const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch));
+ SCEVUse L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch));
if (!ProvedEasily(L2, RAR->getPostIncExpr(*this)))
return false;
} else {
@@ -12162,7 +12165,7 @@ bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
// Check that RHS is available in this block.
if (!dominates(RHS, IncBB))
return false;
- const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
+ SCEVUse L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
// Make sure L does not refer to a value from a potentially previous
// iteration of a loop.
if (!properlyDominates(L, LBB))
@@ -12175,10 +12178,9 @@ bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
}
bool ScalarEvolution::isImpliedCondOperandsViaShift(ICmpInst::Predicate Pred,
- const SCEV *LHS,
- const SCEV *RHS,
- const SCEV *FoundLHS,
- const SCEV *FoundRHS) {
+ SCEVUse LHS, SCEVUse RHS,
+ SCEVUse FoundLHS,
+ SCEVUse FoundRHS) {
// We want to imply LHS < RHS from LHS < (RHS >> shiftvalue). First, make
// sure that we are dealing with same LHS.
if (RHS == FoundRHS) {
@@ -12198,7 +12200,7 @@ bool ScalarEvolution::isImpliedCondOperandsViaShift(ICmpInst::Predicate Pred,
using namespace PatternMatch;
if (match(SUFoundRHS->getValue(),
m_LShr(m_Value(Shiftee), m_Value(ShiftValue)))) {
- auto *ShifteeS = getSCEV(Shiftee);
+ auto ShifteeS = getSCEV(Shiftee);
// Prove one of the following:
// LHS <u (shiftee >> shiftvalue) && shiftee <=u RHS ---> LHS <u RHS
// LHS <=u (shiftee >> shiftvalue) && shiftee <=u RHS ---> LHS <=u RHS
@@ -12217,9 +12219,8 @@ bool ScalarEvolution::isImpliedCondOperandsViaShift(ICmpInst::Predicate Pred,
}
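
The chain of facts used here is straightforward to check outside SCEV (a sketch, not part of the patch): a logical right shift never increases an unsigned value, so LHS <u (Shiftee >> V) and Shiftee <=u RHS compose to LHS <u RHS.

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t Shiftee = 0; Shiftee < 1024; ++Shiftee)
    for (uint32_t V = 0; V < 32; ++V)
      // lshr can only shrink an unsigned value, which is what lets the
      // implication above chain through Shiftee.
      assert((Shiftee >> V) <= Shiftee);
  return 0;
}
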
bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS,
- const SCEV *FoundLHS,
- const SCEV *FoundRHS,
+ SCEVUse LHS, SCEVUse RHS,
+ SCEVUse FoundLHS, SCEVUse FoundRHS,
const Instruction *CtxI) {
if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, Pred, FoundLHS, FoundRHS))
return true;
@@ -12240,8 +12241,7 @@ bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
/// Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
template <typename MinMaxExprType>
-static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr,
- const SCEV *Candidate) {
+static bool IsMinMaxConsistingOf(SCEVUse MaybeMinMaxExpr, SCEVUse Candidate) {
const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr);
if (!MinMaxExpr)
return false;
@@ -12251,7 +12251,7 @@ static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr,
static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS) {
+ SCEVUse LHS, SCEVUse RHS) {
// If both sides are affine addrecs for the same loop, with equal
// steps, and we know the recurrences don't wrap, then we only
// need to check the predicate on the starting values.
@@ -12284,8 +12284,8 @@ static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
/// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
/// expression?
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
- ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS) {
+ ICmpInst::Predicate Pred, SCEVUse LHS,
+ SCEVUse RHS) {
switch (Pred) {
default:
return false;
@@ -12316,9 +12316,8 @@ static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
}
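
The underlying lattice fact, sketched with std::min/std::max in place of SCEV min/max expressions (not part of the patch):

#include <algorithm>
#include <cassert>

int main() {
  int A = 4, B = 9, C = 2;
  // Every operand of a max is <= the max; every operand of a min is >= the
  // min. IsMinMaxConsistingOf lets the predicate checks reduce to this.
  int Max = std::max({A, B, C});
  int Min = std::min({A, B, C});
  assert(A <= Max && B <= Max && C <= Max);
  assert(Min <= A && Min <= B && Min <= C);
  return 0;
}
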
bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS,
- const SCEV *FoundLHS,
- const SCEV *FoundRHS,
+ SCEVUse LHS, SCEVUse RHS,
+ SCEVUse FoundLHS, SCEVUse FoundRHS,
unsigned Depth) {
assert(getTypeSizeInBits(LHS->getType()) ==
getTypeSizeInBits(RHS->getType()) &&
@@ -12346,7 +12345,7 @@ bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
// Knowing that both FoundLHS and FoundRHS are non-negative, and knowing
// FoundLHS >u FoundRHS, we also know that FoundLHS >s FoundRHS. Let us
// use this fact to prove that LHS and RHS are non-negative.
- const SCEV *MinusOne = getMinusOne(LHS->getType());
+ SCEVUse MinusOne = getMinusOne(LHS->getType());
if (isImpliedCondOperands(ICmpInst::ICMP_SGT, LHS, MinusOne, FoundLHS,
FoundRHS) &&
isImpliedCondOperands(ICmpInst::ICMP_SGT, RHS, MinusOne, FoundLHS,
@@ -12357,7 +12356,7 @@ bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
if (Pred != ICmpInst::ICMP_SGT)
return false;
- auto GetOpFromSExt = [&](const SCEV *S) {
+ auto GetOpFromSExt = [&](SCEVUse S) {
if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
return Ext->getOperand();
// TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
@@ -12366,13 +12365,13 @@ bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
};
// Acquire values from extensions.
- auto *OrigLHS = LHS;
- auto *OrigFoundLHS = FoundLHS;
+ auto OrigLHS = LHS;
+ auto OrigFoundLHS = FoundLHS;
LHS = GetOpFromSExt(LHS);
FoundLHS = GetOpFromSExt(FoundLHS);
// Check if the SGT predicate can be proved trivially or via the found context.
- auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
+ auto IsSGTViaContext = [&](SCEVUse S1, SCEVUse S2) {
return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
FoundRHS, Depth + 1);
@@ -12391,12 +12390,12 @@ bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
if (!LHSAddExpr->hasNoSignedWrap())
return false;
- auto *LL = LHSAddExpr->getOperand(0);
- auto *LR = LHSAddExpr->getOperand(1);
- auto *MinusOne = getMinusOne(RHS->getType());
+ auto LL = LHSAddExpr->getOperand(0);
+ auto LR = LHSAddExpr->getOperand(1);
+ auto MinusOne = getMinusOne(RHS->getType());
// Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
- auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
+ auto IsSumGreaterThanRHS = [&](SCEVUse S1, SCEVUse S2) {
return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
};
// Try to prove the following rule:
@@ -12426,7 +12425,7 @@ bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
// We want to make sure that LHS = FoundLHS / Denominator. If so, then a
// SCEV for the numerator already exists and matches FoundLHS.
- auto *Numerator = getExistingSCEV(LL);
+ auto Numerator = getExistingSCEV(LL);
if (!Numerator || Numerator->getType() != FoundLHS->getType())
return false;
@@ -12447,14 +12446,14 @@ bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
// Given that:
// FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
auto *WTy = getWiderType(DTy, FRHSTy);
- auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
- auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);
+ auto DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
+ auto FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);
// Try to prove the following rule:
// (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
// For example, given that FoundLHS > 2, it means that FoundLHS is at
// least 3. If we divide it by Denominator < 4, we will have at least 1.
- auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
+ auto DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
if (isKnownNonPositive(RHS) &&
IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
return true;
@@ -12466,8 +12465,8 @@ bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
// 1. If FoundLHS is negative, then the result is 0.
// 2. If FoundLHS is non-negative, then the result is non-negative.
// Either way, the result is non-negative.
- auto *MinusOne = getMinusOne(WTy);
- auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
+ auto MinusOne = getMinusOne(WTy);
+ auto NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
if (isKnownNegative(RHS) &&
IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
return true;
@@ -12483,8 +12482,8 @@ bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
return false;
}
-static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS) {
+static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred, SCEVUse LHS,
+ SCEVUse RHS) {
// zext x u<= sext x, sext x s<= zext x
switch (Pred) {
case ICmpInst::ICMP_SGE:
@@ -12515,9 +12514,9 @@ static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred,
return false;
}
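
The idiom is easy to verify exhaustively for a narrow type; a standalone check for i8 -> i16 (not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
  for (int I = 0; I < 256; ++I) {
    uint8_t X = static_cast<uint8_t>(I);
    uint16_t ZExt = X;                                             // zext i8 -> i16
    uint16_t SExt = static_cast<uint16_t>(static_cast<int8_t>(X)); // sext i8 -> i16
    // zext x u<= sext x ...
    assert(ZExt <= SExt);
    // ... and sext x s<= zext x.
    assert(static_cast<int16_t>(SExt) <= static_cast<int16_t>(ZExt));
  }
  return 0;
}
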
-bool
-ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS) {
+bool ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
+ SCEVUse LHS,
+ SCEVUse RHS) {
return isKnownPredicateExtendIdiom(Pred, LHS, RHS) ||
isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
@@ -12525,11 +12524,10 @@ ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
}
-bool
-ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS,
- const SCEV *FoundLHS,
- const SCEV *FoundRHS) {
+bool ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
+ SCEVUse LHS, SCEVUse RHS,
+ SCEVUse FoundLHS,
+ SCEVUse FoundRHS) {
switch (Pred) {
default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
case ICmpInst::ICMP_EQ:
@@ -12570,12 +12568,9 @@ ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
return false;
}
-bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
- const SCEV *LHS,
- const SCEV *RHS,
- ICmpInst::Predicate FoundPred,
- const SCEV *FoundLHS,
- const SCEV *FoundRHS) {
+bool ScalarEvolution::isImpliedCondOperandsViaRanges(
+ ICmpInst::Predicate Pred, SCEVUse LHS, SCEVUse RHS,
+ ICmpInst::Predicate FoundPred, SCEVUse FoundLHS, SCEVUse FoundRHS) {
if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
// The restriction on `FoundRHS` could be lifted easily -- it exists only to
// reduce the compile time impact of this optimization.
@@ -12603,12 +12598,12 @@ bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
return LHSRange.icmp(Pred, ConstRHS);
}
-bool ScalarEvolution::canIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
+bool ScalarEvolution::canIVOverflowOnLT(SCEVUse RHS, SCEVUse Stride,
bool IsSigned) {
assert(isKnownPositive(Stride) && "Positive stride expected!");
unsigned BitWidth = getTypeSizeInBits(RHS->getType());
- const SCEV *One = getOne(Stride->getType());
+ SCEVUse One = getOne(Stride->getType());
if (IsSigned) {
APInt MaxRHS = getSignedRangeMax(RHS);
@@ -12627,11 +12622,11 @@ bool ScalarEvolution::canIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
}
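
An 8-bit model of the unsigned case (illustrative, not part of the patch): with Stride = 3 the test "i u< 255" can still pass while the next increment wraps, which is exactly the hazard canIVOverflowOnLT reports.

#include <cassert>
#include <cstdint>

int main() {
  const uint8_t RHS = 255, Stride = 3;
  uint8_t I = 254;
  // The backedge test still passes...
  assert(I < RHS);
  // ...but the next IV value wraps past the top of the 8-bit range.
  uint8_t Next = static_cast<uint8_t>(I + Stride);
  assert(Next == 1);
  return 0;
}
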
-bool ScalarEvolution::canIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
+bool ScalarEvolution::canIVOverflowOnGT(SCEVUse RHS, SCEVUse Stride,
bool IsSigned) {
unsigned BitWidth = getTypeSizeInBits(RHS->getType());
- const SCEV *One = getOne(Stride->getType());
+ SCEVUse One = getOne(Stride->getType());
if (IsSigned) {
APInt MinRHS = getSignedRangeMin(RHS);
@@ -12650,20 +12645,18 @@ bool ScalarEvolution::canIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
}
-const SCEV *ScalarEvolution::getUDivCeilSCEV(const SCEV *N, const SCEV *D) {
+SCEVUse ScalarEvolution::getUDivCeilSCEV(SCEVUse N, SCEVUse D) {
// umin(N, 1) + floor((N - umin(N, 1)) / D)
// This is equivalent to "1 + floor((N - 1) / D)" for N != 0. The umin
// expression fixes the case of N=0.
- const SCEV *MinNOne = getUMinExpr(N, getOne(N->getType()));
- const SCEV *NMinusOne = getMinusSCEV(N, MinNOne);
+ SCEVUse MinNOne = getUMinExpr(N, getOne(N->getType()));
+ SCEVUse NMinusOne = getMinusSCEV(N, MinNOne);
return getAddExpr(MinNOne, getUDivExpr(NMinusOne, D));
}
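
The identity the comment describes can be sanity-checked with plain unsigned arithmetic (a sketch, not part of the patch):

#include <cassert>
#include <cstdint>

// umin(N, 1) + (N - umin(N, 1)) / D: equals 1 + (N - 1) / D for N != 0 and
// 0 for N == 0 -- i.e. ceiling division without the N = 0 pitfall.
static uint64_t udivCeil(uint64_t N, uint64_t D) {
  uint64_t MinNOne = N < 1 ? N : 1;
  return MinNOne + (N - MinNOne) / D;
}

int main() {
  assert(udivCeil(0, 4) == 0);
  assert(udivCeil(1, 4) == 1);
  assert(udivCeil(7, 4) == 2);
  assert(udivCeil(8, 4) == 2);
  assert(udivCeil(9, 4) == 3);
  return 0;
}
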
-const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start,
- const SCEV *Stride,
- const SCEV *End,
- unsigned BitWidth,
- bool IsSigned) {
+SCEVUse ScalarEvolution::computeMaxBECountForLT(SCEVUse Start, SCEVUse Stride,
+ SCEVUse End, unsigned BitWidth,
+ bool IsSigned) {
// The logic in this function assumes we can represent a positive stride.
// If we can't, the backedge-taken count must be zero.
if (IsSigned && BitWidth == 1)
@@ -12709,9 +12702,9 @@ const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start,
}
ScalarEvolution::ExitLimit
-ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
- const Loop *L, bool IsSigned,
- bool ControlsOnlyExit, bool AllowPredicates) {
+ScalarEvolution::howManyLessThans(SCEVUse LHS, SCEVUse RHS, const Loop *L,
+ bool IsSigned, bool ControlsOnlyExit,
+ bool AllowPredicates) {
SmallPtrSet<const SCEVPredicate *, 4> Predicates;
const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
@@ -12784,11 +12777,11 @@ ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
if (AR->hasNoUnsignedWrap()) {
// Emulate what getZeroExtendExpr would have done during construction
// if we'd been able to infer the fact just above at that time.
- const SCEV *Step = AR->getStepRecurrence(*this);
+ SCEVUse Step = AR->getStepRecurrence(*this);
Type *Ty = ZExt->getType();
- auto *S = getAddRecExpr(
- getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 0),
- getZeroExtendExpr(Step, Ty, 0), L, AR->getNoWrapFlags());
+ auto S = getAddRecExpr(
+ getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 0),
+ getZeroExtendExpr(Step, Ty, 0), L, AR->getNoWrapFlags());
IV = dyn_cast<SCEVAddRecExpr>(S);
}
}
@@ -12822,7 +12815,7 @@ ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
bool NoWrap = ControlsOnlyExit && IV->getNoWrapFlags(WrapType);
ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
- const SCEV *Stride = IV->getStepRecurrence(*this);
+ SCEVUse Stride = IV->getStepRecurrence(*this);
bool PositiveStride = isKnownPositive(Stride);
@@ -12888,7 +12881,7 @@ ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
// Note: The (Start - Stride) term is used to get the start' term from
// (start' + stride,+,stride). Remember that we only care about the
// result of this expression when stride == 0 at runtime.
- auto *StartIfZero = getMinusSCEV(IV->getStart(), Stride);
+ auto StartIfZero = getMinusSCEV(IV->getStart(), Stride);
return isLoopEntryGuardedByCond(L, Cond, StartIfZero, RHS);
};
if (!wouldZeroStrideBeUB()) {
@@ -12922,14 +12915,14 @@ ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
// before any possible exit.
// Note that we have not yet proved RHS invariant (in general).
- const SCEV *Start = IV->getStart();
+ SCEVUse Start = IV->getStart();
// Preserve pointer-typed Start/RHS to pass to isLoopEntryGuardedByCond.
// If we convert to integers, isLoopEntryGuardedByCond will miss some cases.
// Use integer-typed versions for actual computation; we can't subtract
// pointers in general.
- const SCEV *OrigStart = Start;
- const SCEV *OrigRHS = RHS;
+ SCEVUse OrigStart = Start;
+ SCEVUse OrigRHS = RHS;
if (Start->getType()->isPointerTy()) {
Start = getLosslessPtrToIntExpr(Start);
if (isa<SCEVCouldNotCompute>(Start))
@@ -12947,7 +12940,7 @@ ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
// bound of the loop (RHS), and the fact that IV does not overflow (which is
// checked above).
if (!isLoopInvariant(RHS, L)) {
- const SCEV *MaxBECount = computeMaxBECountForLT(
+ SCEVUse MaxBECount = computeMaxBECountForLT(
Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount,
MaxBECount, false /*MaxOrZero*/, Predicates);
@@ -12957,8 +12950,8 @@ ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
// backedge count: if the backedge is taken at least once, max(End,Start)
// is End and the result is as above; if not, max(End,Start) is Start and
// we get a backedge count of zero.
- const SCEV *BECount = nullptr;
- auto *OrigStartMinusStride = getMinusSCEV(OrigStart, Stride);
+ SCEVUse BECount = nullptr;
+ auto OrigStartMinusStride = getMinusSCEV(OrigStart, Stride);
assert(isAvailableAtLoopEntry(OrigStartMinusStride, L) && "Must be!");
assert(isAvailableAtLoopEntry(OrigStart, L) && "Must be!");
assert(isAvailableAtLoopEntry(OrigRHS, L) && "Must be!");
@@ -12983,18 +12976,18 @@ ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
// "((RHS - 1) - (Start - Stride)) /u Stride" reassociates to
// "((RHS - (Start - Stride) - 1) /u Stride".
// Our preconditions trivially imply no overflow in that form.
- const SCEV *MinusOne = getMinusOne(Stride->getType());
- const SCEV *Numerator =
+ SCEVUse MinusOne = getMinusOne(Stride->getType());
+ SCEVUse Numerator =
getMinusSCEV(getAddExpr(RHS, MinusOne), getMinusSCEV(Start, Stride));
BECount = getUDivExpr(Numerator, Stride);
}
- const SCEV *BECountIfBackedgeTaken = nullptr;
+ SCEVUse BECountIfBackedgeTaken = nullptr;
if (!BECount) {
auto canProveRHSGreaterThanEqualStart = [&]() {
auto CondGE = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
- const SCEV *GuardedRHS = applyLoopGuards(OrigRHS, L);
- const SCEV *GuardedStart = applyLoopGuards(OrigStart, L);
+ SCEVUse GuardedRHS = applyLoopGuards(OrigRHS, L);
+ SCEVUse GuardedStart = applyLoopGuards(OrigStart, L);
if (isLoopEntryGuardedByCond(L, CondGE, OrigRHS, OrigStart) ||
isKnownPredicate(CondGE, GuardedRHS, GuardedStart))
@@ -13010,14 +13003,14 @@ ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
//
// FIXME: Should isLoopEntryGuardedByCond do this for us?
auto CondGT = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
- auto *StartMinusOne = getAddExpr(OrigStart,
- getMinusOne(OrigStart->getType()));
+ auto StartMinusOne =
+ getAddExpr(OrigStart, getMinusOne(OrigStart->getType()));
return isLoopEntryGuardedByCond(L, CondGT, OrigRHS, StartMinusOne);
};
// If we know that RHS >= Start in the context of the loop, then we know
// that max(RHS, Start) = RHS at this point.
- const SCEV *End;
+ SCEVUse End;
if (canProveRHSGreaterThanEqualStart()) {
End = RHS;
} else {
@@ -13047,7 +13040,7 @@ ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
//
// Using this information, try to prove whether the addition in
// "(Start - End) + (Stride - 1)" has unsigned overflow.
- const SCEV *One = getOne(Stride->getType());
+ SCEVUse One = getOne(Stride->getType());
bool MayAddOverflow = [&] {
if (auto *StrideC = dyn_cast<SCEVConstant>(Stride)) {
if (StrideC->getAPInt().isPowerOf2()) {
@@ -13106,7 +13099,7 @@ ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
return true;
}();
- const SCEV *Delta = getMinusSCEV(End, Start);
+ SCEVUse Delta = getMinusSCEV(End, Start);
if (!MayAddOverflow) {
// floor((D + (S - 1)) / S)
// We prefer this formulation if it's legal because it's fewer operations.
@@ -13117,7 +13110,7 @@ ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
}
}
- const SCEV *ConstantMaxBECount;
+ SCEVUse ConstantMaxBECount;
bool MaxOrZero = false;
if (isa<SCEVConstant>(BECount)) {
ConstantMaxBECount = BECount;
@@ -13137,15 +13130,16 @@ ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
!isa<SCEVCouldNotCompute>(BECount))
ConstantMaxBECount = getConstant(getUnsignedRangeMax(BECount));
- const SCEV *SymbolicMaxBECount =
+ SCEVUse SymbolicMaxBECount =
isa<SCEVCouldNotCompute>(BECount) ? ConstantMaxBECount : BECount;
return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount, MaxOrZero,
Predicates);
}
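
The two BECount formulations that howManyLessThans chooses between agree whenever the addition cannot wrap; a quick standalone check (not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t Delta = 0; Delta < 500; ++Delta)
    for (uint32_t Stride = 1; Stride < 20; ++Stride) {
      // Preferred form: floor((Delta + (Stride - 1)) / Stride). Safe here
      // because these small values cannot overflow uint32_t.
      uint32_t Fast = (Delta + (Stride - 1)) / Stride;
      // Overflow-safe form, as in getUDivCeilSCEV.
      uint32_t M = Delta < 1 ? Delta : 1;
      uint32_t Safe = M + (Delta - M) / Stride;
      assert(Fast == Safe);
    }
  return 0;
}
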
-ScalarEvolution::ExitLimit ScalarEvolution::howManyGreaterThans(
- const SCEV *LHS, const SCEV *RHS, const Loop *L, bool IsSigned,
- bool ControlsOnlyExit, bool AllowPredicates) {
+ScalarEvolution::ExitLimit
+ScalarEvolution::howManyGreaterThans(SCEVUse LHS, SCEVUse RHS, const Loop *L,
+ bool IsSigned, bool ControlsOnlyExit,
+ bool AllowPredicates) {
SmallPtrSet<const SCEVPredicate *, 4> Predicates;
// We handle only IV > Invariant
if (!isLoopInvariant(RHS, L))
@@ -13166,7 +13160,7 @@ ScalarEvolution::ExitLimit ScalarEvolution::howManyGreaterThans(
bool NoWrap = ControlsOnlyExit && IV->getNoWrapFlags(WrapType);
ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
- const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));
+ SCEVUse Stride = getNegativeSCEV(IV->getStepRecurrence(*this));
// Avoid negative or zero stride values
if (!isKnownPositive(Stride))
@@ -13180,8 +13174,8 @@ ScalarEvolution::ExitLimit ScalarEvolution::howManyGreaterThans(
if (canIVOverflowOnGT(RHS, Stride, IsSigned))
return getCouldNotCompute();
- const SCEV *Start = IV->getStart();
- const SCEV *End = RHS;
+ SCEVUse Start = IV->getStart();
+ SCEVUse End = RHS;
if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) {
// If we know that Start >= RHS in the context of the loop, then we know
// that min(RHS, Start) = RHS at this point.
@@ -13206,8 +13200,8 @@ ScalarEvolution::ExitLimit ScalarEvolution::howManyGreaterThans(
// Compute ((Start - End) + (Stride - 1)) / Stride.
// FIXME: This can overflow. Holding off on fixing this for now;
// howManyGreaterThans will hopefully be gone soon.
- const SCEV *One = getOne(Stride->getType());
- const SCEV *BECount = getUDivExpr(
+ SCEVUse One = getOne(Stride->getType());
+ SCEVUse BECount = getUDivExpr(
getAddExpr(getMinusSCEV(Start, End), getMinusSCEV(Stride, One)), Stride);
APInt MaxStart = IsSigned ? getSignedRangeMax(Start)
@@ -13227,7 +13221,7 @@ ScalarEvolution::ExitLimit ScalarEvolution::howManyGreaterThans(
IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit)
: APIntOps::umax(getUnsignedRangeMin(RHS), Limit);
- const SCEV *ConstantMaxBECount =
+ SCEVUse ConstantMaxBECount =
isa<SCEVConstant>(BECount)
? BECount
: getUDivCeilSCEV(getConstant(MaxStart - MinEnd),
@@ -13235,25 +13229,25 @@ ScalarEvolution::ExitLimit ScalarEvolution::howManyGreaterThans(
if (isa<SCEVCouldNotCompute>(ConstantMaxBECount))
ConstantMaxBECount = BECount;
- const SCEV *SymbolicMaxBECount =
+ SCEVUse SymbolicMaxBECount =
isa<SCEVCouldNotCompute>(BECount) ? ConstantMaxBECount : BECount;
return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount, false,
Predicates);
}
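
A concrete model of the count being computed (illustrative only; the FIXME about overflow still applies to the real code):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t Start = 20, End = 3, Stride = 4;
  // ((Start - End) + (Stride - 1)) / Stride = ceil((Start - End) / Stride).
  uint32_t BECount = ((Start - End) + (Stride - 1)) / Stride; // 5
  uint32_t Taken = 0;
  // The IV visits 20, 16, 12, 8, 4; the guard "I u> End" holds five times.
  for (uint32_t I = Start; I > End; I -= Stride)
    ++Taken;
  assert(Taken == BECount);
  return 0;
}
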
-const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
- ScalarEvolution &SE) const {
+SCEVUse SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
+ ScalarEvolution &SE) const {
if (Range.isFullSet()) // Infinite loop.
return SE.getCouldNotCompute();
// If the start is a non-zero constant, shift the range to simplify things.
if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
if (!SC->getValue()->isZero()) {
- SmallVector<const SCEV *, 4> Operands(operands());
+ SmallVector<SCEVUse, 4> Operands(operands());
Operands[0] = SE.getZero(SC->getType());
- const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
- getNoWrapFlags(FlagNW));
+ SCEVUse Shifted =
+ SE.getAddRecExpr(Operands, getLoop(), getNoWrapFlags(FlagNW));
if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted))
return ShiftedAddRec->getNumIterationsInRange(
Range.subtract(SC->getAPInt()), SE);
@@ -13263,7 +13257,7 @@ const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
// The only time we can solve this is when we have all constant indices.
// Otherwise, we cannot determine the overflow conditions.
- if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); }))
+ if (any_of(operands(), [](SCEVUse Op) { return !isa<SCEVConstant>(Op); }))
return SE.getCouldNotCompute();
// Okay at this point we know that all elements of the chrec are constants and
@@ -13322,7 +13316,7 @@ SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const {
// simplification: it is legal to return ({rec1} + {rec2}). For example, it
// may happen if we reach the arithmetic depth limit while simplifying. So we
// construct the returned value explicitly.
- SmallVector<const SCEV *, 3> Ops;
+ SmallVector<SCEVUse, 3> Ops;
// If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and
// (this + Step) is {A+B,+,B+C,+...,+,N}.
for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
@@ -13331,7 +13325,7 @@ SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const {
// have been popped out earlier). This guarantees that if the result has
// the same last operand, then it will also not be popped out, meaning that
// the returned value will be an AddRec.
- const SCEV *Last = getOperand(getNumOperands() - 1);
+ SCEVUse Last = getOperand(getNumOperands() - 1);
assert(!Last->isZero() && "Recurrency with zero step?");
Ops.push_back(Last);
return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(),
@@ -13339,8 +13333,8 @@ SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const {
}
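
For the affine case the claim above is easy to see numerically (a sketch, not part of the patch): the post-increment form of {A,+,B} is {A+B,+,B}.

#include <cassert>

int main() {
  const int A = 7, B = 3;
  for (int I = 0; I < 10; ++I) {
    int PreInc = A + I * B;        // {A,+,B} at iteration I
    int PostInc = (A + B) + I * B; // {A+B,+,B} at iteration I
    // The post-increment recurrence is the same sequence, one step ahead.
    assert(PostInc == PreInc + B);
  }
  return 0;
}
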
// Return true when S contains at least one undef value.
-bool ScalarEvolution::containsUndefs(const SCEV *S) const {
- return SCEVExprContains(S, [](const SCEV *S) {
+bool ScalarEvolution::containsUndefs(SCEVUse S) const {
+ return SCEVExprContains(S, [](SCEVUse S) {
if (const auto *SU = dyn_cast<SCEVUnknown>(S))
return isa<UndefValue>(SU->getValue());
return false;
@@ -13348,8 +13342,8 @@ bool ScalarEvolution::containsUndefs(const SCEV *S) const {
}
// Return true when S contains a value that is a nullptr.
-bool ScalarEvolution::containsErasedValue(const SCEV *S) const {
- return SCEVExprContains(S, [](const SCEV *S) {
+bool ScalarEvolution::containsErasedValue(SCEVUse S) const {
+ return SCEVExprContains(S, [](SCEVUse S) {
if (const auto *SU = dyn_cast<SCEVUnknown>(S))
return SU->getValue() == nullptr;
return false;
@@ -13357,7 +13351,7 @@ bool ScalarEvolution::containsErasedValue(const SCEV *S) const {
}
/// Return the size of an element read or written by Inst.
-const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
+SCEVUse ScalarEvolution::getElementSize(Instruction *Inst) {
Type *Ty;
if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
Ty = Store->getValueOperand()->getType();
@@ -13501,7 +13495,7 @@ static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
if (ExitingBlocks.size() != 1)
OS << "<multiple exits> ";
- auto *BTC = SE->getBackedgeTakenCount(L);
+ auto BTC = SE->getBackedgeTakenCount(L);
if (!isa<SCEVCouldNotCompute>(BTC)) {
OS << "backedge-taken count is ";
PrintSCEVWithTypeHint(OS, BTC);
@@ -13520,7 +13514,7 @@ static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
OS << ": ";
- auto *ConstantBTC = SE->getConstantMaxBackedgeTakenCount(L);
+ auto ConstantBTC = SE->getConstantMaxBackedgeTakenCount(L);
if (!isa<SCEVCouldNotCompute>(ConstantBTC)) {
OS << "constant max backedge-taken count is ";
PrintSCEVWithTypeHint(OS, ConstantBTC);
@@ -13535,7 +13529,7 @@ static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
OS << ": ";
- auto *SymbolicBTC = SE->getSymbolicMaxBackedgeTakenCount(L);
+ auto SymbolicBTC = SE->getSymbolicMaxBackedgeTakenCount(L);
if (!isa<SCEVCouldNotCompute>(SymbolicBTC)) {
OS << "symbolic max backedge-taken count is ";
PrintSCEVWithTypeHint(OS, SymbolicBTC);
@@ -13549,14 +13543,14 @@ static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
if (ExitingBlocks.size() > 1)
for (BasicBlock *ExitingBlock : ExitingBlocks) {
OS << " symbolic max exit count for " << ExitingBlock->getName() << ": ";
- auto *ExitBTC = SE->getExitCount(L, ExitingBlock,
- ScalarEvolution::SymbolicMaximum);
+ auto ExitBTC =
+ SE->getExitCount(L, ExitingBlock, ScalarEvolution::SymbolicMaximum);
PrintSCEVWithTypeHint(OS, ExitBTC);
OS << "\n";
}
SmallVector<const SCEVPredicate *, 4> Preds;
- auto *PBT = SE->getPredicatedBackedgeTakenCount(L, Preds);
+ auto PBT = SE->getPredicatedBackedgeTakenCount(L, Preds);
if (PBT != BTC || !Preds.empty()) {
OS << "Loop ";
L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
@@ -13629,7 +13623,7 @@ void ScalarEvolution::print(raw_ostream &OS) const {
if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) {
OS << I << '\n';
OS << " --> ";
- const SCEV *SV = SE.getSCEV(&I);
+ SCEVUse SV = SE.getSCEV(&I);
SV->print(OS);
if (!isa<SCEVCouldNotCompute>(SV)) {
OS << " U: ";
@@ -13640,7 +13634,7 @@ void ScalarEvolution::print(raw_ostream &OS) const {
const Loop *L = LI.getLoopFor(I.getParent());
- const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
+ SCEVUse AtUse = SE.getSCEVAtScope(SV, L);
if (AtUse != SV) {
OS << " --> ";
AtUse->print(OS);
@@ -13654,7 +13648,7 @@ void ScalarEvolution::print(raw_ostream &OS) const {
if (L) {
OS << "\t\t" "Exits: ";
- const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
+ SCEVUse ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
if (!SE.isLoopInvariant(ExitValue, L)) {
OS << "<<Unknown>>";
} else {
@@ -13703,7 +13697,7 @@ void ScalarEvolution::print(raw_ostream &OS) const {
}
ScalarEvolution::LoopDisposition
-ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
+ScalarEvolution::getLoopDisposition(SCEVUse S, const Loop *L) {
auto &Values = LoopDispositions[S];
for (auto &V : Values) {
if (V.getPointer() == L)
@@ -13722,7 +13716,7 @@ ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
}
ScalarEvolution::LoopDisposition
-ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
+ScalarEvolution::computeLoopDisposition(SCEVUse S, const Loop *L) {
switch (S->getSCEVType()) {
case scConstant:
case scVScale:
@@ -13750,7 +13744,7 @@ ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
// This recurrence is variant w.r.t. L if any of its operands
// are variant.
- for (const auto *Op : AR->operands())
+ for (const auto Op : AR->operands())
if (!isLoopInvariant(Op, L))
return LoopVariant;
@@ -13770,7 +13764,7 @@ ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
case scSMinExpr:
case scSequentialUMinExpr: {
bool HasVarying = false;
- for (const auto *Op : S->operands()) {
+ for (const auto Op : S->operands()) {
LoopDisposition D = getLoopDisposition(Op, L);
if (D == LoopVariant)
return LoopVariant;
@@ -13793,16 +13787,16 @@ ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
llvm_unreachable("Unknown SCEV kind!");
}
-bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
+bool ScalarEvolution::isLoopInvariant(SCEVUse S, const Loop *L) {
return getLoopDisposition(S, L) == LoopInvariant;
}
-bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
+bool ScalarEvolution::hasComputableLoopEvolution(SCEVUse S, const Loop *L) {
return getLoopDisposition(S, L) == LoopComputable;
}
ScalarEvolution::BlockDisposition
-ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
+ScalarEvolution::getBlockDisposition(SCEVUse S, const BasicBlock *BB) {
auto &Values = BlockDispositions[S];
for (auto &V : Values) {
if (V.getPointer() == BB)
@@ -13821,7 +13815,7 @@ ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
}
ScalarEvolution::BlockDisposition
-ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
+ScalarEvolution::computeBlockDisposition(SCEVUse S, const BasicBlock *BB) {
switch (S->getSCEVType()) {
case scConstant:
case scVScale:
@@ -13851,7 +13845,7 @@ ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
case scSMinExpr:
case scSequentialUMinExpr: {
bool Proper = true;
- for (const SCEV *NAryOp : S->operands()) {
+ for (SCEVUse NAryOp : S->operands()) {
BlockDisposition D = getBlockDisposition(NAryOp, BB);
if (D == DoesNotDominateBlock)
return DoesNotDominateBlock;
@@ -13876,16 +13870,16 @@ ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
llvm_unreachable("Unknown SCEV kind!");
}
-bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
+bool ScalarEvolution::dominates(SCEVUse S, const BasicBlock *BB) {
return getBlockDisposition(S, BB) >= DominatesBlock;
}
-bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
+bool ScalarEvolution::properlyDominates(SCEVUse S, const BasicBlock *BB) {
return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
}
-bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
- return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; });
+bool ScalarEvolution::hasOperand(SCEVUse S, SCEVUse Op) const {
+ return SCEVExprContains(S, [&](SCEVUse Expr) { return Expr == Op; });
}
void ScalarEvolution::forgetBackedgeTakenCounts(const Loop *L,
@@ -13895,7 +13889,7 @@ void ScalarEvolution::forgetBackedgeTakenCounts(const Loop *L,
auto It = BECounts.find(L);
if (It != BECounts.end()) {
for (const ExitNotTakenInfo &ENT : It->second.ExitNotTaken) {
- for (const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
+ for (SCEVUse S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
if (!isa<SCEVConstant>(S)) {
auto UserIt = BECountUsers.find(S);
assert(UserIt != BECountUsers.end());
@@ -13907,25 +13901,25 @@ void ScalarEvolution::forgetBackedgeTakenCounts(const Loop *L,
}
}
-void ScalarEvolution::forgetMemoizedResults(ArrayRef<const SCEV *> SCEVs) {
- SmallPtrSet<const SCEV *, 8> ToForget(SCEVs.begin(), SCEVs.end());
- SmallVector<const SCEV *, 8> Worklist(ToForget.begin(), ToForget.end());
+void ScalarEvolution::forgetMemoizedResults(ArrayRef<SCEVUse> SCEVs) {
+ SmallPtrSet<SCEVUse, 8> ToForget(SCEVs.begin(), SCEVs.end());
+ SmallVector<SCEVUse, 8> Worklist(ToForget.begin(), ToForget.end());
while (!Worklist.empty()) {
- const SCEV *Curr = Worklist.pop_back_val();
+ SCEVUse Curr = Worklist.pop_back_val();
auto Users = SCEVUsers.find(Curr);
if (Users != SCEVUsers.end())
- for (const auto *User : Users->second)
+ for (const auto User : Users->second)
if (ToForget.insert(User).second)
Worklist.push_back(User);
}
- for (const auto *S : ToForget)
+ for (const auto S : ToForget)
forgetMemoizedResultsImpl(S);
for (auto I = PredicatedSCEVRewrites.begin();
I != PredicatedSCEVRewrites.end();) {
- std::pair<const SCEV *, const Loop *> Entry = I->first;
+ std::pair<SCEVUse, const Loop *> Entry = I->first;
if (ToForget.count(Entry.first))
PredicatedSCEVRewrites.erase(I++);
else
@@ -13933,7 +13927,7 @@ void ScalarEvolution::forgetMemoizedResults(ArrayRef<const SCEV *> SCEVs) {
}
}
-void ScalarEvolution::forgetMemoizedResultsImpl(const SCEV *S) {
+void ScalarEvolution::forgetMemoizedResultsImpl(SCEVUse S) {
LoopDispositions.erase(S);
BlockDispositions.erase(S);
UnsignedRanges.erase(S);
@@ -13988,14 +13982,13 @@ void ScalarEvolution::forgetMemoizedResultsImpl(const SCEV *S) {
FoldCacheUser.erase(S);
}
-void
-ScalarEvolution::getUsedLoops(const SCEV *S,
- SmallPtrSetImpl<const Loop *> &LoopsUsed) {
+void ScalarEvolution::getUsedLoops(SCEVUse S,
+ SmallPtrSetImpl<const Loop *> &LoopsUsed) {
struct FindUsedLoops {
FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
: LoopsUsed(LoopsUsed) {}
SmallPtrSetImpl<const Loop *> &LoopsUsed;
- bool follow(const SCEV *S) {
+ bool follow(SCEVUse S) {
if (auto *AR = dyn_cast<SCEVAddRecExpr>(S))
LoopsUsed.insert(AR->getLoop());
return true;
@@ -14027,8 +14020,8 @@ void ScalarEvolution::getReachableBlocks(
}
if (auto *Cmp = dyn_cast<ICmpInst>(Cond)) {
- const SCEV *L = getSCEV(Cmp->getOperand(0));
- const SCEV *R = getSCEV(Cmp->getOperand(1));
+ SCEVUse L = getSCEV(Cmp->getOperand(0));
+ SCEVUse R = getSCEV(Cmp->getOperand(1));
if (isKnownPredicateViaConstantRanges(Cmp->getPredicate(), L, R)) {
Worklist.push_back(TrueBB);
continue;
@@ -14055,15 +14048,15 @@ void ScalarEvolution::verify() const {
struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}
- const SCEV *visitConstant(const SCEVConstant *Constant) {
+ SCEVUse visitConstant(const SCEVConstant *Constant) {
return SE.getConstant(Constant->getAPInt());
}
- const SCEV *visitUnknown(const SCEVUnknown *Expr) {
+ SCEVUse visitUnknown(const SCEVUnknown *Expr) {
return SE.getUnknown(Expr->getValue());
}
- const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
+ SCEVUse visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
return SE.getCouldNotCompute();
}
};
@@ -14072,7 +14065,7 @@ void ScalarEvolution::verify() const {
SmallPtrSet<BasicBlock *, 16> ReachableBlocks;
SE2.getReachableBlocks(ReachableBlocks, F);
- auto GetDelta = [&](const SCEV *Old, const SCEV *New) -> const SCEV * {
+ auto GetDelta = [&](SCEVUse Old, SCEVUse New) -> SCEVUse {
if (containsUndefs(Old) || containsUndefs(New)) {
// SCEV treats "undef" as an unknown but consistent value (i.e. it does
// not propagate undef aggressively). This means we can (and do) fail
@@ -14083,7 +14076,7 @@ void ScalarEvolution::verify() const {
}
// Unless VerifySCEVStrict is set, we only compare constant deltas.
- const SCEV *Delta = SE2.getMinusSCEV(Old, New);
+ SCEVUse Delta = SE2.getMinusSCEV(Old, New);
if (!VerifySCEVStrict && !isa<SCEVConstant>(Delta))
return nullptr;
@@ -14105,9 +14098,9 @@ void ScalarEvolution::verify() const {
if (It == BackedgeTakenCounts.end())
continue;
- auto *CurBECount =
+ auto CurBECount =
SCM.visit(It->second.getExact(L, const_cast<ScalarEvolution *>(this)));
- auto *NewBECount = SE2.getBackedgeTakenCount(L);
+ auto NewBECount = SE2.getBackedgeTakenCount(L);
if (CurBECount == SE2.getCouldNotCompute() ||
NewBECount == SE2.getCouldNotCompute()) {
@@ -14126,7 +14119,7 @@ void ScalarEvolution::verify() const {
SE.getTypeSizeInBits(NewBECount->getType()))
CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());
- const SCEV *Delta = GetDelta(CurBECount, NewBECount);
+ SCEVUse Delta = GetDelta(CurBECount, NewBECount);
if (Delta && !Delta->isZero()) {
dbgs() << "Trip Count for " << *L << " Changed!\n";
dbgs() << "Old: " << *CurBECount << "\n";
@@ -14164,9 +14157,9 @@ void ScalarEvolution::verify() const {
if (auto *I = dyn_cast<Instruction>(&*KV.first)) {
if (!ReachableBlocks.contains(I->getParent()))
continue;
- const SCEV *OldSCEV = SCM.visit(KV.second);
- const SCEV *NewSCEV = SE2.getSCEV(I);
- const SCEV *Delta = GetDelta(OldSCEV, NewSCEV);
+ SCEVUse OldSCEV = SCM.visit(KV.second);
+ SCEVUse NewSCEV = SE2.getSCEV(I);
+ SCEVUse Delta = GetDelta(OldSCEV, NewSCEV);
if (Delta && !Delta->isZero()) {
dbgs() << "SCEV for value " << *I << " changed!\n"
<< "Old: " << *OldSCEV << "\n"
@@ -14195,7 +14188,7 @@ void ScalarEvolution::verify() const {
// Verify integrity of SCEV users.
for (const auto &S : UniqueSCEVs) {
- for (const auto *Op : S.operands()) {
+ for (const auto Op : S.operands()) {
// We do not store dependencies of constants.
if (isa<SCEVConstant>(Op))
continue;
@@ -14210,10 +14203,10 @@ void ScalarEvolution::verify() const {
// Verify integrity of ValuesAtScopes users.
for (const auto &ValueAndVec : ValuesAtScopes) {
- const SCEV *Value = ValueAndVec.first;
+ SCEVUse Value = ValueAndVec.first;
for (const auto &LoopAndValueAtScope : ValueAndVec.second) {
const Loop *L = LoopAndValueAtScope.first;
- const SCEV *ValueAtScope = LoopAndValueAtScope.second;
+ SCEVUse ValueAtScope = LoopAndValueAtScope.second;
if (!isa<SCEVConstant>(ValueAtScope)) {
auto It = ValuesAtScopesUsers.find(ValueAtScope);
if (It != ValuesAtScopesUsers.end() &&
@@ -14227,10 +14220,10 @@ void ScalarEvolution::verify() const {
}
for (const auto &ValueAtScopeAndVec : ValuesAtScopesUsers) {
- const SCEV *ValueAtScope = ValueAtScopeAndVec.first;
+ SCEVUse ValueAtScope = ValueAtScopeAndVec.first;
for (const auto &LoopAndValue : ValueAtScopeAndVec.second) {
const Loop *L = LoopAndValue.first;
- const SCEV *Value = LoopAndValue.second;
+ SCEVUse Value = LoopAndValue.second;
assert(!isa<SCEVConstant>(Value));
auto It = ValuesAtScopes.find(Value);
if (It != ValuesAtScopes.end() &&
@@ -14248,7 +14241,7 @@ void ScalarEvolution::verify() const {
Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
for (const auto &LoopAndBEInfo : BECounts) {
for (const ExitNotTakenInfo &ENT : LoopAndBEInfo.second.ExitNotTaken) {
- for (const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
+ for (SCEVUse S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
if (!isa<SCEVConstant>(S)) {
auto UserIt = BECountUsers.find(S);
if (UserIt != BECountUsers.end() &&
@@ -14423,14 +14416,14 @@ void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}
-const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS,
- const SCEV *RHS) {
+const SCEVPredicate *ScalarEvolution::getEqualPredicate(SCEVUse LHS,
+ SCEVUse RHS) {
return getComparePredicate(ICmpInst::ICMP_EQ, LHS, RHS);
}
const SCEVPredicate *
ScalarEvolution::getComparePredicate(const ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS) {
+ SCEVUse LHS, SCEVUse RHS) {
FoldingSetNodeID ID;
assert(LHS->getType() == RHS->getType() &&
"Type mismatch between LHS and RHS");
@@ -14478,14 +14471,14 @@ class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
///
/// If \p NewPreds is non-null, rewrite is free to add further predicates to
/// \p NewPreds such that the result will be an AddRecExpr.
- static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
- SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
- const SCEVPredicate *Pred) {
+ static SCEVUse rewrite(SCEVUse S, const Loop *L, ScalarEvolution &SE,
+ SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
+ const SCEVPredicate *Pred) {
SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred);
return Rewriter.visit(S);
}
- const SCEV *visitUnknown(const SCEVUnknown *Expr) {
+ SCEVUse visitUnknown(const SCEVUnknown *Expr) {
if (Pred) {
if (auto *U = dyn_cast<SCEVUnionPredicate>(Pred)) {
for (const auto *Pred : U->getPredicates())
@@ -14502,13 +14495,13 @@ class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
return convertToAddRecWithPreds(Expr);
}
- const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
- const SCEV *Operand = visit(Expr->getOperand());
+ SCEVUse visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
+ SCEVUse Operand = visit(Expr->getOperand());
const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
if (AR && AR->getLoop() == L && AR->isAffine()) {
// This couldn't be folded because the operand didn't have the nuw
// flag. Add the nusw flag as an assumption that we could make.
- const SCEV *Step = AR->getStepRecurrence(SE);
+ SCEVUse Step = AR->getStepRecurrence(SE);
Type *Ty = Expr->getType();
if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW))
return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty),
@@ -14518,13 +14511,13 @@ class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
return SE.getZeroExtendExpr(Operand, Expr->getType());
}
- const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
- const SCEV *Operand = visit(Expr->getOperand());
+ SCEVUse visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
+ SCEVUse Operand = visit(Expr->getOperand());
const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
if (AR && AR->getLoop() == L && AR->isAffine()) {
// This couldn't be folded because the operand didn't have the nsw
// flag. Add the nssw flag as an assumption that we could make.
- const SCEV *Step = AR->getStepRecurrence(SE);
+ SCEVUse Step = AR->getStepRecurrence(SE);
Type *Ty = Expr->getType();
if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW))
return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty),
@@ -14561,11 +14554,10 @@ class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
// If \p Expr does not meet these conditions (is not a PHI node, or we
// couldn't create an AddRec for it, or couldn't add the predicate), we just
// return \p Expr.
- const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) {
+ SCEVUse convertToAddRecWithPreds(const SCEVUnknown *Expr) {
if (!isa<PHINode>(Expr->getValue()))
return Expr;
- std::optional<
- std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
+ std::optional<std::pair<SCEVUse, SmallVector<const SCEVPredicate *, 3>>>
PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr);
if (!PredicatedRewrite)
return Expr;
@@ -14588,15 +14580,13 @@ class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
} // end anonymous namespace
-const SCEV *
-ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L,
- const SCEVPredicate &Preds) {
+SCEVUse ScalarEvolution::rewriteUsingPredicate(SCEVUse S, const Loop *L,
+ const SCEVPredicate &Preds) {
return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds);
}
const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates(
- const SCEV *S, const Loop *L,
- SmallPtrSetImpl<const SCEVPredicate *> &Preds) {
+ SCEVUse S, const Loop *L, SmallPtrSetImpl<const SCEVPredicate *> &Preds) {
SmallPtrSet<const SCEVPredicate *, 4> TransformPreds;
S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr);
auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);
@@ -14618,9 +14608,9 @@ SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID,
: FastID(ID), Kind(Kind) {}
SCEVComparePredicate::SCEVComparePredicate(const FoldingSetNodeIDRef ID,
- const ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS)
- : SCEVPredicate(ID, P_Compare), Pred(Pred), LHS(LHS), RHS(RHS) {
+ const ICmpInst::Predicate Pred,
+ SCEVUse LHS, SCEVUse RHS)
+ : SCEVPredicate(ID, P_Compare), Pred(Pred), LHS(LHS), RHS(RHS) {
assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match");
assert(LHS != RHS && "LHS and RHS are the same SCEV");
}
@@ -14744,9 +14734,8 @@ PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE,
Preds = std::make_unique<SCEVUnionPredicate>(Empty);
}
-void ScalarEvolution::registerUser(const SCEV *User,
- ArrayRef<const SCEV *> Ops) {
- for (const auto *Op : Ops)
+void ScalarEvolution::registerUser(SCEVUse User, ArrayRef<SCEVUse> Ops) {
+ for (const auto Op : Ops)
// We do not expect that forgetting cached data for SCEVConstants will ever
// open any prospects for sharpening or introduce any correctness issues,
// so we don't bother storing their dependencies.
@@ -14754,8 +14743,8 @@ void ScalarEvolution::registerUser(const SCEV *User,
SCEVUsers[Op].insert(User);
}
-const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
- const SCEV *Expr = SE.getSCEV(V);
+SCEVUse PredicatedScalarEvolution::getSCEV(Value *V) {
+ SCEVUse Expr = SE.getSCEV(V);
RewriteEntry &Entry = RewriteMap[Expr];
// If we already have an entry and the version matches, return it.
@@ -14767,13 +14756,13 @@ const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
if (Entry.second)
Expr = Entry.second;
- const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, *Preds);
+ SCEVUse NewSCEV = SE.rewriteUsingPredicate(Expr, &L, *Preds);
Entry = {Generation, NewSCEV};
return NewSCEV;
}
-const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() {
+SCEVUse PredicatedScalarEvolution::getBackedgeTakenCount() {
if (!BackedgeCount) {
SmallVector<const SCEVPredicate *, 4> Preds;
BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, Preds);
@@ -14802,7 +14791,7 @@ void PredicatedScalarEvolution::updateGeneration() {
// If the generation number wrapped recompute everything.
if (++Generation == 0) {
for (auto &II : RewriteMap) {
- const SCEV *Rewritten = II.second.second;
+ SCEVUse Rewritten = II.second.second;
II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, *Preds)};
}
}
@@ -14810,7 +14799,7 @@ void PredicatedScalarEvolution::updateGeneration() {
void PredicatedScalarEvolution::setNoOverflow(
Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
- const SCEV *Expr = getSCEV(V);
+ SCEVUse Expr = getSCEV(V);
const auto *AR = cast<SCEVAddRecExpr>(Expr);
auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE);
@@ -14826,7 +14815,7 @@ void PredicatedScalarEvolution::setNoOverflow(
bool PredicatedScalarEvolution::hasNoOverflow(
Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
- const SCEV *Expr = getSCEV(V);
+ SCEVUse Expr = getSCEV(V);
const auto *AR = cast<SCEVAddRecExpr>(Expr);
Flags = SCEVWrapPredicate::clearFlags(
@@ -14841,7 +14830,7 @@ bool PredicatedScalarEvolution::hasNoOverflow(
}
const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) {
- const SCEV *Expr = this->getSCEV(V);
+ SCEVUse Expr = this->getSCEV(V);
SmallPtrSet<const SCEVPredicate *, 4> NewPreds;
auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);
@@ -14871,7 +14860,7 @@ void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const {
if (!SE.isSCEVable(I.getType()))
continue;
- auto *Expr = SE.getSCEV(&I);
+ auto Expr = SE.getSCEV(&I);
auto II = RewriteMap.find(Expr);
if (II == RewriteMap.end())
@@ -14892,8 +14881,7 @@ void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const {
// for URem with constant power-of-2 second operands.
// It's not always easy, as A and B can be folded (imagine A is X / 2, and B is
// 4, A / B becomes X / 8).
-bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS,
- const SCEV *&RHS) {
+bool ScalarEvolution::matchURem(SCEVUse Expr, SCEVUse &LHS, SCEVUse &RHS) {
// Try to match 'zext (trunc A to iB) to iY', which is used
// for URem with constant power-of-2 second operands. Make sure the size of
  // the operand A matches the size of the whole expression.
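(Note: SCEV has no dedicated urem node, so matchURem reverses the expanded forms described above. Both shapes correspond to plain integer identities; a standalone sketch with ordinary unsigned arithmetic, not SCEVs:)

#include <cassert>
#include <cstdint>

int main() {
  uint64_t A = 1234, B = 7;
  // General case: A urem B is represented as A + (-(A /u B) * B);
  // under modular (wrapping) arithmetic this equals A % B.
  uint64_t Expanded = A + (-(A / B) * B);
  assert(Expanded == A % B);

  // Power-of-two case: A urem 2^k is represented as zext(trunc A to i_k),
  // which is just masking off the high bits.
  uint64_t K = 8;
  uint64_t Masked = A & ((uint64_t(1) << K) - 1);
  assert(Masked == A % (uint64_t(1) << K));
}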
@@ -14915,13 +14903,13 @@ bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS,
if (Add == nullptr || Add->getNumOperands() != 2)
return false;
- const SCEV *A = Add->getOperand(1);
+ SCEVUse A = Add->getOperand(1);
const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0));
if (Mul == nullptr)
return false;
- const auto MatchURemWithDivisor = [&](const SCEV *B) {
+ const auto MatchURemWithDivisor = [&](SCEVUse B) {
// (SomeExpr + (-(SomeExpr / B) * B)).
if (Expr == getURemExpr(A, B)) {
LHS = A;
@@ -14945,17 +14933,16 @@ bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS,
return false;
}
-const SCEV *
-ScalarEvolution::computeSymbolicMaxBackedgeTakenCount(const Loop *L) {
+SCEVUse ScalarEvolution::computeSymbolicMaxBackedgeTakenCount(const Loop *L) {
SmallVector<BasicBlock*, 16> ExitingBlocks;
L->getExitingBlocks(ExitingBlocks);
// Form an expression for the maximum exit count possible for this loop. We
// merge the max and exact information to approximate a version of
// getConstantMaxBackedgeTakenCount which isn't restricted to just constants.
- SmallVector<const SCEV*, 4> ExitCounts;
+ SmallVector<SCEVUse, 4> ExitCounts;
for (BasicBlock *ExitingBB : ExitingBlocks) {
- const SCEV *ExitCount =
+ SCEVUse ExitCount =
getExitCount(L, ExitingBB, ScalarEvolution::SymbolicMaximum);
if (!isa<SCEVCouldNotCompute>(ExitCount)) {
assert(DT.dominates(ExitingBB, L->getLoopLatch()) &&
@@ -14973,34 +14960,33 @@ ScalarEvolution::computeSymbolicMaxBackedgeTakenCount(const Loop *L) {
/// in the map. It skips AddRecExpr because we cannot guarantee that the
/// replacement is loop invariant in the loop of the AddRec.
class SCEVLoopGuardRewriter : public SCEVRewriteVisitor<SCEVLoopGuardRewriter> {
- const DenseMap<const SCEV *, const SCEV *> ⤅
+ const DenseMap<const SCEV *, SCEVUse> ⤅
public:
- SCEVLoopGuardRewriter(ScalarEvolution &SE,
- DenseMap<const SCEV *, const SCEV *> &M)
+ SCEVLoopGuardRewriter(ScalarEvolution &SE, DenseMap<const SCEV *, SCEVUse> &M)
: SCEVRewriteVisitor(SE), Map(M) {}
- const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; }
+ SCEVUse visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; }
- const SCEV *visitUnknown(const SCEVUnknown *Expr) {
+ SCEVUse visitUnknown(const SCEVUnknown *Expr) {
auto I = Map.find(Expr);
if (I == Map.end())
return Expr;
return I->second;
}
- const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
+ SCEVUse visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
auto I = Map.find(Expr);
if (I == Map.end()) {
      // If we didn't find the exact ZExt expr in the map, check if there's an
// entry for a smaller ZExt we can use instead.
Type *Ty = Expr->getType();
- const SCEV *Op = Expr->getOperand(0);
+ SCEVUse Op = Expr->getOperand(0);
unsigned Bitwidth = Ty->getScalarSizeInBits() / 2;
while (Bitwidth % 8 == 0 && Bitwidth >= 8 &&
Bitwidth > Op->getType()->getScalarSizeInBits()) {
Type *NarrowTy = IntegerType::get(SE.getContext(), Bitwidth);
- auto *NarrowExt = SE.getZeroExtendExpr(Op, NarrowTy);
+ auto NarrowExt = SE.getZeroExtendExpr(Op, NarrowTy);
auto I = Map.find(NarrowExt);
if (I != Map.end())
return SE.getZeroExtendExpr(I->second, Ty);
@@ -15013,7 +14999,7 @@ class SCEVLoopGuardRewriter : public SCEVRewriteVisitor<SCEVLoopGuardRewriter> {
return I->second;
}
- const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
+ SCEVUse visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
auto I = Map.find(Expr);
if (I == Map.end())
return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitSignExtendExpr(
@@ -15021,14 +15007,14 @@ class SCEVLoopGuardRewriter : public SCEVRewriteVisitor<SCEVLoopGuardRewriter> {
return I->second;
}
- const SCEV *visitUMinExpr(const SCEVUMinExpr *Expr) {
+ SCEVUse visitUMinExpr(const SCEVUMinExpr *Expr) {
auto I = Map.find(Expr);
if (I == Map.end())
return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitUMinExpr(Expr);
return I->second;
}
- const SCEV *visitSMinExpr(const SCEVSMinExpr *Expr) {
+ SCEVUse visitSMinExpr(const SCEVSMinExpr *Expr) {
auto I = Map.find(Expr);
if (I == Map.end())
return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitSMinExpr(Expr);
@@ -15036,12 +15022,11 @@ class SCEVLoopGuardRewriter : public SCEVRewriteVisitor<SCEVLoopGuardRewriter> {
}
};
-const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
- SmallVector<const SCEV *> ExprsToRewrite;
- auto CollectCondition = [&](ICmpInst::Predicate Predicate, const SCEV *LHS,
- const SCEV *RHS,
- DenseMap<const SCEV *, const SCEV *>
- &RewriteMap) {
+SCEVUse ScalarEvolution::applyLoopGuards(SCEVUse Expr, const Loop *L) {
+ SmallVector<SCEVUse> ExprsToRewrite;
+ auto CollectCondition = [&](ICmpInst::Predicate Predicate, SCEVUse LHS,
+ SCEVUse RHS,
+ DenseMap<const SCEV *, SCEVUse> &RewriteMap) {
// WARNING: It is generally unsound to apply any wrap flags to the proposed
// replacement SCEV which isn't directly implied by the structure of that
// SCEV. In particular, using contextual facts to imply flags is *NOT*
@@ -15076,7 +15061,7 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
if (ExactRegion.isWrappedSet() || ExactRegion.isFullSet())
return false;
auto I = RewriteMap.find(LHSUnknown);
- const SCEV *RewrittenLHS = I != RewriteMap.end() ? I->second : LHSUnknown;
+ SCEVUse RewrittenLHS = I != RewriteMap.end() ? I->second : LHSUnknown;
RewriteMap[LHSUnknown] = getUMaxExpr(
getConstant(ExactRegion.getUnsignedMin()),
getUMinExpr(RewrittenLHS, getConstant(ExactRegion.getUnsignedMax())));
@@ -15090,8 +15075,7 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
// constant operand. If so, return in \p SCTy the SCEV type and in \p RHS
// the non-constant operand and in \p LHS the constant operand.
auto IsMinMaxSCEVWithNonNegativeConstant =
- [&](const SCEV *Expr, SCEVTypes &SCTy, const SCEV *&LHS,
- const SCEV *&RHS) {
+ [&](SCEVUse Expr, SCEVTypes &SCTy, SCEVUse &LHS, SCEVUse &RHS) {
if (auto *MinMax = dyn_cast<SCEVMinMaxExpr>(Expr)) {
if (MinMax->getNumOperands() != 2)
return false;
@@ -15109,7 +15093,7 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
  // Checks whether Expr is a non-negative constant and Divisor is a positive
  // constant; if so, returns their APInt values in ExprVal and DivisorVal.
- auto GetNonNegExprAndPosDivisor = [&](const SCEV *Expr, const SCEV *Divisor,
+ auto GetNonNegExprAndPosDivisor = [&](SCEVUse Expr, SCEVUse Divisor,
APInt &ExprVal, APInt &DivisorVal) {
auto *ConstExpr = dyn_cast<SCEVConstant>(Expr);
auto *ConstDivisor = dyn_cast<SCEVConstant>(Divisor);
@@ -15123,8 +15107,7 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
  // Return a new SCEV that modifies \p Expr to the closest number divisible
  // by \p Divisor that is greater than or equal to \p Expr.
// For now, only handle constant Expr and Divisor.
- auto GetNextSCEVDividesByDivisor = [&](const SCEV *Expr,
- const SCEV *Divisor) {
+ auto GetNextSCEVDividesByDivisor = [&](SCEVUse Expr, SCEVUse Divisor) {
APInt ExprVal;
APInt DivisorVal;
if (!GetNonNegExprAndPosDivisor(Expr, Divisor, ExprVal, DivisorVal))
@@ -15139,8 +15122,7 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
  // Return a new SCEV that modifies \p Expr to the closest number divisible
  // by \p Divisor that is less than or equal to \p Expr.
// For now, only handle constant Expr and Divisor.
- auto GetPreviousSCEVDividesByDivisor = [&](const SCEV *Expr,
- const SCEV *Divisor) {
+ auto GetPreviousSCEVDividesByDivisor = [&](SCEVUse Expr, SCEVUse Divisor) {
APInt ExprVal;
APInt DivisorVal;
if (!GetNonNegExprAndPosDivisor(Expr, Divisor, ExprVal, DivisorVal))
@@ -15153,10 +15135,9 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
  // Apply divisibility by \p Divisor on MinMaxExpr with constant values,
// recursively. This is done by aligning up/down the constant value to the
// Divisor.
- std::function<const SCEV *(const SCEV *, const SCEV *)>
- ApplyDivisibiltyOnMinMaxExpr = [&](const SCEV *MinMaxExpr,
- const SCEV *Divisor) {
- const SCEV *MinMaxLHS = nullptr, *MinMaxRHS = nullptr;
+ std::function<SCEVUse(SCEVUse, SCEVUse)> ApplyDivisibiltyOnMinMaxExpr =
+ [&](SCEVUse MinMaxExpr, SCEVUse Divisor) {
+ SCEVUse MinMaxLHS = nullptr, MinMaxRHS = nullptr;
SCEVTypes SCTy;
if (!IsMinMaxSCEVWithNonNegativeConstant(MinMaxExpr, SCTy, MinMaxLHS,
MinMaxRHS))
@@ -15165,10 +15146,10 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
isa<SCEVSMinExpr>(MinMaxExpr) || isa<SCEVUMinExpr>(MinMaxExpr);
assert(isKnownNonNegative(MinMaxLHS) &&
"Expected non-negative operand!");
- auto *DivisibleExpr =
+ auto DivisibleExpr =
IsMin ? GetPreviousSCEVDividesByDivisor(MinMaxLHS, Divisor)
: GetNextSCEVDividesByDivisor(MinMaxLHS, Divisor);
- SmallVector<const SCEV *> Ops = {
+ SmallVector<SCEVUse> Ops = {
ApplyDivisibiltyOnMinMaxExpr(MinMaxRHS, Divisor), DivisibleExpr};
return getMinMaxExpr(SCTy, Ops);
};
@@ -15180,15 +15161,14 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
RHSC->getValue()->isNullValue()) {
// If LHS is A % B, i.e. A % B == 0, rewrite A to (A /u B) * B to
// explicitly express that.
- const SCEV *URemLHS = nullptr;
- const SCEV *URemRHS = nullptr;
+ SCEVUse URemLHS = nullptr;
+ SCEVUse URemRHS = nullptr;
if (matchURem(LHS, URemLHS, URemRHS)) {
if (const SCEVUnknown *LHSUnknown = dyn_cast<SCEVUnknown>(URemLHS)) {
auto I = RewriteMap.find(LHSUnknown);
- const SCEV *RewrittenLHS =
- I != RewriteMap.end() ? I->second : LHSUnknown;
+ SCEVUse RewrittenLHS = I != RewriteMap.end() ? I->second : LHSUnknown;
RewrittenLHS = ApplyDivisibiltyOnMinMaxExpr(RewrittenLHS, URemRHS);
- const auto *Multiple =
+ const auto Multiple =
getMulExpr(getUDivExpr(RewrittenLHS, URemRHS), URemRHS);
RewriteMap[LHSUnknown] = Multiple;
ExprsToRewrite.push_back(LHSUnknown);
@@ -15211,8 +15191,7 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
// and \p FromRewritten are the same (i.e. there has been no rewrite
// registered for \p From), then puts this value in the list of rewritten
// expressions.
- auto AddRewrite = [&](const SCEV *From, const SCEV *FromRewritten,
- const SCEV *To) {
+ auto AddRewrite = [&](SCEVUse From, SCEVUse FromRewritten, SCEVUse To) {
if (From == FromRewritten)
ExprsToRewrite.push_back(From);
RewriteMap[From] = To;
@@ -15221,7 +15200,7 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
// Checks whether \p S has already been rewritten. In that case returns the
// existing rewrite because we want to chain further rewrites onto the
// already rewritten value. Otherwise returns \p S.
- auto GetMaybeRewritten = [&](const SCEV *S) {
+ auto GetMaybeRewritten = [&](SCEVUse S) {
auto I = RewriteMap.find(S);
return I != RewriteMap.end() ? I->second : S;
};
@@ -15233,13 +15212,13 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
// example, if Expr = umin (umax ((A /u 8) * 8, 16), 64), return true since
// (A /u 8) * 8 matched the pattern, and return the constant SCEV 8 in \p
// DividesBy.
- std::function<bool(const SCEV *, const SCEV *&)> HasDivisibiltyInfo =
- [&](const SCEV *Expr, const SCEV *&DividesBy) {
+ std::function<bool(SCEVUse, SCEVUse &)> HasDivisibiltyInfo =
+ [&](SCEVUse Expr, SCEVUse &DividesBy) {
if (auto *Mul = dyn_cast<SCEVMulExpr>(Expr)) {
if (Mul->getNumOperands() != 2)
return false;
- auto *MulLHS = Mul->getOperand(0);
- auto *MulRHS = Mul->getOperand(1);
+ auto MulLHS = Mul->getOperand(0);
+ auto MulRHS = Mul->getOperand(1);
if (isa<SCEVConstant>(MulLHS))
std::swap(MulLHS, MulRHS);
if (auto *Div = dyn_cast<SCEVUDivExpr>(MulLHS))
@@ -15255,8 +15234,8 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
};
  // Return true if \p Expr is known to be divisible by \p DividesBy.
- std::function<bool(const SCEV *, const SCEV *&)> IsKnownToDivideBy =
- [&](const SCEV *Expr, const SCEV *DividesBy) {
+ std::function<bool(SCEVUse, SCEVUse &)> IsKnownToDivideBy =
+ [&](SCEVUse Expr, SCEVUse DividesBy) {
if (getURemExpr(Expr, DividesBy)->isZero())
return true;
if (auto *MinMax = dyn_cast<SCEVMinMaxExpr>(Expr))
@@ -15265,8 +15244,8 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
return false;
};
- const SCEV *RewrittenLHS = GetMaybeRewritten(LHS);
- const SCEV *DividesBy = nullptr;
+ SCEVUse RewrittenLHS = GetMaybeRewritten(LHS);
+ SCEVUse DividesBy = nullptr;
if (HasDivisibiltyInfo(RewrittenLHS, DividesBy))
// Check that the whole expression is divided by DividesBy
DividesBy =
@@ -15283,50 +15262,50 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
// We cannot express strict predicates in SCEV, so instead we replace them
// with non-strict ones against plus or minus one of RHS depending on the
// predicate.
- const SCEV *One = getOne(RHS->getType());
+ SCEVUse One = getOne(RHS->getType());
switch (Predicate) {
- case CmpInst::ICMP_ULT:
- if (RHS->getType()->isPointerTy())
- return;
- RHS = getUMaxExpr(RHS, One);
- [[fallthrough]];
- case CmpInst::ICMP_SLT: {
- RHS = getMinusSCEV(RHS, One);
- RHS = DividesBy ? GetPreviousSCEVDividesByDivisor(RHS, DividesBy) : RHS;
- break;
- }
- case CmpInst::ICMP_UGT:
- case CmpInst::ICMP_SGT:
- RHS = getAddExpr(RHS, One);
- RHS = DividesBy ? GetNextSCEVDividesByDivisor(RHS, DividesBy) : RHS;
- break;
- case CmpInst::ICMP_ULE:
- case CmpInst::ICMP_SLE:
- RHS = DividesBy ? GetPreviousSCEVDividesByDivisor(RHS, DividesBy) : RHS;
- break;
- case CmpInst::ICMP_UGE:
- case CmpInst::ICMP_SGE:
- RHS = DividesBy ? GetNextSCEVDividesByDivisor(RHS, DividesBy) : RHS;
- break;
- default:
- break;
+ case CmpInst::ICMP_ULT:
+ if (RHS->getType()->isPointerTy())
+ return;
+ RHS = getUMaxExpr(RHS, One);
+ [[fallthrough]];
+ case CmpInst::ICMP_SLT: {
+ RHS = getMinusSCEV(RHS, One);
+ RHS = DividesBy ? GetPreviousSCEVDividesByDivisor(RHS, DividesBy) : RHS;
+ break;
+ }
+ case CmpInst::ICMP_UGT:
+ case CmpInst::ICMP_SGT:
+ RHS = getAddExpr(RHS, One);
+ RHS = DividesBy ? GetNextSCEVDividesByDivisor(RHS, DividesBy) : RHS;
+ break;
+ case CmpInst::ICMP_ULE:
+ case CmpInst::ICMP_SLE:
+ RHS = DividesBy ? GetPreviousSCEVDividesByDivisor(RHS, DividesBy) : RHS;
+ break;
+ case CmpInst::ICMP_UGE:
+ case CmpInst::ICMP_SGE:
+ RHS = DividesBy ? GetNextSCEVDividesByDivisor(RHS, DividesBy) : RHS;
+ break;
+ default:
+ break;
}
- SmallVector<const SCEV *, 16> Worklist(1, LHS);
- SmallPtrSet<const SCEV *, 16> Visited;
+ SmallVector<SCEVUse, 16> Worklist(1, LHS);
+ SmallPtrSet<SCEVUse, 16> Visited;
auto EnqueueOperands = [&Worklist](const SCEVNAryExpr *S) {
append_range(Worklist, S->operands());
};
while (!Worklist.empty()) {
- const SCEV *From = Worklist.pop_back_val();
+ SCEVUse From = Worklist.pop_back_val();
if (isa<SCEVConstant>(From))
continue;
if (!Visited.insert(From).second)
continue;
- const SCEV *FromRewritten = GetMaybeRewritten(From);
- const SCEV *To = nullptr;
+ SCEVUse FromRewritten = GetMaybeRewritten(From);
+ SCEVUse To = nullptr;
switch (Predicate) {
case CmpInst::ICMP_ULT:
@@ -15360,7 +15339,7 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
case CmpInst::ICMP_NE:
if (isa<SCEVConstant>(RHS) &&
cast<SCEVConstant>(RHS)->getValue()->isNullValue()) {
- const SCEV *OneAlignedUp =
+ SCEVUse OneAlignedUp =
DividesBy ? GetNextSCEVDividesByDivisor(One, DividesBy) : One;
To = getUMaxExpr(FromRewritten, OneAlignedUp);
}
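(Note: in plain arithmetic, the switch above turns a strict bound x <u RHS into the non-strict x <=u RHS - 1, after first clamping RHS to at least 1 so the subtraction cannot wrap, and then tightens the bound using any known divisor of the left-hand side. A small sketch with hypothetical helper names:)

#include <cassert>
#include <cstdint>

static uint64_t alignDown(uint64_t V, uint64_t D) { return V - V % D; }

// From the guard "LHS <u RHS", derive the strongest non-strict bound
// "LHS <=u Bound". DividesBy == 0 means no divisibility info is known.
static uint64_t upperBoundULT(uint64_t RHS, uint64_t DividesBy) {
  if (RHS == 0)
    RHS = 1;                // mirrors RHS = umax(RHS, 1)
  uint64_t Bound = RHS - 1; // x <u RHS  ==>  x <=u RHS - 1
  return DividesBy ? alignDown(Bound, DividesBy) : Bound;
}

int main() {
  // x <u 14 gives x <=u 13; if x is known to be a multiple of 4, x <=u 12.
  assert(upperBoundULT(14, 0) == 13);
  assert(upperBoundULT(14, 4) == 12);
}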
@@ -15417,7 +15396,7 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
  // Conditions are processed in reverse order, so the earliest condition is
// processed first. This ensures the SCEVs with the shortest dependency chains
// are constructed first.
- DenseMap<const SCEV *, const SCEV *> RewriteMap;
+ DenseMap<const SCEV *, SCEVUse> RewriteMap;
for (auto [Term, EnterIfTrue] : reverse(Terms)) {
SmallVector<Value *, 8> Worklist;
SmallPtrSet<Value *, 8> Visited;
@@ -15430,8 +15409,8 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
if (auto *Cmp = dyn_cast<ICmpInst>(Cond)) {
auto Predicate =
EnterIfTrue ? Cmp->getPredicate() : Cmp->getInversePredicate();
- const auto *LHS = getSCEV(Cmp->getOperand(0));
- const auto *RHS = getSCEV(Cmp->getOperand(1));
+ const auto LHS = getSCEV(Cmp->getOperand(0));
+ const auto RHS = getSCEV(Cmp->getOperand(1));
CollectCondition(Predicate, LHS, RHS, RewriteMap);
continue;
}
@@ -15452,8 +15431,8 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
// expressions with the information in the map. This applies information to
// sub-expressions.
if (ExprsToRewrite.size() > 1) {
- for (const SCEV *Expr : ExprsToRewrite) {
- const SCEV *RewriteTo = RewriteMap[Expr];
+ for (SCEVUse Expr : ExprsToRewrite) {
+ SCEVUse RewriteTo = RewriteMap[Expr];
RewriteMap.erase(Expr);
SCEVLoopGuardRewriter Rewriter(*this, RewriteMap);
RewriteMap.insert({Expr, Rewriter.visit(RewriteTo)});
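(Note: this last loop re-applies the collected map to each rewritten expression with its own entry temporarily removed, so a fact derived from one condition propagates into the replacement registered for another. A toy illustration of the chaining, with string keys standing in for SCEVs:)

#include <cassert>
#include <map>
#include <string>

// Toy "rewriter": a whole-value lookup in the map, like a single visit().
static std::string rewrite(const std::string &S,
                           const std::map<std::string, std::string> &Map) {
  auto It = Map.find(S);
  return It == Map.end() ? S : It->second;
}

int main() {
  // Condition 1 rewrote "a" -> "b"; condition 2 rewrote "b" -> "c".
  std::map<std::string, std::string> RewriteMap{{"a", "b"}, {"b", "c"}};
  // Chain: drop "a"'s own entry, rewrite its mapped value with the rest
  // of the map, and re-insert, so "a" ends up mapped to "c".
  std::string To = RewriteMap["a"];
  RewriteMap.erase("a");
  RewriteMap["a"] = rewrite(To, RewriteMap);
  assert(RewriteMap["a"] == "c");
}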
diff --git a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
index 104e8ceb79670..081c75fb6950e 100644
--- a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
@@ -430,8 +430,8 @@ bool InductiveRangeCheck::reassociateSubLHS(
auto getExprScaledIfOverflow = [&](Instruction::BinaryOps BinOp,
const SCEV *LHS,
const SCEV *RHS) -> const SCEV * {
- const SCEV *(ScalarEvolution::*Operation)(const SCEV *, const SCEV *,
- SCEV::NoWrapFlags, unsigned);
+ SCEVUse (ScalarEvolution:: *Operation)(SCEVUse, SCEVUse, SCEV::NoWrapFlags,
+ unsigned);
switch (BinOp) {
default:
llvm_unreachable("Unsupported binary op");
@@ -750,7 +750,7 @@ InductiveRangeCheck::computeSafeIterationSpace(ScalarEvolution &SE,
const SCEV *Zero = SE.getZero(M->getType());
  // This function returns a SCEV equal to 1 if X is non-negative, and 0 otherwise.
- auto SCEVCheckNonNegative = [&](const SCEV *X) {
+ auto SCEVCheckNonNegative = [&](const SCEV *X) -> const SCEV * {
const Loop *L = IndVar->getLoop();
const SCEV *Zero = SE.getZero(X->getType());
const SCEV *One = SE.getOne(X->getType());
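(Note: the added "-> const SCEV *" is not cosmetic. A lambda with a deduced return type must deduce the same type from every return statement; once some branches yield SCEVUse and others const SCEV *, deduction fails. A minimal reproduction with stand-in types:)

#include <cassert>

struct SCEV {};
// Stand-in for SCEVUse: implicitly convertible to const SCEV *.
struct SCEVUse {
  const SCEV *Ptr;
  operator const SCEV *() const { return Ptr; }
};

static SCEV ZeroS, OneS;
static SCEVUse getOne() { return {&OneS}; }

int main() {
  const SCEV *Zero = &ZeroS;
  // With a deduced return type this lambda would be ill-formed: the first
  // return deduces const SCEV *, the second SCEVUse. The trailing return
  // type makes both returns convert to const SCEV *.
  auto Check = [&](bool NonNeg) -> const SCEV * {
    if (!NonNeg)
      return Zero;   // const SCEV *
    return getOne(); // SCEVUse, implicitly converted
  };
  assert(Check(true) == &OneS);
  assert(Check(false) == &ZeroS);
}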
diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index c7e25c9f3d2c9..5f2110bb15544 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -842,8 +842,8 @@ bool LoopIdiomRecognize::processLoopMemSet(MemSetInst *MSI,
return false;
}
- const SCEV *PointerStrideSCEV = Ev->getOperand(1);
- const SCEV *MemsetSizeSCEV = SE->getSCEV(MSI->getLength());
+ SCEVUse PointerStrideSCEV = Ev->getOperand(1);
+ SCEVUse MemsetSizeSCEV = SE->getSCEV(MSI->getLength());
if (!PointerStrideSCEV || !MemsetSizeSCEV)
return false;
@@ -885,9 +885,9 @@ bool LoopIdiomRecognize::processLoopMemSet(MemSetInst *MSI,
// Compare positive direction PointerStrideSCEV with MemsetSizeSCEV
IsNegStride = PointerStrideSCEV->isNonConstantNegative();
- const SCEV *PositiveStrideSCEV =
- IsNegStride ? SE->getNegativeSCEV(PointerStrideSCEV)
- : PointerStrideSCEV;
+ SCEVUse PositiveStrideSCEV = IsNegStride
+ ? SE->getNegativeSCEV(PointerStrideSCEV)
+ : PointerStrideSCEV;
LLVM_DEBUG(dbgs() << " MemsetSizeSCEV: " << *MemsetSizeSCEV << "\n"
<< " PositiveStrideSCEV: " << *PositiveStrideSCEV
<< "\n");
diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 35a17d6060c94..f094d6af136a8 100644
--- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -3232,8 +3232,9 @@ void LSRInstance::GenerateIVChain(const IVChain &Chain,
// IncExpr was the result of subtraction of two narrow values, so must
// be signed.
const SCEV *IncExpr = SE.getNoopOrSignExtend(Inc.IncExpr, IntTy);
- LeftOverExpr = LeftOverExpr ?
- SE.getAddExpr(LeftOverExpr, IncExpr) : IncExpr;
+ LeftOverExpr = LeftOverExpr
+ ? SE.getAddExpr(LeftOverExpr, IncExpr).getPointer()
+ : IncExpr;
}
if (LeftOverExpr && !LeftOverExpr->isZero()) {
// Expand the IV increment.
@@ -3613,7 +3614,7 @@ static const SCEV *CollectSubexprs(const SCEV *S, const SCEVConstant *C,
for (const SCEV *S : Add->operands()) {
const SCEV *Remainder = CollectSubexprs(S, C, Ops, L, SE, Depth+1);
if (Remainder)
- Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder);
+ Ops.push_back(C ? SE.getMulExpr(C, Remainder).getPointer() : Remainder);
}
return nullptr;
} else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
@@ -3626,7 +3627,7 @@ static const SCEV *CollectSubexprs(const SCEV *S, const SCEVConstant *C,
// Split the non-zero AddRec unless it is part of a nested recurrence that
// does not pertain to this loop.
if (Remainder && (AR->getLoop() == L || !isa<SCEVAddRecExpr>(Remainder))) {
- Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder);
+ Ops.push_back(C ? SE.getMulExpr(C, Remainder).getPointer() : Remainder);
Remainder = nullptr;
}
if (Remainder != AR->getStart()) {
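(Note: the explicit .getPointer() calls above are needed because the two arms of the conditional operator are SCEVUse and const SCEV *; with implicit conversions presumably available in both directions, the conditional is ambiguous. A stand-in reproduction of the same shape, not the real SCEVUse:)

struct SCEV {};
struct SCEVUse {
  const SCEV *Ptr = nullptr;
  SCEVUse() = default;
  SCEVUse(const SCEV *P) : Ptr(P) {}            // implicit from const SCEV *
  operator const SCEV *() const { return Ptr; } // implicit to const SCEV *
  const SCEV *getPointer() const { return Ptr; }
};

static SCEV S1, S2;
static SCEVUse getAddExpr(SCEVUse, SCEVUse) { return {&S1}; }

int main() {
  const SCEV *LeftOverExpr = nullptr;
  const SCEV *IncExpr = &S2;
  // Ambiguous, since each arm converts to the other's type:
  //   LeftOverExpr = LeftOverExpr ? getAddExpr(LeftOverExpr, IncExpr) : IncExpr;
  // Naming the pointer on the SCEVUse arm resolves it:
  LeftOverExpr = LeftOverExpr
                     ? getAddExpr(LeftOverExpr, IncExpr).getPointer()
                     : IncExpr;
  return LeftOverExpr == &S2 ? 0 : 1;
}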
diff --git a/llvm/test/Transforms/IndVarSimplify/turn-to-invariant.ll b/llvm/test/Transforms/IndVarSimplify/turn-to-invariant.ll
index 326ee75e135b0..bc9e8004ec5df 100644
--- a/llvm/test/Transforms/IndVarSimplify/turn-to-invariant.ll
+++ b/llvm/test/Transforms/IndVarSimplify/turn-to-invariant.ll
@@ -846,11 +846,9 @@ failed:
define i32 @test_litter_conditions_constant(i32 %start, i32 %len) {
; CHECK-LABEL: @test_litter_conditions_constant(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[START:%.*]], -1
-; CHECK-NEXT: [[RANGE_CHECK_FIRST_ITER:%.*]] = icmp ult i32 [[TMP0]], [[LEN:%.*]]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[START]], [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[BACKEDGE:%.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[START:%.*]], [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[BACKEDGE:%.*]] ]
; CHECK-NEXT: [[CANONICAL_IV:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[CANONICAL_IV_NEXT:%.*]], [[BACKEDGE]] ]
; CHECK-NEXT: [[CONSTANT_CHECK:%.*]] = icmp ult i32 [[CANONICAL_IV]], 65635
; CHECK-NEXT: br i1 [[CONSTANT_CHECK]], label [[CONSTANT_CHECK_PASSED:%.*]], label [[CONSTANT_CHECK_FAILED:%.*]]
@@ -860,8 +858,10 @@ define i32 @test_litter_conditions_constant(i32 %start, i32 %len) {
; CHECK-NEXT: [[AND_1:%.*]] = and i1 [[ZERO_CHECK]], [[FAKE_1]]
; CHECK-NEXT: br i1 [[AND_1]], label [[RANGE_CHECK_BLOCK:%.*]], label [[FAILED_1:%.*]]
; CHECK: range_check_block:
+; CHECK-NEXT: [[IV_MINUS_1:%.*]] = add i32 [[IV]], -1
+; CHECK-NEXT: [[RANGE_CHECK:%.*]] = icmp ult i32 [[IV_MINUS_1]], [[LEN:%.*]]
; CHECK-NEXT: [[FAKE_2:%.*]] = call i1 @cond()
-; CHECK-NEXT: [[AND_2:%.*]] = and i1 [[RANGE_CHECK_FIRST_ITER]], [[FAKE_2]]
+; CHECK-NEXT: [[AND_2:%.*]] = and i1 [[RANGE_CHECK]], [[FAKE_2]]
; CHECK-NEXT: br i1 [[AND_2]], label [[BACKEDGE]], label [[FAILED_2:%.*]]
; CHECK: backedge:
; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], -1
diff --git a/llvm/unittests/Analysis/ScalarEvolutionTest.cpp b/llvm/unittests/Analysis/ScalarEvolutionTest.cpp
index 8ab22ef746c25..2ee98205d5ad2 100644
--- a/llvm/unittests/Analysis/ScalarEvolutionTest.cpp
+++ b/llvm/unittests/Analysis/ScalarEvolutionTest.cpp
@@ -63,10 +63,10 @@ static std::optional<APInt> computeConstantDifference(ScalarEvolution &SE,
return SE.computeConstantDifference(LHS, RHS);
}
- static bool matchURem(ScalarEvolution &SE, const SCEV *Expr, const SCEV *&LHS,
- const SCEV *&RHS) {
- return SE.matchURem(Expr, LHS, RHS);
- }
+static bool matchURem(ScalarEvolution &SE, const SCEV *Expr, SCEVUse &LHS,
+ SCEVUse &RHS) {
+ return SE.matchURem(Expr, LHS, RHS);
+}
static bool isImpliedCond(
ScalarEvolution &SE, ICmpInst::Predicate Pred, const SCEV *LHS,
@@ -1480,8 +1480,8 @@ TEST_F(ScalarEvolutionsTest, MatchURem) {
runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
for (auto *N : {"rem1", "rem2", "rem3", "rem5"}) {
auto *URemI = getInstructionByName(F, N);
- auto *S = SE.getSCEV(URemI);
- const SCEV *LHS, *RHS;
+ const SCEV *S = SE.getSCEV(URemI);
+ SCEVUse LHS, RHS;
EXPECT_TRUE(matchURem(SE, S, LHS, RHS));
EXPECT_EQ(LHS, SE.getSCEV(URemI->getOperand(0)));
EXPECT_EQ(RHS, SE.getSCEV(URemI->getOperand(1)));
@@ -1493,8 +1493,8 @@ TEST_F(ScalarEvolutionsTest, MatchURem) {
// match results are extended to the size of the input expression.
auto *Ext = getInstructionByName(F, "ext");
auto *URem1 = getInstructionByName(F, "rem4");
- auto *S = SE.getSCEV(Ext);
- const SCEV *LHS, *RHS;
+ const SCEV *S = SE.getSCEV(Ext);
+ SCEVUse LHS, RHS;
EXPECT_TRUE(matchURem(SE, S, LHS, RHS));
EXPECT_NE(LHS, SE.getSCEV(URem1->getOperand(0)));
// RHS and URem1->getOperand(1) have different widths, so compare the
>From 3a3232d0a81bc911fbe33acbb30eee7167e00c10 Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Mon, 13 May 2024 18:07:55 +0100
Subject: [PATCH 3/4] !fixup use raw pointer (const SCEV * + lower bits) for
AddPointer.
---
llvm/include/llvm/Analysis/ScalarEvolution.h | 2 +
llvm/lib/Analysis/ScalarEvolution.cpp | 51 ++++++++++----------
2 files changed, 28 insertions(+), 25 deletions(-)
diff --git a/llvm/include/llvm/Analysis/ScalarEvolution.h b/llvm/include/llvm/Analysis/ScalarEvolution.h
index 2859df9964555..1da7b9255b4d8 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -81,6 +81,8 @@ class SCEVUse : public PointerIntPair<const SCEV *, 2> {
const SCEV *operator->() const { return getPointer(); }
const SCEV *operator->() { return getPointer(); }
+ void *getRawPointer() { return getOpaqueValue(); }
+
/// Print out the internal representation of this scalar to the specified
/// stream. This should really only be used for debugging purposes.
void print(raw_ostream &OS) const;
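(Note: SCEVUse packs two tag bits into the low bits of an aligned const SCEV *, and getRawPointer exposes the combined representation so it can feed FoldingSet profiling: two uses with the same pointer but different bits must profile differently. A simplified sketch of what PointerIntPair stores; the real llvm::PointerIntPair handles traits and alignment generically:)

#include <cassert>
#include <cstdint>

// Simplified PointerIntPair<const T *, 2>: relies on T being at least
// 4-byte aligned so the two low bits of the pointer are free.
template <typename T> class PtrIntPair {
  uintptr_t Value = 0;

public:
  PtrIntPair(const T *P, unsigned Bits) {
    uintptr_t Raw = reinterpret_cast<uintptr_t>(P);
    assert((Raw & 3) == 0 && Bits < 4 && "low bits must be free");
    Value = Raw | Bits;
  }
  const T *getPointer() const {
    return reinterpret_cast<const T *>(Value & ~uintptr_t(3));
  }
  unsigned getInt() const { return unsigned(Value & 3); }
  // What SCEVUse::getRawPointer returns: pointer and bits together,
  // usable as a single unique key.
  void *getOpaqueValue() const { return reinterpret_cast<void *>(Value); }
};

struct alignas(4) SCEV { int Dummy; };

int main() {
  SCEV S;
  PtrIntPair<SCEV> Plain(&S, 0), Flagged(&S, 1);
  assert(Plain.getPointer() == Flagged.getPointer());
  assert(Plain.getOpaqueValue() != Flagged.getOpaqueValue());
}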
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 605b66f2ad633..f9706404e439a 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -1023,7 +1023,7 @@ SCEVUse ScalarEvolution::getLosslessPtrToIntExpr(SCEVUse Op, unsigned Depth) {
// What would be an ID for such a SCEV cast expression?
FoldingSetNodeID ID;
ID.AddInteger(scPtrToInt);
- ID.AddPointer(Op);
+ ID.AddPointer(Op.getRawPointer());
void *IP = nullptr;
@@ -1154,7 +1154,7 @@ SCEVUse ScalarEvolution::getTruncateExpr(SCEVUse Op, Type *Ty, unsigned Depth) {
FoldingSetNodeID ID;
ID.AddInteger(scTruncate);
- ID.AddPointer(Op);
+ ID.AddPointer(Op.getRawPointer());
ID.AddPointer(Ty);
void *IP = nullptr;
if (SCEVUse S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
@@ -1475,8 +1475,8 @@ bool ScalarEvolution::proveNoWrapByVaryingStart(SCEVUse Start, SCEVUse Step,
FoldingSetNodeID ID;
ID.AddInteger(scAddRecExpr);
- ID.AddPointer(PreStart);
- ID.AddPointer(Step);
+ ID.AddPointer(PreStart.getRawPointer());
+ ID.AddPointer(Step.getRawPointer());
ID.AddPointer(L);
void *IP = nullptr;
const auto *PreAR =
@@ -1595,7 +1595,7 @@ SCEVUse ScalarEvolution::getZeroExtendExprImpl(SCEVUse Op, Type *Ty,
// computed a SCEV for this Op and Ty.
FoldingSetNodeID ID;
ID.AddInteger(scZeroExtend);
- ID.AddPointer(Op);
+ ID.AddPointer(Op.getRawPointer());
ID.AddPointer(Ty);
void *IP = nullptr;
if (SCEVUse S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
@@ -1936,7 +1936,7 @@ SCEVUse ScalarEvolution::getSignExtendExprImpl(SCEVUse Op, Type *Ty,
// computed a SCEV for this Op and Ty.
FoldingSetNodeID ID;
ID.AddInteger(scSignExtend);
- ID.AddPointer(Op);
+ ID.AddPointer(Op.getRawPointer());
ID.AddPointer(Ty);
void *IP = nullptr;
if (SCEVUse S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
@@ -2242,7 +2242,7 @@ SCEVUse ScalarEvolution::getAnyExtendExpr(SCEVUse Op, Type *Ty) {
/// may be exposed. This helps getAddRecExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
-static bool CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
+static bool CollectAddOperandsWithScales(DenseMap<SCEVUse, APInt> &M,
SmallVectorImpl<SCEVUse> &NewOps,
APInt &AccumulatedConstant,
ArrayRef<SCEVUse> Ops,
@@ -2290,7 +2290,7 @@ static bool CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
}
} else {
// An ordinary operand. Update the map.
- std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
+ std::pair<DenseMap<SCEVUse, APInt>::iterator, bool> Pair =
M.insert({Ops[i], Scale});
if (Pair.second) {
NewOps.push_back(Pair.first->first);
@@ -2762,7 +2762,7 @@ SCEVUse ScalarEvolution::getAddExpr(SmallVectorImpl<SCEVUse> &Ops,
// operands multiplied by constant values.
if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
uint64_t BitWidth = getTypeSizeInBits(Ty);
- DenseMap<const SCEV *, APInt> M;
+ DenseMap<SCEVUse, APInt> M;
SmallVector<SCEVUse, 8> NewOps;
APInt AccumulatedConstant(BitWidth, 0);
if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
@@ -2999,7 +2999,7 @@ SCEVUse ScalarEvolution::getOrCreateAddExpr(ArrayRef<SCEVUse> Ops,
FoldingSetNodeID ID;
ID.AddInteger(scAddExpr);
for (SCEVUse Op : Ops)
- ID.AddPointer(Op);
+ ID.AddPointer(Op.getRawPointer());
void *IP = nullptr;
SCEVAddExpr *S =
static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
@@ -3021,7 +3021,7 @@ SCEVUse ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<SCEVUse> Ops,
FoldingSetNodeID ID;
ID.AddInteger(scAddRecExpr);
for (SCEVUse Op : Ops)
- ID.AddPointer(Op);
+ ID.AddPointer(Op.getRawPointer());
ID.AddPointer(L);
void *IP = nullptr;
SCEVAddRecExpr *S =
@@ -3044,7 +3044,7 @@ SCEVUse ScalarEvolution::getOrCreateMulExpr(ArrayRef<SCEVUse> Ops,
FoldingSetNodeID ID;
ID.AddInteger(scMulExpr);
for (SCEVUse Op : Ops)
- ID.AddPointer(Op);
+ ID.AddPointer(Op.getRawPointer());
void *IP = nullptr;
SCEVMulExpr *S =
static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
@@ -3444,8 +3444,8 @@ SCEVUse ScalarEvolution::getUDivExpr(SCEVUse LHS, SCEVUse RHS) {
FoldingSetNodeID ID;
ID.AddInteger(scUDivExpr);
- ID.AddPointer(LHS);
- ID.AddPointer(RHS);
+ ID.AddPointer(LHS.getRawPointer());
+ ID.AddPointer(RHS.getRawPointer());
void *IP = nullptr;
if (SCEVUse S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
return S;
@@ -3511,8 +3511,8 @@ SCEVUse ScalarEvolution::getUDivExpr(SCEVUse LHS, SCEVUse RHS) {
// already cached.
ID.clear();
ID.AddInteger(scUDivExpr);
- ID.AddPointer(LHS);
- ID.AddPointer(RHS);
+ ID.AddPointer(LHS.getRawPointer());
+ ID.AddPointer(RHS.getRawPointer());
IP = nullptr;
if (SCEVUse S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
return S;
@@ -3843,7 +3843,7 @@ SCEV *ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType,
FoldingSetNodeID ID;
ID.AddInteger(SCEVType);
for (SCEVUse Op : Ops)
- ID.AddPointer(Op);
+ ID.AddPointer(Op.getRawPointer());
void *IP = nullptr;
return UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
}
@@ -3986,7 +3986,7 @@ SCEVUse ScalarEvolution::getMinMaxExpr(SCEVTypes Kind,
FoldingSetNodeID ID;
ID.AddInteger(Kind);
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- ID.AddPointer(Ops[i]);
+ ID.AddPointer(Ops[i].getRawPointer());
void *IP = nullptr;
SCEVUse ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
if (ExistingSCEV)
@@ -4373,7 +4373,7 @@ ScalarEvolution::getSequentialMinMaxExpr(SCEVTypes Kind,
FoldingSetNodeID ID;
ID.AddInteger(Kind);
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- ID.AddPointer(Ops[i]);
+ ID.AddPointer(Ops[i].getRawPointer());
void *IP = nullptr;
SCEVUse ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
if (ExistingSCEV)
@@ -14430,8 +14430,8 @@ ScalarEvolution::getComparePredicate(const ICmpInst::Predicate Pred,
// Unique this node based on the arguments
ID.AddInteger(SCEVPredicate::P_Compare);
ID.AddInteger(Pred);
- ID.AddPointer(LHS);
- ID.AddPointer(RHS);
+ ID.AddPointer(LHS.getRawPointer());
+ ID.AddPointer(RHS.getRawPointer());
void *IP = nullptr;
if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
return S;
@@ -14447,6 +14447,7 @@ const SCEVPredicate *ScalarEvolution::getWrapPredicate(
FoldingSetNodeID ID;
// Unique this node based on the arguments
ID.AddInteger(SCEVPredicate::P_Wrap);
+ // TODO: Use SCEVUse
ID.AddPointer(AR);
ID.AddInteger(AddedFlags);
void *IP = nullptr;
@@ -14960,10 +14961,10 @@ SCEVUse ScalarEvolution::computeSymbolicMaxBackedgeTakenCount(const Loop *L) {
/// in the map. It skips AddRecExpr because we cannot guarantee that the
/// replacement is loop invariant in the loop of the AddRec.
class SCEVLoopGuardRewriter : public SCEVRewriteVisitor<SCEVLoopGuardRewriter> {
- const DenseMap<const SCEV *, SCEVUse> ⤅
+ const DenseMap<SCEVUse, SCEVUse> ⤅
public:
- SCEVLoopGuardRewriter(ScalarEvolution &SE, DenseMap<const SCEV *, SCEVUse> &M)
+ SCEVLoopGuardRewriter(ScalarEvolution &SE, DenseMap<SCEVUse, SCEVUse> &M)
: SCEVRewriteVisitor(SE), Map(M) {}
SCEVUse visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; }
@@ -15026,7 +15027,7 @@ SCEVUse ScalarEvolution::applyLoopGuards(SCEVUse Expr, const Loop *L) {
SmallVector<SCEVUse> ExprsToRewrite;
auto CollectCondition = [&](ICmpInst::Predicate Predicate, SCEVUse LHS,
SCEVUse RHS,
- DenseMap<const SCEV *, SCEVUse> &RewriteMap) {
+ DenseMap<SCEVUse, SCEVUse> &RewriteMap) {
// WARNING: It is generally unsound to apply any wrap flags to the proposed
// replacement SCEV which isn't directly implied by the structure of that
// SCEV. In particular, using contextual facts to imply flags is *NOT*
@@ -15396,7 +15397,7 @@ SCEVUse ScalarEvolution::applyLoopGuards(SCEVUse Expr, const Loop *L) {
  // Conditions are processed in reverse order, so the earliest condition is
// processed first. This ensures the SCEVs with the shortest dependency chains
// are constructed first.
- DenseMap<const SCEV *, SCEVUse> RewriteMap;
+ DenseMap<SCEVUse, SCEVUse> RewriteMap;
for (auto [Term, EnterIfTrue] : reverse(Terms)) {
SmallVector<Value *, 8> Worklist;
SmallPtrSet<Value *, 8> Visited;
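(Note: using SCEVUse as the DenseMap key works because LLVM provides a DenseMapInfo for PointerIntPair that hashes the combined pointer-plus-bits value. A toy demonstration with std::unordered_map of why hashing the combined value keeps same-pointer, different-bits keys distinct:)

#include <cassert>
#include <cstdint>
#include <functional>
#include <unordered_map>

// Toy SCEVUse: a pointer with tag bits folded into the low bits.
struct Use {
  uintptr_t V;
  void *opaque() const { return reinterpret_cast<void *>(V); }
  bool operator==(const Use &O) const { return V == O.V; }
};

// Hash pointer and tag bits together, as DenseMapInfo<PointerIntPair> does.
struct UseHash {
  size_t operator()(const Use &U) const {
    return std::hash<void *>()(U.opaque());
  }
};

int main() {
  alignas(4) static int Obj;
  uintptr_t P = reinterpret_cast<uintptr_t>(&Obj);
  Use Plain{P}, Flagged{P | 1};
  std::unordered_map<Use, int, UseHash> M;
  M[Plain] = 1;
  M[Flagged] = 2;
  assert(M.size() == 2); // distinct keys despite the shared pointer
}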
>From 07989af69f9b71787ff1d468ff025ae30eb7b4bc Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Wed, 22 May 2024 15:30:38 +0100
Subject: [PATCH 4/4] !fix formatting
---
llvm/lib/Analysis/ScalarEvolution.cpp | 10 +++++-----
.../Scalar/InductiveRangeCheckElimination.cpp | 4 ++--
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index f9706404e439a..b21ef9fbadcd2 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -1274,8 +1274,8 @@ static SCEVUse getUnsignedOverflowLimitForStep(SCEVUse Step,
namespace {
struct ExtendOpTraitsBase {
- typedef SCEVUse (ScalarEvolution:: *GetExtendExprTy)(SCEVUse, Type *,
- unsigned);
+ typedef SCEVUse (ScalarEvolution::*GetExtendExprTy)(SCEVUse, Type *,
+ unsigned);
};
// Used to make code generic over signed and unsigned overflow.
@@ -2309,8 +2309,8 @@ static bool CollectAddOperandsWithScales(DenseMap<SCEVUse, APInt> &M,
bool ScalarEvolution::willNotOverflow(Instruction::BinaryOps BinOp, bool Signed,
SCEVUse LHS, SCEVUse RHS,
const Instruction *CtxI) {
- SCEVUse (ScalarEvolution:: *Operation)(SCEVUse, SCEVUse, SCEV::NoWrapFlags,
- unsigned);
+ SCEVUse (ScalarEvolution::*Operation)(SCEVUse, SCEVUse, SCEV::NoWrapFlags,
+ unsigned);
switch (BinOp) {
default:
llvm_unreachable("Unsupported binary op");
@@ -2325,7 +2325,7 @@ bool ScalarEvolution::willNotOverflow(Instruction::BinaryOps BinOp, bool Signed,
break;
}
- SCEVUse (ScalarEvolution:: *Extension)(SCEVUse, Type *, unsigned) =
+ SCEVUse (ScalarEvolution::*Extension)(SCEVUse, Type *, unsigned) =
Signed ? &ScalarEvolution::getSignExtendExpr
: &ScalarEvolution::getZeroExtendExpr;
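(Note: the reformatting only normalizes the "::*" spacing, but the construct is worth spelling out: Operation and Extension are pointers to member functions, so one code path can select between getSignExtendExpr and getZeroExtendExpr and call either through the same variable. A standalone sketch of the idiom:)

#include <cassert>

struct SE {
  int getSignExtendExpr(int V, int /*Ty*/) { return V - 1; }
  int getZeroExtendExpr(int V, int /*Ty*/) { return V + 1; }
};

int main() {
  bool Signed = true;
  // Pointer-to-member-function, the same shape as the Extension variable.
  int (SE::*Extension)(int, int) =
      Signed ? &SE::getSignExtendExpr : &SE::getZeroExtendExpr;
  SE S;
  // Invocation needs an object and the .* (or ->*) operator.
  assert((S.*Extension)(5, 0) == 4);
}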
diff --git a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
index 081c75fb6950e..78375d14d204f 100644
--- a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
@@ -430,8 +430,8 @@ bool InductiveRangeCheck::reassociateSubLHS(
auto getExprScaledIfOverflow = [&](Instruction::BinaryOps BinOp,
const SCEV *LHS,
const SCEV *RHS) -> const SCEV * {
- SCEVUse (ScalarEvolution:: *Operation)(SCEVUse, SCEVUse, SCEV::NoWrapFlags,
- unsigned);
+ SCEVUse (ScalarEvolution::*Operation)(SCEVUse, SCEVUse, SCEV::NoWrapFlags,
+ unsigned);
switch (BinOp) {
default:
llvm_unreachable("Unsupported binary op");