[llvm] [polly] [SCEV] Introduce SCEVUse, use it instead of const SCEV * (NFCI). (PR #91961)
Florian Hahn via llvm-commits
llvm-commits at lists.llvm.org
Fri Mar 13 03:04:13 PDT 2026
https://github.com/fhahn updated https://github.com/llvm/llvm-project/pull/91961
>From a612db76f6340cbd21d80e316d795a55998406fc Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Wed, 18 Feb 2026 12:36:13 +0000
Subject: [PATCH 1/4] [SCEV] Introduce SCEVUse wrapper type (NFC)
Add SCEVUse as a PointerIntPair wrapper around const SCEV * to prepare
for storing additional per-use information.
This commit contains the mechanical changes of adding an initial SCEVUse
wrapper and updating all relevant interfaces to take SCEVUse. Note that
currently the integer part is never set, and all SCEVUses are
considered canonical.
---
llvm/include/llvm/Analysis/ScalarEvolution.h | 164 ++++--
.../Analysis/ScalarEvolutionExpressions.h | 148 ++---
.../Analysis/ScalarEvolutionPatternMatch.h | 16 +-
.../llvm/Transforms/Scalar/NaryReassociate.h | 15 +-
llvm/lib/Analysis/Delinearization.cpp | 6 +-
llvm/lib/Analysis/ScalarEvolution.cpp | 520 ++++++++++--------
llvm/lib/Analysis/ScalarEvolutionDivision.cpp | 4 +-
.../Analysis/ScalarEvolutionNormalization.cpp | 4 +-
.../lib/Target/ARM/ARMTargetTransformInfo.cpp | 2 +-
llvm/lib/Transforms/Scalar/IndVarSimplify.cpp | 7 +-
.../Scalar/InductiveRangeCheckElimination.cpp | 2 +-
.../Transforms/Scalar/LoopDataPrefetch.cpp | 7 +-
llvm/lib/Transforms/Scalar/LoopFuse.cpp | 11 +-
.../Transforms/Scalar/LoopIdiomRecognize.cpp | 8 +-
.../lib/Transforms/Scalar/LoopPredication.cpp | 9 +-
.../Transforms/Scalar/LoopStrengthReduce.cpp | 65 ++-
.../lib/Transforms/Scalar/NaryReassociate.cpp | 51 +-
.../Scalar/StraightLineStrengthReduce.cpp | 11 +-
.../Utils/ScalarEvolutionExpander.cpp | 6 +-
.../Vectorize/LoopVectorizationLegality.cpp | 3 +-
llvm/lib/Transforms/Vectorize/VPlanUtils.cpp | 35 +-
.../Analysis/ScalarEvolutionTest.cpp | 21 +-
.../Utils/ScalarEvolutionExpanderTest.cpp | 16 +-
polly/include/polly/CodeGen/BlockGenerators.h | 1 -
polly/include/polly/Support/ScopHelper.h | 3 +-
polly/lib/Analysis/ScopDetection.cpp | 2 +-
polly/lib/Support/SCEVValidator.cpp | 4 +-
polly/lib/Support/ScopHelper.cpp | 32 +-
28 files changed, 653 insertions(+), 520 deletions(-)
diff --git a/llvm/include/llvm/Analysis/ScalarEvolution.h b/llvm/include/llvm/Analysis/ScalarEvolution.h
index 6aff53a1e7b70..b8f2f02255506 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -66,6 +66,76 @@ enum SCEVTypes : unsigned short;
LLVM_ABI extern bool VerifySCEV;
+class SCEV;
+
+struct SCEVUse : PointerIntPair<const SCEV *, 2> {
+ SCEVUse() : PointerIntPair() { setFromOpaqueValue(nullptr); }
+ SCEVUse(const SCEV *S) : PointerIntPair() { setFromOpaqueValue((void *)S); }
+ SCEVUse(const SCEV *S, unsigned Flags) : PointerIntPair(S, Flags) {}
+
+ operator const SCEV *() const { return getPointer(); }
+ const SCEV *operator->() const { return getPointer(); }
+
+ void *getRawPointer() const { return getOpaqueValue(); }
+
+ unsigned getFlags() const { return getInt(); }
+
+ bool operator==(const SCEVUse &RHS) const {
+ return getRawPointer() == RHS.getRawPointer();
+ }
+
+ bool operator==(const SCEV *RHS) const { return getRawPointer() == RHS; }
+
+ /// Print out the internal representation of this scalar to the specified
+ /// stream. This should really only be used for debugging purposes.
+ void print(raw_ostream &OS) const;
+
+ /// This method is used for debugging.
+ void dump() const;
+};
+
+/// Provide PointerLikeTypeTraits for SCEVUse, so it can be used with
+/// SmallPtrSet, among others.
+template <> struct PointerLikeTypeTraits<SCEVUse> {
+ static inline void *getAsVoidPointer(SCEVUse U) { return U.getOpaqueValue(); }
+ static inline SCEVUse getFromVoidPointer(void *P) {
+ SCEVUse U;
+ U.setFromOpaqueValue(P);
+ return U;
+ }
+
+ /// The Low bits are used by the PointerIntPair.
+ static constexpr int NumLowBitsAvailable = 0;
+};
+
+template <> struct DenseMapInfo<SCEVUse> {
+ static constexpr uintptr_t Log2MaxAlign = 12;
+
+ static inline SCEVUse getEmptyKey() {
+ uintptr_t Val = static_cast<uintptr_t>(-1);
+ return PointerLikeTypeTraits<SCEVUse>::getFromVoidPointer((void *)Val);
+ }
+
+ static inline SCEVUse getTombstoneKey() {
+ uintptr_t Val = static_cast<uintptr_t>(-2);
+ return PointerLikeTypeTraits<SCEVUse>::getFromVoidPointer((void *)Val);
+ }
+
+ static unsigned getHashValue(SCEVUse U) { return hash_value(U.getRawPointer()); }
+
+ static bool isEqual(const SCEVUse LHS, const SCEVUse RHS) {
+ return LHS.getRawPointer() == RHS.getRawPointer();
+ }
+};
+
+template<> struct simplify_type<SCEVUse> {
+ using SimpleType = const SCEV *;
+
+ static SimpleType getSimplifiedValue(SCEVUse &Val) {
+ return Val.getPointer();
+ }
+};
+
/// This class represents an analyzed expression in the program. These are
/// opaque objects that the client is not allowed to do much with directly.
///
@@ -144,7 +214,7 @@ class SCEV : public FoldingSetNode {
LLVM_ABI Type *getType() const;
/// Return operands of this SCEV expression.
- LLVM_ABI ArrayRef<const SCEV *> operands() const;
+ LLVM_ABI ArrayRef<SCEVUse> operands() const;
/// Return true if the expression is a constant zero.
LLVM_ABI bool isZero() const;
@@ -554,6 +624,7 @@ class ScalarEvolution {
/// Notify this ScalarEvolution that \p User directly uses SCEVs in \p Ops.
LLVM_ABI void registerUser(const SCEV *User, ArrayRef<const SCEV *> Ops);
+ LLVM_ABI void registerUser(const SCEV *User, ArrayRef<SCEVUse> Ops);
/// Return true if the SCEV expression contains an undef value.
LLVM_ABI bool containsUndefs(const SCEV *S) const;
@@ -592,46 +663,47 @@ class ScalarEvolution {
unsigned Depth = 0);
LLVM_ABI const SCEV *getCastExpr(SCEVTypes Kind, const SCEV *Op, Type *Ty);
LLVM_ABI const SCEV *getAnyExtendExpr(const SCEV *Op, Type *Ty);
- LLVM_ABI const SCEV *getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
+
+ LLVM_ABI const SCEV *getAddExpr(SmallVectorImpl<SCEVUse> &Ops,
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
unsigned Depth = 0);
- const SCEV *getAddExpr(const SCEV *LHS, const SCEV *RHS,
+ const SCEV *getAddExpr(SCEVUse LHS, SCEVUse RHS,
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
unsigned Depth = 0) {
- SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
+ SmallVector<SCEVUse, 2> Ops = {LHS, RHS};
return getAddExpr(Ops, Flags, Depth);
}
- const SCEV *getAddExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
+ const SCEV *getAddExpr(SCEVUse Op0, SCEVUse Op1, SCEVUse Op2,
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
unsigned Depth = 0) {
- SmallVector<const SCEV *, 3> Ops = {Op0, Op1, Op2};
+ SmallVector<SCEVUse, 3> Ops = {Op0, Op1, Op2};
return getAddExpr(Ops, Flags, Depth);
}
- LLVM_ABI const SCEV *getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
+ LLVM_ABI const SCEV *getMulExpr(SmallVectorImpl<SCEVUse> &Ops,
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
unsigned Depth = 0);
- const SCEV *getMulExpr(const SCEV *LHS, const SCEV *RHS,
+ const SCEV *getMulExpr(SCEVUse LHS, SCEVUse RHS,
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
unsigned Depth = 0) {
- SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
+ SmallVector<SCEVUse, 2> Ops = {LHS, RHS};
return getMulExpr(Ops, Flags, Depth);
}
- const SCEV *getMulExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
+ const SCEV *getMulExpr(SCEVUse Op0, SCEVUse Op1, SCEVUse Op2,
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
unsigned Depth = 0) {
- SmallVector<const SCEV *, 3> Ops = {Op0, Op1, Op2};
+ SmallVector<SCEVUse, 3> Ops = {Op0, Op1, Op2};
return getMulExpr(Ops, Flags, Depth);
}
- LLVM_ABI const SCEV *getUDivExpr(const SCEV *LHS, const SCEV *RHS);
- LLVM_ABI const SCEV *getUDivExactExpr(const SCEV *LHS, const SCEV *RHS);
- LLVM_ABI const SCEV *getURemExpr(const SCEV *LHS, const SCEV *RHS);
- LLVM_ABI const SCEV *getAddRecExpr(const SCEV *Start, const SCEV *Step,
- const Loop *L, SCEV::NoWrapFlags Flags);
- LLVM_ABI const SCEV *getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
+ LLVM_ABI const SCEV *getUDivExpr(SCEVUse LHS, SCEVUse RHS);
+ LLVM_ABI const SCEV *getUDivExactExpr(SCEVUse LHS, SCEVUse RHS);
+ LLVM_ABI const SCEV *getURemExpr(SCEVUse LHS, SCEVUse RHS);
+ LLVM_ABI const SCEV *getAddRecExpr(SCEVUse Start, SCEVUse Step, const Loop *L,
+ SCEV::NoWrapFlags Flags);
+ LLVM_ABI const SCEV *getAddRecExpr(SmallVectorImpl<SCEVUse> &Operands,
const Loop *L, SCEV::NoWrapFlags Flags);
- const SCEV *getAddRecExpr(const SmallVectorImpl<const SCEV *> &Operands,
+ const SCEV *getAddRecExpr(const SmallVectorImpl<SCEVUse> &Operands,
const Loop *L, SCEV::NoWrapFlags Flags) {
- SmallVector<const SCEV *, 4> NewOp(Operands.begin(), Operands.end());
+ SmallVector<SCEVUse, 4> NewOp(Operands.begin(), Operands.end());
return getAddRecExpr(NewOp, L, Flags);
}
@@ -649,26 +721,25 @@ class ScalarEvolution {
/// instead we use IndexExprs.
/// \p IndexExprs The expressions for the indices.
LLVM_ABI const SCEV *getGEPExpr(GEPOperator *GEP,
- ArrayRef<const SCEV *> IndexExprs);
- LLVM_ABI const SCEV *getGEPExpr(const SCEV *BaseExpr,
- ArrayRef<const SCEV *> IndexExprs,
+ ArrayRef<SCEVUse> IndexExprs);
+ LLVM_ABI const SCEV *getGEPExpr(SCEVUse BaseExpr,
+ ArrayRef<SCEVUse> IndexExprs,
Type *SrcElementTy,
GEPNoWrapFlags NW = GEPNoWrapFlags::none());
LLVM_ABI const SCEV *getAbsExpr(const SCEV *Op, bool IsNSW);
LLVM_ABI const SCEV *getMinMaxExpr(SCEVTypes Kind,
- SmallVectorImpl<const SCEV *> &Operands);
+ SmallVectorImpl<SCEVUse> &Operands);
LLVM_ABI const SCEV *
- getSequentialMinMaxExpr(SCEVTypes Kind,
- SmallVectorImpl<const SCEV *> &Operands);
- LLVM_ABI const SCEV *getSMaxExpr(const SCEV *LHS, const SCEV *RHS);
- LLVM_ABI const SCEV *getSMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
- LLVM_ABI const SCEV *getUMaxExpr(const SCEV *LHS, const SCEV *RHS);
- LLVM_ABI const SCEV *getUMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
- LLVM_ABI const SCEV *getSMinExpr(const SCEV *LHS, const SCEV *RHS);
- LLVM_ABI const SCEV *getSMinExpr(SmallVectorImpl<const SCEV *> &Operands);
- LLVM_ABI const SCEV *getUMinExpr(const SCEV *LHS, const SCEV *RHS,
+ getSequentialMinMaxExpr(SCEVTypes Kind, SmallVectorImpl<SCEVUse> &Operands);
+ LLVM_ABI const SCEV *getSMaxExpr(SCEVUse LHS, SCEVUse RHS);
+ LLVM_ABI const SCEV *getSMaxExpr(SmallVectorImpl<SCEVUse> &Operands);
+ LLVM_ABI const SCEV *getUMaxExpr(SCEVUse LHS, SCEVUse RHS);
+ LLVM_ABI const SCEV *getUMaxExpr(SmallVectorImpl<SCEVUse> &Operands);
+ LLVM_ABI const SCEV *getSMinExpr(SCEVUse LHS, SCEVUse RHS);
+ LLVM_ABI const SCEV *getSMinExpr(SmallVectorImpl<SCEVUse> &Operands);
+ LLVM_ABI const SCEV *getUMinExpr(SCEVUse LHS, SCEVUse RHS,
bool Sequential = false);
- LLVM_ABI const SCEV *getUMinExpr(SmallVectorImpl<const SCEV *> &Operands,
+ LLVM_ABI const SCEV *getUMinExpr(SmallVectorImpl<SCEVUse> &Operands,
bool Sequential = false);
LLVM_ABI const SCEV *getUnknown(Value *V);
LLVM_ABI const SCEV *getCouldNotCompute();
@@ -717,7 +788,7 @@ class ScalarEvolution {
/// To compute the difference between two unrelated pointers, you can
/// explicitly convert the arguments using getPtrToIntExpr(), for pointer
/// types that support it.
- LLVM_ABI const SCEV *getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
+ LLVM_ABI const SCEV *getMinusSCEV(SCEVUse LHS, SCEVUse RHS,
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
unsigned Depth = 0);
@@ -773,9 +844,8 @@ class ScalarEvolution {
/// Promote the operands to the wider of the types using zero-extension, and
/// then perform a umin operation with them. N-ary function.
- LLVM_ABI const SCEV *
- getUMinFromMismatchedTypes(SmallVectorImpl<const SCEV *> &Ops,
- bool Sequential = false);
+ LLVM_ABI const SCEV *getUMinFromMismatchedTypes(SmallVectorImpl<SCEVUse> &Ops,
+ bool Sequential = false);
/// Transitively follow the chain of pointer-type operands until reaching a
/// SCEV that does not have a single pointer operand. This returns a
@@ -2177,7 +2247,7 @@ class ScalarEvolution {
void forgetBackedgeTakenCounts(const Loop *L, bool Predicated);
/// Drop memoized information for all \p SCEVs.
- void forgetMemoizedResults(ArrayRef<const SCEV *> SCEVs);
+ void forgetMemoizedResults(ArrayRef<SCEVUse> SCEVs);
/// Helper for forgetMemoizedResults.
void forgetMemoizedResultsImpl(const SCEV *S);
@@ -2186,7 +2256,7 @@ class ScalarEvolution {
/// from ValueExprMap and collect SCEV expressions in \p ToForget
void visitAndClearUsers(SmallVectorImpl<Instruction *> &Worklist,
SmallPtrSetImpl<Instruction *> &Visited,
- SmallVectorImpl<const SCEV *> &ToForget);
+ SmallVectorImpl<SCEVUse> &ToForget);
/// Erase Value from ValueExprMap and ExprValueMap.
void eraseValueFromMap(Value *V);
@@ -2236,12 +2306,12 @@ class ScalarEvolution {
/// Return a scope which provides an upper bound on the defining scope for
/// a SCEV with the operands in Ops. The outparam Precise is set if the
/// bound found is a precise bound (i.e. must be the defining scope.)
- const Instruction *getDefiningScopeBound(ArrayRef<const SCEV *> Ops,
+ const Instruction *getDefiningScopeBound(ArrayRef<SCEVUse> Ops,
bool &Precise);
/// Wrapper around the above for cases which don't care if the bound
/// is precise.
- const Instruction *getDefiningScopeBound(ArrayRef<const SCEV *> Ops);
+ const Instruction *getDefiningScopeBound(ArrayRef<SCEVUse> Ops);
/// Given two instructions in the same function, return true if we can
/// prove B must execute given A executes.
@@ -2315,16 +2385,16 @@ class ScalarEvolution {
bool canIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, bool IsSigned);
/// Get add expr already created or create a new one.
- const SCEV *getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
+ const SCEV *getOrCreateAddExpr(ArrayRef<SCEVUse> Ops,
SCEV::NoWrapFlags Flags);
/// Get mul expr already created or create a new one.
- const SCEV *getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
+ const SCEV *getOrCreateMulExpr(ArrayRef<SCEVUse> Ops,
SCEV::NoWrapFlags Flags);
// Get addrec expr already created or create a new one.
- const SCEV *getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
- const Loop *L, SCEV::NoWrapFlags Flags);
+ const SCEV *getOrCreateAddRecExpr(ArrayRef<SCEVUse> Ops, const Loop *L,
+ SCEV::NoWrapFlags Flags);
/// Return x if \p Val is f(x) where f is a 1-1 function.
const SCEV *stripInjectiveFunctions(const SCEV *Val) const;
@@ -2337,6 +2407,7 @@ class ScalarEvolution {
/// Look for a SCEV expression with type `SCEVType` and operands `Ops` in
/// `UniqueSCEVs`. Return if found, else nullptr.
SCEV *findExistingSCEVInCache(SCEVTypes SCEVType, ArrayRef<const SCEV *> Ops);
+ SCEV *findExistingSCEVInCache(SCEVTypes SCEVType, ArrayRef<SCEVUse> Ops);
/// Get reachable blocks in this function, making limited use of SCEV
/// reasoning about conditions.
@@ -2345,8 +2416,7 @@ class ScalarEvolution {
/// Return the given SCEV expression with a new set of operands.
/// This preserves the origial nowrap flags.
- const SCEV *getWithOperands(const SCEV *S,
- SmallVectorImpl<const SCEV *> &NewOps);
+ const SCEV *getWithOperands(const SCEV *S, SmallVectorImpl<SCEVUse> &NewOps);
FoldingSet<SCEV> UniqueSCEVs;
FoldingSet<SCEVPredicate> UniquePreds;
diff --git a/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h b/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
index 2fd25dbb9062d..4a8a8e1b9c035 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
@@ -58,6 +58,10 @@ enum SCEVTypes : unsigned short {
scCouldNotCompute
};
+inline SmallVector<SCEVUse> toSCEV(ArrayRef<SCEVUse> Ops) {
+ return SmallVector<SCEVUse>(Ops.begin(), Ops.end());
+}
+
/// This class represents a constant integer value.
class SCEVConstant : public SCEV {
friend class ScalarEvolution;
@@ -94,9 +98,9 @@ class SCEVVScale : public SCEV {
static bool classof(const SCEV *S) { return S->getSCEVType() == scVScale; }
};
-inline unsigned short computeExpressionSize(ArrayRef<const SCEV *> Args) {
+inline unsigned short computeExpressionSize(ArrayRef<SCEVUse> Args) {
APInt Size(16, 1);
- for (const auto *Arg : Args)
+ for (const SCEV *Arg : Args)
Size = Size.uadd_sat(APInt(16, Arg->getExpressionSize()));
return (unsigned short)Size.getZExtValue();
}
@@ -104,19 +108,19 @@ inline unsigned short computeExpressionSize(ArrayRef<const SCEV *> Args) {
/// This is the base class for unary cast operator classes.
class SCEVCastExpr : public SCEV {
protected:
- const SCEV *Op;
+ SCEVUse Op;
Type *Ty;
LLVM_ABI SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
- const SCEV *op, Type *ty);
+ SCEVUse op, Type *ty);
public:
- const SCEV *getOperand() const { return Op; }
- const SCEV *getOperand(unsigned i) const {
+ SCEVUse getOperand() const { return Op; }
+ SCEVUse getOperand(unsigned i) const {
assert(i == 0 && "Operand index out of range!");
return Op;
}
- ArrayRef<const SCEV *> operands() const { return Op; }
+ ArrayRef<SCEVUse> operands() const { return Op; }
size_t getNumOperands() const { return 1; }
Type *getType() const { return Ty; }
@@ -133,11 +137,12 @@ class SCEVCastExpr : public SCEV {
class SCEVPtrToIntExpr : public SCEVCastExpr {
friend class ScalarEvolution;
- SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op, Type *ITy);
+ SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, SCEVUse Op, Type *ITy);
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const SCEV *S) { return S->getSCEVType() == scPtrToInt; }
+ static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
};
/// This class represents a cast from a pointer to a pointer-sized integer
@@ -156,7 +161,7 @@ class SCEVPtrToAddrExpr : public SCEVCastExpr {
class SCEVIntegralCastExpr : public SCEVCastExpr {
protected:
LLVM_ABI SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
- const SCEV *op, Type *ty);
+ SCEVUse op, Type *ty);
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
@@ -171,7 +176,7 @@ class SCEVIntegralCastExpr : public SCEVCastExpr {
class SCEVTruncateExpr : public SCEVIntegralCastExpr {
friend class ScalarEvolution;
- SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op, Type *ty);
+ SCEVTruncateExpr(const FoldingSetNodeIDRef ID, SCEVUse op, Type *ty);
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
@@ -183,7 +188,7 @@ class SCEVTruncateExpr : public SCEVIntegralCastExpr {
class SCEVZeroExtendExpr : public SCEVIntegralCastExpr {
friend class ScalarEvolution;
- SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID, const SCEV *op, Type *ty);
+ SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID, SCEVUse op, Type *ty);
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
@@ -197,7 +202,7 @@ class SCEVZeroExtendExpr : public SCEVIntegralCastExpr {
class SCEVSignExtendExpr : public SCEVIntegralCastExpr {
friend class ScalarEvolution;
- SCEVSignExtendExpr(const FoldingSetNodeIDRef ID, const SCEV *op, Type *ty);
+ SCEVSignExtendExpr(const FoldingSetNodeIDRef ID, SCEVUse op, Type *ty);
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
@@ -214,25 +219,23 @@ class SCEVNAryExpr : public SCEV {
// arrays with its SCEVAllocator, so this class just needs a simple
// pointer rather than a more elaborate vector-like data structure.
// This also avoids the need for a non-trivial destructor.
- const SCEV *const *Operands;
+ SCEVUse const *Operands;
size_t NumOperands;
- SCEVNAryExpr(const FoldingSetNodeIDRef ID, enum SCEVTypes T,
- const SCEV *const *O, size_t N)
+ SCEVNAryExpr(const FoldingSetNodeIDRef ID, enum SCEVTypes T, SCEVUse const *O,
+ size_t N)
: SCEV(ID, T, computeExpressionSize(ArrayRef(O, N))), Operands(O),
NumOperands(N) {}
public:
size_t getNumOperands() const { return NumOperands; }
- const SCEV *getOperand(unsigned i) const {
+ SCEVUse getOperand(unsigned i) const {
assert(i < NumOperands && "Operand index out of range!");
return Operands[i];
}
- ArrayRef<const SCEV *> operands() const {
- return ArrayRef(Operands, NumOperands);
- }
+ ArrayRef<SCEVUse> operands() const { return ArrayRef(Operands, NumOperands); }
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask = NoWrapMask) const {
return (NoWrapFlags)(SubclassData & Mask);
@@ -256,13 +259,14 @@ class SCEVNAryExpr : public SCEV {
S->getSCEVType() == scSequentialUMinExpr ||
S->getSCEVType() == scAddRecExpr;
}
+ static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
};
/// This node is the base class for n'ary commutative operators.
class SCEVCommutativeExpr : public SCEVNAryExpr {
protected:
SCEVCommutativeExpr(const FoldingSetNodeIDRef ID, enum SCEVTypes T,
- const SCEV *const *O, size_t N)
+ SCEVUse const *O, size_t N)
: SCEVNAryExpr(ID, T, O, N) {}
public:
@@ -283,11 +287,10 @@ class SCEVAddExpr : public SCEVCommutativeExpr {
Type *Ty;
- SCEVAddExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
+ SCEVAddExpr(const FoldingSetNodeIDRef ID, SCEVUse const *O, size_t N)
: SCEVCommutativeExpr(ID, scAddExpr, O, N) {
- auto *FirstPointerTypedOp = find_if(operands(), [](const SCEV *Op) {
- return Op->getType()->isPointerTy();
- });
+ auto *FirstPointerTypedOp = find_if(
+ operands(), [](SCEVUse Op) { return Op->getType()->isPointerTy(); });
if (FirstPointerTypedOp != operands().end())
Ty = (*FirstPointerTypedOp)->getType();
else
@@ -299,13 +302,14 @@ class SCEVAddExpr : public SCEVCommutativeExpr {
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const SCEV *S) { return S->getSCEVType() == scAddExpr; }
+ static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
};
/// This node represents multiplication of some number of SCEVs.
class SCEVMulExpr : public SCEVCommutativeExpr {
friend class ScalarEvolution;
- SCEVMulExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
+ SCEVMulExpr(const FoldingSetNodeIDRef ID, SCEVUse const *O, size_t N)
: SCEVCommutativeExpr(ID, scMulExpr, O, N) {}
public:
@@ -313,30 +317,31 @@ class SCEVMulExpr : public SCEVCommutativeExpr {
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const SCEV *S) { return S->getSCEVType() == scMulExpr; }
+ static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
};
/// This class represents a binary unsigned division operation.
class SCEVUDivExpr : public SCEV {
friend class ScalarEvolution;
- std::array<const SCEV *, 2> Operands;
+ std::array<SCEVUse, 2> Operands;
- SCEVUDivExpr(const FoldingSetNodeIDRef ID, const SCEV *lhs, const SCEV *rhs)
+ SCEVUDivExpr(const FoldingSetNodeIDRef ID, SCEVUse lhs, SCEVUse rhs)
: SCEV(ID, scUDivExpr, computeExpressionSize({lhs, rhs})) {
Operands[0] = lhs;
Operands[1] = rhs;
}
public:
- const SCEV *getLHS() const { return Operands[0]; }
- const SCEV *getRHS() const { return Operands[1]; }
+ SCEVUse getLHS() const { return Operands[0]; }
+ SCEVUse getRHS() const { return Operands[1]; }
size_t getNumOperands() const { return 2; }
- const SCEV *getOperand(unsigned i) const {
+ SCEVUse getOperand(unsigned i) const {
assert((i == 0 || i == 1) && "Operand index out of range!");
return i == 0 ? getLHS() : getRHS();
}
- ArrayRef<const SCEV *> operands() const { return Operands; }
+ ArrayRef<SCEVUse> operands() const { return Operands; }
Type *getType() const {
// In most cases the types of LHS and RHS will be the same, but in some
@@ -364,25 +369,24 @@ class SCEVAddRecExpr : public SCEVNAryExpr {
const Loop *L;
- SCEVAddRecExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N,
+ SCEVAddRecExpr(const FoldingSetNodeIDRef ID, SCEVUse const *O, size_t N,
const Loop *l)
: SCEVNAryExpr(ID, scAddRecExpr, O, N), L(l) {}
public:
Type *getType() const { return getStart()->getType(); }
- const SCEV *getStart() const { return Operands[0]; }
+ SCEVUse getStart() const { return Operands[0]; }
const Loop *getLoop() const { return L; }
/// Constructs and returns the recurrence indicating how much this
/// expression steps by. If this is a polynomial of degree N, it
/// returns a chrec of degree N-1. We cannot determine whether
/// the step recurrence has self-wraparound.
- const SCEV *getStepRecurrence(ScalarEvolution &SE) const {
+ SCEVUse getStepRecurrence(ScalarEvolution &SE) const {
if (isAffine())
return getOperand(1);
- return SE.getAddRecExpr(
- SmallVector<const SCEV *, 3>(operands().drop_front()), getLoop(),
- FlagAnyWrap);
+ return SE.getAddRecExpr(toSCEV(operands().drop_front()), getLoop(),
+ FlagAnyWrap);
}
/// Return true if this represents an expression A + B*x where A
@@ -414,9 +418,9 @@ class SCEVAddRecExpr : public SCEVNAryExpr {
/// Return the value of this chain of recurrences at the specified iteration
/// number. Takes an explicit list of operands to represent an AddRec.
- LLVM_ABI static const SCEV *
- evaluateAtIteration(ArrayRef<const SCEV *> Operands, const SCEV *It,
- ScalarEvolution &SE);
+ LLVM_ABI static const SCEV *evaluateAtIteration(ArrayRef<SCEVUse> Operands,
+ const SCEV *It,
+ ScalarEvolution &SE);
/// Return the number of iterations of this loop that produce
/// values in the specified constant range. Another way of
@@ -449,7 +453,7 @@ class SCEVMinMaxExpr : public SCEVCommutativeExpr {
protected:
/// Note: Constructing subclasses via this constructor is allowed
SCEVMinMaxExpr(const FoldingSetNodeIDRef ID, enum SCEVTypes T,
- const SCEV *const *O, size_t N)
+ SCEVUse const *O, size_t N)
: SCEVCommutativeExpr(ID, T, O, N) {
assert(isMinMaxType(T));
// Min and max never overflow
@@ -481,7 +485,7 @@ class SCEVMinMaxExpr : public SCEVCommutativeExpr {
class SCEVSMaxExpr : public SCEVMinMaxExpr {
friend class ScalarEvolution;
- SCEVSMaxExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
+ SCEVSMaxExpr(const FoldingSetNodeIDRef ID, SCEVUse const *O, size_t N)
: SCEVMinMaxExpr(ID, scSMaxExpr, O, N) {}
public:
@@ -493,7 +497,7 @@ class SCEVSMaxExpr : public SCEVMinMaxExpr {
class SCEVUMaxExpr : public SCEVMinMaxExpr {
friend class ScalarEvolution;
- SCEVUMaxExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
+ SCEVUMaxExpr(const FoldingSetNodeIDRef ID, SCEVUse const *O, size_t N)
: SCEVMinMaxExpr(ID, scUMaxExpr, O, N) {}
public:
@@ -505,7 +509,7 @@ class SCEVUMaxExpr : public SCEVMinMaxExpr {
class SCEVSMinExpr : public SCEVMinMaxExpr {
friend class ScalarEvolution;
- SCEVSMinExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
+ SCEVSMinExpr(const FoldingSetNodeIDRef ID, SCEVUse const *O, size_t N)
: SCEVMinMaxExpr(ID, scSMinExpr, O, N) {}
public:
@@ -517,7 +521,7 @@ class SCEVSMinExpr : public SCEVMinMaxExpr {
class SCEVUMinExpr : public SCEVMinMaxExpr {
friend class ScalarEvolution;
- SCEVUMinExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
+ SCEVUMinExpr(const FoldingSetNodeIDRef ID, SCEVUse const *O, size_t N)
: SCEVMinMaxExpr(ID, scUMinExpr, O, N) {}
public:
@@ -544,7 +548,7 @@ class SCEVSequentialMinMaxExpr : public SCEVNAryExpr {
protected:
/// Note: Constructing subclasses via this constructor is allowed
SCEVSequentialMinMaxExpr(const FoldingSetNodeIDRef ID, enum SCEVTypes T,
- const SCEV *const *O, size_t N)
+ SCEVUse const *O, size_t N)
: SCEVNAryExpr(ID, T, O, N) {
assert(isSequentialMinMaxType(T));
// Min and max never overflow
@@ -571,13 +575,14 @@ class SCEVSequentialMinMaxExpr : public SCEVNAryExpr {
static bool classof(const SCEV *S) {
return isSequentialMinMaxType(S->getSCEVType());
}
+ static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
};
/// This class represents a sequential/in-order unsigned minimum selection.
class SCEVSequentialUMinExpr : public SCEVSequentialMinMaxExpr {
friend class ScalarEvolution;
- SCEVSequentialUMinExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O,
+ SCEVSequentialUMinExpr(const FoldingSetNodeIDRef ID, SCEVUse const *O,
size_t N)
: SCEVSequentialMinMaxExpr(ID, scSequentialUMinExpr, O, N) {}
@@ -715,7 +720,7 @@ template <typename SV> class SCEVTraversal {
case scUMinExpr:
case scSequentialUMinExpr:
case scAddRecExpr:
- for (const auto *Op : S->operands()) {
+ for (const SCEV *Op : S->operands()) {
push(Op);
if (Visitor.isDone())
break;
@@ -825,9 +830,9 @@ class SCEVRewriteVisitor : public SCEVVisitor<SC, const SCEV *> {
}
const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
+ SmallVector<SCEVUse, 2> Operands;
bool Changed = false;
- for (const auto *Op : Expr->operands()) {
+ for (const SCEV *Op : Expr->operands()) {
Operands.push_back(((SC *)this)->visit(Op));
Changed |= Op != Operands.back();
}
@@ -835,9 +840,9 @@ class SCEVRewriteVisitor : public SCEVVisitor<SC, const SCEV *> {
}
const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
+ SmallVector<SCEVUse, 2> Operands;
bool Changed = false;
- for (const auto *Op : Expr->operands()) {
+ for (const SCEV *Op : Expr->operands()) {
Operands.push_back(((SC *)this)->visit(Op));
Changed |= Op != Operands.back();
}
@@ -852,9 +857,9 @@ class SCEVRewriteVisitor : public SCEVVisitor<SC, const SCEV *> {
}
const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
+ SmallVector<SCEVUse, 2> Operands;
bool Changed = false;
- for (const auto *Op : Expr->operands()) {
+ for (const SCEV *Op : Expr->operands()) {
Operands.push_back(((SC *)this)->visit(Op));
Changed |= Op != Operands.back();
}
@@ -864,9 +869,9 @@ class SCEVRewriteVisitor : public SCEVVisitor<SC, const SCEV *> {
}
const SCEV *visitSMaxExpr(const SCEVSMaxExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
+ SmallVector<SCEVUse, 2> Operands;
bool Changed = false;
- for (const auto *Op : Expr->operands()) {
+ for (const SCEV *Op : Expr->operands()) {
Operands.push_back(((SC *)this)->visit(Op));
Changed |= Op != Operands.back();
}
@@ -874,9 +879,9 @@ class SCEVRewriteVisitor : public SCEVVisitor<SC, const SCEV *> {
}
const SCEV *visitUMaxExpr(const SCEVUMaxExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
+ SmallVector<SCEVUse, 2> Operands;
bool Changed = false;
- for (const auto *Op : Expr->operands()) {
+ for (const SCEV *Op : Expr->operands()) {
Operands.push_back(((SC *)this)->visit(Op));
Changed |= Op != Operands.back();
}
@@ -884,9 +889,9 @@ class SCEVRewriteVisitor : public SCEVVisitor<SC, const SCEV *> {
}
const SCEV *visitSMinExpr(const SCEVSMinExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
+ SmallVector<SCEVUse, 2> Operands;
bool Changed = false;
- for (const auto *Op : Expr->operands()) {
+ for (const SCEV *Op : Expr->operands()) {
Operands.push_back(((SC *)this)->visit(Op));
Changed |= Op != Operands.back();
}
@@ -894,9 +899,9 @@ class SCEVRewriteVisitor : public SCEVVisitor<SC, const SCEV *> {
}
const SCEV *visitUMinExpr(const SCEVUMinExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
+ SmallVector<SCEVUse, 2> Operands;
bool Changed = false;
- for (const auto *Op : Expr->operands()) {
+ for (const SCEV *Op : Expr->operands()) {
Operands.push_back(((SC *)this)->visit(Op));
Changed |= Op != Operands.back();
}
@@ -904,9 +909,9 @@ class SCEVRewriteVisitor : public SCEVVisitor<SC, const SCEV *> {
}
const SCEV *visitSequentialUMinExpr(const SCEVSequentialUMinExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
+ SmallVector<SCEVUse, 2> Operands;
bool Changed = false;
- for (const auto *Op : Expr->operands()) {
+ for (const SCEV *Op : Expr->operands()) {
Operands.push_back(((SC *)this)->visit(Op));
Changed |= Op != Operands.back();
}
@@ -964,15 +969,18 @@ class SCEVLoopAddRecRewriter
}
const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
- for (const SCEV *Op : Expr->operands())
- Operands.push_back(visit(Op));
-
const Loop *L = Expr->getLoop();
auto It = Map.find(L);
- if (It == Map.end())
- return SE.getAddRecExpr(Operands, L, Expr->getNoWrapFlags());
+ if (It == Map.end()) {
+ SmallVector<SCEVUse, 2> Operands2;
+ for (const SCEV *Op : Expr->operands())
+ Operands2.push_back(visit(Op));
+ return SE.getAddRecExpr(Operands2, L, Expr->getNoWrapFlags());
+ }
+ SmallVector<SCEVUse, 2> Operands;
+ for (const SCEV *Op : Expr->operands())
+ Operands.push_back(visit(Op));
return SCEVAddRecExpr::evaluateAtIteration(Operands, It->second, SE);
}
diff --git a/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h b/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h
index 7b00d0109a68c..0b47357434782 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h
@@ -13,6 +13,7 @@
#ifndef LLVM_ANALYSIS_SCALAREVOLUTIONPATTERNMATCH_H
#define LLVM_ANALYSIS_SCALAREVOLUTIONPATTERNMATCH_H
+#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
namespace llvm {
@@ -22,6 +23,11 @@ template <typename Pattern> bool match(const SCEV *S, const Pattern &P) {
return P.match(S);
}
+template <typename Pattern> bool match(const SCEVUse U, const Pattern &P) {
+ const SCEV *S = U.getPointer();
+ return const_cast<Pattern &>(P).match(S);
+}
+
template <typename Predicate> struct cst_pred_ty : public Predicate {
cst_pred_ty() = default;
cst_pred_ty(uint64_t V) : Predicate(V) {}
@@ -159,7 +165,8 @@ template <typename SCEVTy, typename Op0_t> struct SCEVUnaryExpr_match {
bool match(const SCEV *S) const {
auto *E = dyn_cast<SCEVTy>(S);
- return E && E->getNumOperands() == 1 && Op0.match(E->getOperand(0));
+ return E && E->getNumOperands() == 1 &&
+ Op0.match(E->getOperand(0).getPointer());
}
};
@@ -215,9 +222,10 @@ struct SCEVBinaryExpr_match {
auto *E = dyn_cast<SCEVTy>(S);
return E && E->getNumOperands() == 2 &&
- ((Op0.match(E->getOperand(0)) && Op1.match(E->getOperand(1))) ||
- (Commutable && Op0.match(E->getOperand(1)) &&
- Op1.match(E->getOperand(0))));
+ ((Op0.match(E->getOperand(0).getPointer()) &&
+ Op1.match(E->getOperand(1).getPointer())) ||
+ (Commutable && Op0.match(E->getOperand(1).getPointer()) &&
+ Op1.match(E->getOperand(0).getPointer())));
}
};
diff --git a/llvm/include/llvm/Transforms/Scalar/NaryReassociate.h b/llvm/include/llvm/Transforms/Scalar/NaryReassociate.h
index f0474bc4352e3..417801d470800 100644
--- a/llvm/include/llvm/Transforms/Scalar/NaryReassociate.h
+++ b/llvm/include/llvm/Transforms/Scalar/NaryReassociate.h
@@ -80,6 +80,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/ValueHandle.h"
@@ -92,8 +93,6 @@ class DominatorTree;
class Function;
class GetElementPtrInst;
class Instruction;
-class ScalarEvolution;
-class SCEV;
class TargetLibraryInfo;
class TargetTransformInfo;
class Type;
@@ -114,7 +113,7 @@ class NaryReassociatePass : public PassInfoMixin<NaryReassociatePass> {
bool doOneIteration(Function &F);
// Reassociates I for better CSE.
- Instruction *tryReassociate(Instruction *I, const SCEV *&OrigSCEV);
+ Instruction *tryReassociate(Instruction *I, SCEVUse &OrigSCEV);
// Reassociate GEP for better CSE.
Instruction *tryReassociateGEP(GetElementPtrInst *GEP);
@@ -143,19 +142,18 @@ class NaryReassociatePass : public PassInfoMixin<NaryReassociatePass> {
Instruction *tryReassociateBinaryOp(Value *LHS, Value *RHS,
BinaryOperator *I);
// Rewrites I to (LHS op RHS) if LHS is computed already.
- Instruction *tryReassociatedBinaryOp(const SCEV *LHS, Value *RHS,
+ Instruction *tryReassociatedBinaryOp(SCEVUse LHS, Value *RHS,
BinaryOperator *I);
// Tries to match Op1 and Op2 by using V.
bool matchTernaryOp(BinaryOperator *I, Value *V, Value *&Op1, Value *&Op2);
// Gets SCEV for (LHS op RHS).
- const SCEV *getBinarySCEV(BinaryOperator *I, const SCEV *LHS,
- const SCEV *RHS);
+ SCEVUse getBinarySCEV(BinaryOperator *I, SCEVUse LHS, SCEVUse RHS);
// Returns the closest dominator of \c Dominatee that computes
// \c CandidateExpr. Returns null if not found.
- Instruction *findClosestMatchingDominator(const SCEV *CandidateExpr,
+ Instruction *findClosestMatchingDominator(SCEVUse CandidateExpr,
Instruction *Dominatee);
// Try to match \p I as signed/unsigned Min/Max and reassociate it. \p
@@ -163,8 +161,7 @@ class NaryReassociatePass : public PassInfoMixin<NaryReassociatePass> {
// done or not. If reassociation was successful newly generated instruction is
// returned, otherwise nullptr.
template <typename PredT>
- Instruction *matchAndReassociateMinOrMax(Instruction *I,
- const SCEV *&OrigSCEV);
+ Instruction *matchAndReassociateMinOrMax(Instruction *I, SCEVUse &OrigSCEV);
// Reassociate Min/Max.
template <typename MaxMinT>
diff --git a/llvm/lib/Analysis/Delinearization.cpp b/llvm/lib/Analysis/Delinearization.cpp
index 5e670b5a5f456..b6125c47f0835 100644
--- a/llvm/lib/Analysis/Delinearization.cpp
+++ b/llvm/lib/Analysis/Delinearization.cpp
@@ -133,7 +133,7 @@ struct SCEVCollectAddRecMultiplies {
bool follow(const SCEV *S) {
if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
bool HasAddRec = false;
- SmallVector<const SCEV *, 0> Operands;
+ SmallVector<SCEVUse, 0> Operands;
for (const SCEV *Op : Mul->operands()) {
const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
if (Unknown && !isa<CallInst>(Unknown->getValue())) {
@@ -207,7 +207,7 @@ static bool findArrayDimensionsRec(ScalarEvolution &SE,
// End of recursion.
if (Last == 0) {
if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) {
- SmallVector<const SCEV *, 2> Qs;
+ SmallVector<SCEVUse, 2> Qs;
for (const SCEV *Op : M->operands())
if (!isa<SCEVConstant>(Op))
Qs.push_back(Op);
@@ -266,7 +266,7 @@ static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) {
return T;
if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) {
- SmallVector<const SCEV *, 2> Factors;
+ SmallVector<SCEVUse, 2> Factors;
for (const SCEV *Op : M->operands())
if (!isa<SCEVConstant>(Op))
Factors.push_back(Op);
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 083988f948069..9f0ecdfc0aa9d 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -258,6 +258,22 @@ static cl::opt<bool> UseContextForNoWrapFlagInference(
// SCEV class definitions
//===----------------------------------------------------------------------===//
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD void SCEVUse::dump() const {
+ print(dbgs());
+ dbgs() << '\n';
+}
+#endif
+
+void SCEVUse::print(raw_ostream &OS) const {
+ getPointer()->print(OS);
+ SCEV::NoWrapFlags Flags = static_cast<SCEV::NoWrapFlags>(getInt());
+ if (Flags & SCEV::FlagNUW)
+ OS << "(u nuw)";
+ if (Flags & SCEV::FlagNSW)
+ OS << "(u nsw)";
+}
+
//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//
@@ -417,7 +433,7 @@ Type *SCEV::getType() const {
llvm_unreachable("Unknown SCEV kind!");
}
-ArrayRef<const SCEV *> SCEV::operands() const {
+ArrayRef<SCEVUse> SCEV::operands() const {
switch (getSCEVType()) {
case scConstant:
case scVScale:
@@ -516,7 +532,7 @@ const SCEV *ScalarEvolution::getElementCount(Type *Ty, ElementCount EC,
}
SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
- const SCEV *op, Type *ty)
+ SCEVUse op, Type *ty)
: SCEV(ID, SCEVTy, computeExpressionSize(op)), Op(op), Ty(ty) {}
SCEVPtrToAddrExpr::SCEVPtrToAddrExpr(const FoldingSetNodeIDRef ID,
@@ -526,7 +542,7 @@ SCEVPtrToAddrExpr::SCEVPtrToAddrExpr(const FoldingSetNodeIDRef ID,
"Must be a non-bit-width-changing pointer-to-integer cast!");
}
-SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op,
+SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, SCEVUse Op,
Type *ITy)
: SCEVCastExpr(ID, scPtrToInt, Op, ITy) {
assert(getOperand()->getType()->isPointerTy() && Ty->isIntegerTy() &&
@@ -534,26 +550,26 @@ SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op,
}
SCEVIntegralCastExpr::SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID,
- SCEVTypes SCEVTy, const SCEV *op,
+ SCEVTypes SCEVTy, SCEVUse op,
Type *ty)
: SCEVCastExpr(ID, SCEVTy, op, ty) {}
-SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op,
+SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, SCEVUse op,
Type *ty)
: SCEVIntegralCastExpr(ID, scTruncate, op, ty) {
assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
"Cannot truncate non-integer value!");
}
-SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
- const SCEV *op, Type *ty)
+SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID, SCEVUse op,
+ Type *ty)
: SCEVIntegralCastExpr(ID, scZeroExtend, op, ty) {
assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
"Cannot zero extend non-integer value!");
}
-SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
- const SCEV *op, Type *ty)
+SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID, SCEVUse op,
+ Type *ty)
: SCEVIntegralCastExpr(ID, scSignExtend, op, ty) {
assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
"Cannot sign extend non-integer value!");
@@ -561,7 +577,8 @@ SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
void SCEVUnknown::deleted() {
// Clear this SCEVUnknown from various maps.
- SE->forgetMemoizedResults(this);
+ SCEVUse U(this);
+ SE->forgetMemoizedResults(U);
// Remove this SCEVUnknown from the uniquing map.
SE->UniqueSCEVs.RemoveNode(this);
@@ -572,7 +589,8 @@ void SCEVUnknown::deleted() {
void SCEVUnknown::allUsesReplacedWith(Value *New) {
// Clear this SCEVUnknown from various maps.
- SE->forgetMemoizedResults(this);
+ SCEVUse U(this);
+ SE->forgetMemoizedResults(U);
// Remove this SCEVUnknown from the uniquing map.
SE->UniqueSCEVs.RemoveNode(this);
@@ -748,8 +766,8 @@ CompareSCEVComplexity(const LoopInfo *const LI, const SCEV *LHS,
case scSMinExpr:
case scUMinExpr:
case scSequentialUMinExpr: {
- ArrayRef<const SCEV *> LOps = LHS->operands();
- ArrayRef<const SCEV *> ROps = RHS->operands();
+ ArrayRef<SCEVUse> LOps = LHS->operands();
+ ArrayRef<SCEVUse> ROps = RHS->operands();
// Lexicographically compare n-ary-like expressions.
unsigned LNumOps = LOps.size(), RNumOps = ROps.size();
@@ -757,7 +775,8 @@ CompareSCEVComplexity(const LoopInfo *const LI, const SCEV *LHS,
return (int)LNumOps - (int)RNumOps;
for (unsigned i = 0; i != LNumOps; ++i) {
- auto X = CompareSCEVComplexity(LI, LOps[i], ROps[i], DT, Depth + 1);
+ auto X = CompareSCEVComplexity(LI, LOps[i].getPointer(),
+ ROps[i].getPointer(), DT, Depth + 1);
if (X != 0)
return X;
}
@@ -779,28 +798,27 @@ CompareSCEVComplexity(const LoopInfo *const LI, const SCEV *LHS,
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
-static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
- LoopInfo *LI, DominatorTree &DT) {
+static void GroupByComplexity(SmallVectorImpl<SCEVUse> &Ops, LoopInfo *LI,
+ DominatorTree &DT) {
if (Ops.size() < 2) return; // Noop
// Whether LHS has provably less complexity than RHS.
- auto IsLessComplex = [&](const SCEV *LHS, const SCEV *RHS) {
+ auto IsLessComplex = [&](SCEVUse LHS, SCEVUse RHS) {
auto Complexity = CompareSCEVComplexity(LI, LHS, RHS, DT);
return Complexity && *Complexity < 0;
};
if (Ops.size() == 2) {
// This is the common case, which also happens to be trivially simple.
// Special case it.
- const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
+ SCEVUse &LHS = Ops[0], &RHS = Ops[1];
if (IsLessComplex(RHS, LHS))
std::swap(LHS, RHS);
return;
}
// Do the rough sort by complexity.
- llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
- return IsLessComplex(LHS, RHS);
- });
+ llvm::stable_sort(
+ Ops, [&](SCEVUse LHS, SCEVUse RHS) { return IsLessComplex(LHS, RHS); });
// Now that we are sorted by complexity, group elements of the same
// complexity. Note that this is, at worst, N^2, but the vector is likely to
@@ -825,7 +843,7 @@ static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
/// Returns true if \p Ops contains a huge SCEV (the subtree of S contains at
/// least HugeExprThreshold nodes).
-static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
+static bool hasHugeExpression(ArrayRef<SCEVUse> Ops) {
return any_of(Ops, [](const SCEV *S) {
return S->getExpressionSize() >= HugeExprThreshold;
});
@@ -842,7 +860,7 @@ static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
template <typename FoldT, typename IsIdentityT, typename IsAbsorberT>
static const SCEV *
constantFoldAndGroupOps(ScalarEvolution &SE, LoopInfo &LI, DominatorTree &DT,
- SmallVectorImpl<const SCEV *> &Ops, FoldT Fold,
+ SmallVectorImpl<SCEVUse> &Ops, FoldT Fold,
IsIdentityT IsIdentity, IsAbsorberT IsAbsorber) {
const SCEVConstant *Folded = nullptr;
for (unsigned Idx = 0; Idx < Ops.size();) {
@@ -997,11 +1015,11 @@ const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
return evaluateAtIteration(operands(), It, SE);
}
-const SCEV *
-SCEVAddRecExpr::evaluateAtIteration(ArrayRef<const SCEV *> Operands,
- const SCEV *It, ScalarEvolution &SE) {
+const SCEV *SCEVAddRecExpr::evaluateAtIteration(ArrayRef<SCEVUse> Operands,
+ const SCEV *It,
+ ScalarEvolution &SE) {
assert(Operands.size() > 0);
- const SCEV *Result = Operands[0];
+ const SCEV *Result = Operands[0].getPointer();
for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
// The computation is correct in the face of overflow provided that the
// multiplication is performed _after_ the evaluation of the binomial
@@ -1010,7 +1028,8 @@ SCEVAddRecExpr::evaluateAtIteration(ArrayRef<const SCEV *> Operands,
if (isa<SCEVCouldNotCompute>(Coeff))
return Coeff;
- Result = SE.getAddExpr(Result, SE.getMulExpr(Operands[i], Coeff));
+ Result =
+ SE.getAddExpr(Result, SE.getMulExpr(Operands[i].getPointer(), Coeff));
}
return Result;
}
@@ -1055,21 +1074,21 @@ class SCEVCastSinkingRewriter
const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
// Preserve wrap flags on rewritten SCEVAddExpr, which the default
// implementation drops.
- SmallVector<const SCEV *, 2> Operands;
+ SmallVector<SCEVUse, 2> Operands;
bool Changed = false;
- for (const auto *Op : Expr->operands()) {
- Operands.push_back(visit(Op));
- Changed |= Op != Operands.back();
+ for (SCEVUse Op : Expr->operands()) {
+ Operands.push_back(visit(Op.getPointer()));
+ Changed |= Op.getPointer() != Operands.back();
}
return !Changed ? Expr : SE.getAddExpr(Operands, Expr->getNoWrapFlags());
}
const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
+ SmallVector<SCEVUse, 2> Operands;
bool Changed = false;
- for (const auto *Op : Expr->operands()) {
- Operands.push_back(visit(Op));
- Changed |= Op != Operands.back();
+ for (SCEVUse Op : Expr->operands()) {
+ Operands.push_back(visit(Op.getPointer()));
+ Changed |= Op.getPointer() != Operands.back();
}
return !Changed ? Expr : SE.getMulExpr(Operands, Expr->getNoWrapFlags());
}
@@ -1215,7 +1234,7 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
// that replace other casts.
if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
auto *CommOp = cast<SCEVCommutativeExpr>(Op);
- SmallVector<const SCEV *, 4> Operands;
+ SmallVector<SCEVUse, 4> Operands;
unsigned numTruncs = 0;
for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
++i) {
@@ -1241,7 +1260,7 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
// If the input value is a chrec scev, truncate the chrec's operands.
if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
- SmallVector<const SCEV *, 4> Operands;
+ SmallVector<SCEVUse, 4> Operands;
for (const SCEV *Op : AddRec->operands())
Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
@@ -1375,7 +1394,7 @@ static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
// subtraction is expensive. For this purpose, perform a quick and dirty
// difference, by checking for Step in the operand list. Note, that
// SA might have repeated ops, like %a + %a + ..., so only remove one.
- SmallVector<const SCEV *, 4> DiffOps(SA->operands());
+ SmallVector<SCEVUse, 4> DiffOps(SA->operands());
for (auto It = DiffOps.begin(); It != DiffOps.end(); ++It)
if (*It == Step) {
DiffOps.erase(It);
@@ -1819,8 +1838,8 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
if (SA->hasNoUnsignedWrap()) {
// If the addition does not unsign overflow then we can, by definition,
// commute the zero extension with the addition operation.
- SmallVector<const SCEV *, 4> Ops;
- for (const auto *Op : SA->operands())
+ SmallVector<SCEVUse, 4> Ops;
+ for (SCEVUse Op : SA->operands())
Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
}
@@ -1852,8 +1871,8 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
if (SM->hasNoUnsignedWrap()) {
// If the multiply does not unsign overflow then we can, by definition,
// commute the zero extension with the multiply operation.
- SmallVector<const SCEV *, 4> Ops;
- for (const auto *Op : SM->operands())
+ SmallVector<SCEVUse, 4> Ops;
+ for (SCEVUse Op : SM->operands())
Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1);
}
@@ -1889,8 +1908,8 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
// zext(umax(x, y)) -> umax(zext(x), zext(y))
if (isa<SCEVUMinExpr>(Op) || isa<SCEVUMaxExpr>(Op)) {
auto *MinMax = cast<SCEVMinMaxExpr>(Op);
- SmallVector<const SCEV *, 4> Operands;
- for (auto *Operand : MinMax->operands())
+ SmallVector<SCEVUse, 4> Operands;
+ for (SCEVUse Operand : MinMax->operands())
Operands.push_back(getZeroExtendExpr(Operand, Ty));
if (isa<SCEVUMinExpr>(MinMax))
return getUMinExpr(Operands);
@@ -1900,8 +1919,8 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
// zext(umin_seq(x, y)) -> umin_seq(zext(x), zext(y))
if (auto *MinMax = dyn_cast<SCEVSequentialMinMaxExpr>(Op)) {
assert(isa<SCEVSequentialUMinExpr>(MinMax) && "Not supported!");
- SmallVector<const SCEV *, 4> Operands;
- for (auto *Operand : MinMax->operands())
+ SmallVector<SCEVUse, 4> Operands;
+ for (SCEVUse Operand : MinMax->operands())
Operands.push_back(getZeroExtendExpr(Operand, Ty));
return getUMinExpr(Operands, /*Sequential*/ true);
}
@@ -1990,8 +2009,8 @@ const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
if (SA->hasNoSignedWrap()) {
// If the addition does not sign overflow then we can, by definition,
// commute the sign extension with the addition operation.
- SmallVector<const SCEV *, 4> Ops;
- for (const auto *Op : SA->operands())
+ SmallVector<SCEVUse, 4> Ops;
+ for (SCEVUse Op : SA->operands())
Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1));
return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1);
}
@@ -2160,8 +2179,8 @@ const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
// sext(smax(x, y)) -> smax(sext(x), sext(y))
if (isa<SCEVSMinExpr>(Op) || isa<SCEVSMaxExpr>(Op)) {
auto *MinMax = cast<SCEVMinMaxExpr>(Op);
- SmallVector<const SCEV *, 4> Operands;
- for (auto *Operand : MinMax->operands())
+ SmallVector<SCEVUse, 4> Operands;
+ for (SCEVUse Operand : MinMax->operands())
Operands.push_back(getSignExtendExpr(Operand, Ty));
if (isa<SCEVSMinExpr>(MinMax))
return getSMinExpr(Operands);
@@ -2174,7 +2193,7 @@ const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
Op, Ty);
UniqueSCEVs.InsertNode(S, IP);
- registerUser(S, { Op });
+ registerUser(S, ArrayRef<SCEVUse>(SCEVUse(Op)));
return S;
}
@@ -2229,7 +2248,7 @@ const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
// Force the cast to be folded into the operands of an addrec.
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
- SmallVector<const SCEV *, 4> Ops;
+ SmallVector<SCEVUse, 4> Ops;
for (const SCEV *Op : AR->operands())
Ops.push_back(getAnyExtendExpr(Op, Ty));
return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
@@ -2266,12 +2285,12 @@ const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
/// may be exposed. This helps getAddRecExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
-static bool
-CollectAddOperandsWithScales(SmallDenseMap<const SCEV *, APInt, 16> &M,
- SmallVectorImpl<const SCEV *> &NewOps,
- APInt &AccumulatedConstant,
- ArrayRef<const SCEV *> Ops, const APInt &Scale,
- ScalarEvolution &SE) {
+static bool CollectAddOperandsWithScales(SmallDenseMap<SCEVUse, APInt, 16> &M,
+ SmallVectorImpl<SCEVUse> &NewOps,
+ APInt &AccumulatedConstant,
+ ArrayRef<SCEVUse> Ops,
+ const APInt &Scale,
+ ScalarEvolution &SE) {
bool Interesting = false;
// Iterate over the add operands. They are sorted, with constants first.
@@ -2294,13 +2313,12 @@ CollectAddOperandsWithScales(SmallDenseMap<const SCEV *, APInt, 16> &M,
if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
// A multiplication of a constant with another add; recurse.
const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
- Interesting |=
- CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
- Add->operands(), NewScale, SE);
+ Interesting |= CollectAddOperandsWithScales(
+ M, NewOps, AccumulatedConstant, Add->operands(), NewScale, SE);
} else {
// A multiplication of a constant with some other value. Update
// the map.
- SmallVector<const SCEV *, 4> MulOps(drop_begin(Mul->operands()));
+ SmallVector<SCEVUse, 4> MulOps(drop_begin(Mul->operands()));
const SCEV *Key = SE.getMulExpr(MulOps);
auto Pair = M.insert({Key, NewScale});
if (Pair.second) {
@@ -2314,8 +2332,7 @@ CollectAddOperandsWithScales(SmallDenseMap<const SCEV *, APInt, 16> &M,
}
} else {
// An ordinary operand. Update the map.
- std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
- M.insert({Ops[i], Scale});
+ auto Pair = M.insert({Ops[i], Scale});
if (Pair.second) {
NewOps.push_back(Pair.first->first);
} else {
@@ -2333,8 +2350,8 @@ CollectAddOperandsWithScales(SmallDenseMap<const SCEV *, APInt, 16> &M,
bool ScalarEvolution::willNotOverflow(Instruction::BinaryOps BinOp, bool Signed,
const SCEV *LHS, const SCEV *RHS,
const Instruction *CtxI) {
- const SCEV *(ScalarEvolution::*Operation)(const SCEV *, const SCEV *,
- SCEV::NoWrapFlags, unsigned);
+ const SCEV *(ScalarEvolution::*Operation)(SCEVUse, SCEVUse, SCEV::NoWrapFlags,
+ unsigned);
switch (BinOp) {
default:
llvm_unreachable("Unsupported binary op");
@@ -2456,7 +2473,7 @@ ScalarEvolution::getStrengthenedNoWrapFlagsFromBinOp(
// can't-overflow flags for the operation if possible.
static SCEV::NoWrapFlags StrengthenNoWrapFlags(ScalarEvolution *SE,
SCEVTypes Type,
- ArrayRef<const SCEV *> Ops,
+ ArrayRef<SCEVUse> Ops,
SCEV::NoWrapFlags Flags) {
using namespace std::placeholders;
@@ -2472,8 +2489,8 @@ static SCEV::NoWrapFlags StrengthenNoWrapFlags(ScalarEvolution *SE,
ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
// If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
- auto IsKnownNonNegative = [&](const SCEV *S) {
- return SE->isKnownNonNegative(S);
+ auto IsKnownNonNegative = [&](SCEVUse U) {
+ return SE->isKnownNonNegative(U);
};
if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative))
@@ -2542,7 +2559,7 @@ bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) {
}
/// Get a canonical add expression, or something simpler if possible.
-const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
+const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<SCEVUse> &Ops,
SCEV::NoWrapFlags OrigFlags,
unsigned Depth) {
assert(!(OrigFlags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
@@ -2570,7 +2587,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
unsigned Idx = isa<SCEVConstant>(Ops[0]) ? 1 : 0;
// Delay expensive flag strengthening until necessary.
- auto ComputeFlags = [this, OrigFlags](ArrayRef<const SCEV *> Ops) {
+ auto ComputeFlags = [this, OrigFlags](ArrayRef<SCEVUse> Ops) {
return StrengthenNoWrapFlags(this, scAddExpr, Ops, OrigFlags);
};
@@ -2598,7 +2615,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
while (i+Count != e && Ops[i+Count] == Ops[i])
++Count;
// Merge the values into a multiply.
- const SCEV *Scale = getConstant(Ty, Count);
+ SCEVUse Scale = getConstant(Ty, Count);
const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
if (Ops.size() == Count)
return Mul;
@@ -2622,14 +2639,14 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
return T->getOperand()->getType();
if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
- const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
+ SCEVUse LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp))
return T->getOperand()->getType();
}
return nullptr;
};
if (auto *SrcType = FindTruncSrcType()) {
- SmallVector<const SCEV *, 8> LargeOps;
+ SmallVector<SCEVUse, 8> LargeOps;
bool Ok = true;
// Check all the operands to see if they can be represented in the
// source type of the truncate.
@@ -2643,7 +2660,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
} else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Op)) {
LargeOps.push_back(getAnyExtendExpr(C, SrcType));
} else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Op)) {
- SmallVector<const SCEV *, 8> LargeMulOps;
+ SmallVector<SCEVUse, 8> LargeMulOps;
for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
if (const SCEVTruncateExpr *T =
dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
@@ -2707,7 +2724,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
}
if (PreservedFlags != SCEV::FlagAnyWrap) {
- SmallVector<const SCEV *, 4> NewOps(AddExpr->operands());
+ SmallVector<SCEVUse, 4> NewOps(AddExpr->operands());
NewOps[0] = getConstant(ConstAdd);
return getAddExpr(NewOps, PreservedFlags);
}
@@ -2774,8 +2791,8 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
// operands multiplied by constant values.
if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
uint64_t BitWidth = getTypeSizeInBits(Ty);
- SmallDenseMap<const SCEV *, APInt, 16> M;
- SmallVector<const SCEV *, 8> NewOps;
+ SmallDenseMap<SCEVUse, APInt, 16> M;
+ SmallVector<SCEVUse, 8> NewOps;
APInt AccumulatedConstant(BitWidth, 0);
if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
Ops, APInt(BitWidth, 1), *this)) {
@@ -2788,7 +2805,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
// Some interesting folding opportunity is present, so its worthwhile to
// re-generate the operands list. Group the operands by constant scale,
// to avoid multiplying by the same constant scale multiple times.
- std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
+ std::map<APInt, SmallVector<SCEVUse, 4>, APIntCompare> MulOpLists;
for (const SCEV *NewOp : NewOps)
MulOpLists[M.find(NewOp)->second].push_back(NewOp);
// Re-generate the operands list.
@@ -2829,12 +2846,11 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
if (Mul->getNumOperands() != 2) {
// If the multiply has more than two operands, we must get the
// Y*Z term.
- SmallVector<const SCEV *, 4> MulOps(
- Mul->operands().take_front(MulOp));
+ SmallVector<SCEVUse, 4> MulOps(Mul->operands().take_front(MulOp));
append_range(MulOps, Mul->operands().drop_front(MulOp + 1));
InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
}
- SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul};
+ SmallVector<SCEVUse, 2> TwoOps = {getOne(Ty), InnerMul};
const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
SCEV::FlagAnyWrap, Depth + 1);
@@ -2863,19 +2879,18 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
// Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
if (Mul->getNumOperands() != 2) {
- SmallVector<const SCEV *, 4> MulOps(
- Mul->operands().take_front(MulOp));
+ SmallVector<SCEVUse, 4> MulOps(Mul->operands().take_front(MulOp));
append_range(MulOps, Mul->operands().drop_front(MulOp+1));
InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
}
const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
if (OtherMul->getNumOperands() != 2) {
- SmallVector<const SCEV *, 4> MulOps(
+ SmallVector<SCEVUse, 4> MulOps(
OtherMul->operands().take_front(OMulOp));
append_range(MulOps, OtherMul->operands().drop_front(OMulOp+1));
InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
}
- SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2};
+ SmallVector<SCEVUse, 2> TwoOps = {InnerMul1, InnerMul2};
const SCEV *InnerMulSum =
getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum,
@@ -2900,7 +2915,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
// Scan all of the other operands to this add and add them to the vector if
// they are loop invariant w.r.t. the recurrence.
- SmallVector<const SCEV *, 8> LIOps;
+ SmallVector<SCEVUse, 8> LIOps;
const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
const Loop *AddRecLoop = AddRec->getLoop();
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
@@ -2922,7 +2937,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
// NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step}
LIOps.push_back(AddRec->getStart());
- SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
+ SmallVector<SCEVUse, 4> AddRecOps(AddRec->operands());
// It is not in general safe to propagate flags valid on an add within
// the addrec scope to one outside it. We must prove that the inner
@@ -2936,7 +2951,8 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
// in the outer scope.
SCEV::NoWrapFlags AddFlags = Flags;
if (AddFlags != SCEV::FlagAnyWrap) {
- auto *DefI = getDefiningScopeBound(LIOps);
+ SmallVector<SCEVUse, 8> LIOpPtrs(LIOps.begin(), LIOps.end());
+ auto *DefI = getDefiningScopeBound(LIOpPtrs);
auto *ReachI = &*AddRecLoop->getHeader()->begin();
if (!isGuaranteedToTransferExecutionTo(DefI, ReachI))
AddFlags = SCEV::FlagAnyWrap;
@@ -2975,7 +2991,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
"AddRecExprs are not sorted in reverse dominance order?");
if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
// Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L>
- SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
+ SmallVector<SCEVUse, 4> AddRecOps(AddRec->operands());
for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
++OtherIdx) {
const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
@@ -2986,8 +3002,8 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
append_range(AddRecOps, OtherAddRec->operands().drop_front(i));
break;
}
- SmallVector<const SCEV *, 2> TwoOps = {
- AddRecOps[i], OtherAddRec->getOperand(i)};
+ SmallVector<SCEVUse, 2> TwoOps = {AddRecOps[i],
+ OtherAddRec->getOperand(i)};
AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
}
Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
@@ -3008,9 +3024,8 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
}
-const SCEV *
-ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
- SCEV::NoWrapFlags Flags) {
+const SCEV *ScalarEvolution::getOrCreateAddExpr(ArrayRef<SCEVUse> Ops,
+ SCEV::NoWrapFlags Flags) {
FoldingSetNodeID ID;
ID.AddInteger(scAddExpr);
for (const SCEV *Op : Ops)
@@ -3019,7 +3034,7 @@ ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
SCEVAddExpr *S =
static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
if (!S) {
- const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+ SCEVUse *O = SCEVAllocator.Allocate<SCEVUse>(Ops.size());
llvm::uninitialized_copy(Ops, O);
S = new (SCEVAllocator)
SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
@@ -3030,9 +3045,9 @@ ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
return S;
}
-const SCEV *
-ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
- const Loop *L, SCEV::NoWrapFlags Flags) {
+const SCEV *ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<SCEVUse> Ops,
+ const Loop *L,
+ SCEV::NoWrapFlags Flags) {
FoldingSetNodeID ID;
ID.AddInteger(scAddRecExpr);
for (const SCEV *Op : Ops)
@@ -3042,7 +3057,7 @@ ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
SCEVAddRecExpr *S =
static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
if (!S) {
- const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+ SCEVUse *O = SCEVAllocator.Allocate<SCEVUse>(Ops.size());
llvm::uninitialized_copy(Ops, O);
S = new (SCEVAllocator)
SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L);
@@ -3054,9 +3069,8 @@ ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
return S;
}
-const SCEV *
-ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
- SCEV::NoWrapFlags Flags) {
+const SCEV *ScalarEvolution::getOrCreateMulExpr(ArrayRef<SCEVUse> Ops,
+ SCEV::NoWrapFlags Flags) {
FoldingSetNodeID ID;
ID.AddInteger(scMulExpr);
for (const SCEV *Op : Ops)
@@ -3065,7 +3079,7 @@ ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
SCEVMulExpr *S =
static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
if (!S) {
- const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+ SCEVUse *O = SCEVAllocator.Allocate<SCEVUse>(Ops.size());
llvm::uninitialized_copy(Ops, O);
S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
O, Ops.size());
@@ -3131,7 +3145,7 @@ static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
}
/// Get a canonical multiply expression, or something simpler if possible.
-const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
+const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<SCEVUse> &Ops,
SCEV::NoWrapFlags OrigFlags,
unsigned Depth) {
assert(OrigFlags == maskFlags(OrigFlags, SCEV::FlagNUW | SCEV::FlagNSW) &&
@@ -3155,7 +3169,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
return Folded;
// Delay expensive flag strengthening until necessary.
- auto ComputeFlags = [this, OrigFlags](ArrayRef<const SCEV *> Ops) {
+ auto ComputeFlags = [this, OrigFlags](const ArrayRef<SCEVUse> Ops) {
return StrengthenNoWrapFlags(this, scMulExpr, Ops, OrigFlags);
};
@@ -3192,11 +3206,11 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
// If we have a mul by -1 of an add, try distributing the -1 among the
// add operands.
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
- SmallVector<const SCEV *, 4> NewOps;
+ SmallVector<SCEVUse, 4> NewOps;
bool AnyFolded = false;
for (const SCEV *AddOp : Add->operands()) {
- const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
- Depth + 1);
+ const SCEV *Mul = getMulExpr(Ops[0], SCEVUse(AddOp),
+ SCEV::FlagAnyWrap, Depth + 1);
if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
NewOps.push_back(Mul);
}
@@ -3204,10 +3218,10 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
} else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
// Negation preserves a recurrence's no self-wrap property.
- SmallVector<const SCEV *, 4> Operands;
+ SmallVector<SCEVUse, 4> Operands;
for (const SCEV *AddRecOp : AddRec->operands())
- Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
- Depth + 1));
+ Operands.push_back(getMulExpr(Ops[0], SCEVUse(AddRecOp),
+ SCEV::FlagAnyWrap, Depth + 1));
// Let M be the minimum representable signed value. AddRec with nsw
// multiplied by -1 can have signed overflow if and only if it takes a
// value of M: M * (-1) would stay M and (M + 1) * (-1) would be the
@@ -3302,8 +3316,10 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
// Scan all of the other operands to this mul and add them to the vector
// if they are loop invariant w.r.t. the recurrence.
- SmallVector<const SCEV *, 8> LIOps;
- const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
+ const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[Idx]);
+ if (!AddRec)
+ break;
+ SmallVector<SCEVUse, 8> LIOps;
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
if (isAvailableAtLoopEntry(Ops[i], AddRec->getLoop())) {
LIOps.push_back(Ops[i]);
@@ -3314,7 +3330,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
// If we found some loop invariants, fold them into the recurrence.
if (!LIOps.empty()) {
// NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
- SmallVector<const SCEV *, 4> NewOps;
+ SmallVector<SCEVUse, 4> NewOps;
NewOps.reserve(AddRec->getNumOperands());
const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
@@ -3326,7 +3342,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
AddRec->getNoWrapFlags(ComputeFlags({Scale, AddRec}));
for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
- NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
+ NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i).getPointer(),
SCEV::FlagAnyWrap, Depth + 1));
if (hasFlags(Flags, SCEV::FlagNSW) && !hasFlags(Flags, SCEV::FlagNUW)) {
@@ -3384,10 +3400,10 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
bool Overflow = false;
Type *Ty = AddRec->getType();
bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
- SmallVector<const SCEV*, 7> AddRecOps;
+ SmallVector<SCEVUse, 7> AddRecOps;
for (int x = 0, xe = AddRec->getNumOperands() +
OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
- SmallVector <const SCEV *, 7> SumOps;
+ SmallVector<SCEVUse, 7> SumOps;
for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
@@ -3435,8 +3451,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
}
/// Represents an unsigned remainder expression based on unsigned division.
-const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
- const SCEV *RHS) {
+const SCEV *ScalarEvolution::getURemExpr(SCEVUse LHS, SCEVUse RHS) {
assert(getEffectiveSCEVType(LHS->getType()) ==
getEffectiveSCEVType(RHS->getType()) &&
"SCEVURemExpr operand types don't match!");
@@ -3464,8 +3479,7 @@ const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
/// Get a canonical unsigned division expression, or something simpler if
/// possible.
-const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
- const SCEV *RHS) {
+const SCEV *ScalarEvolution::getUDivExpr(SCEVUse LHS, SCEVUse RHS) {
assert(!LHS->getType()->isPointerTy() &&
"SCEVUDivExpr operand can't be pointer!");
assert(LHS->getType() == RHS->getType() &&
@@ -3513,7 +3527,7 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
getZeroExtendExpr(Step, ExtTy),
AR->getLoop(), SCEV::FlagAnyWrap)) {
- SmallVector<const SCEV *, 4> Operands;
+ SmallVector<SCEVUse, 4> Operands;
for (const SCEV *Op : AR->operands())
Operands.push_back(getUDivExpr(Op, RHS));
return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
@@ -3562,7 +3576,7 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
}
// (A*B)/C --> A*(B/C) if safe and B/C can be folded.
if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
- SmallVector<const SCEV *, 4> Operands;
+ SmallVector<SCEVUse, 4> Operands;
for (const SCEV *Op : M->operands())
Operands.push_back(getZeroExtendExpr(Op, ExtTy));
if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
@@ -3571,7 +3585,7 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
const SCEV *Op = M->getOperand(i);
const SCEV *Div = getUDivExpr(Op, RHSC);
if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
- Operands = SmallVector<const SCEV *, 4>(M->operands());
+ Operands = SmallVector<SCEVUse, 4>(M->operands());
Operands[i] = Div;
return getMulExpr(Operands);
}
@@ -3594,7 +3608,7 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
// (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
- SmallVector<const SCEV *, 4> Operands;
+ SmallVector<SCEVUse, 4> Operands;
for (const SCEV *Op : A->operands())
Operands.push_back(getZeroExtendExpr(Op, ExtTy));
if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
@@ -3639,7 +3653,7 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
LHS, RHS);
UniqueSCEVs.InsertNode(S, IP);
- registerUser(S, {LHS, RHS});
+ registerUser(S, ArrayRef<SCEVUse>({SCEVUse(LHS), SCEVUse(RHS)}));
return S;
}
@@ -3661,8 +3675,7 @@ APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
/// possible. There is no representation for an exact udiv in SCEV IR, but we
/// can attempt to remove factors from the LHS and RHS. We can't do this when
/// it's not exact because the udiv may be clearing bits.
-const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
- const SCEV *RHS) {
+const SCEV *ScalarEvolution::getUDivExactExpr(SCEVUse LHS, SCEVUse RHS) {
// TODO: we could try to find factors in all sorts of things, but for now we
// just deal with u/exact (multiply, constant). See SCEVDivision towards the
// end of this file for inspiration.
@@ -3676,7 +3689,7 @@ const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
// first element of the mulexpr.
if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
if (LHSCst == RHSCst) {
- SmallVector<const SCEV *, 2> Operands(drop_begin(Mul->operands()));
+ SmallVector<SCEVUse, 2> Operands(drop_begin(Mul->operands()));
return getMulExpr(Operands);
}
@@ -3689,7 +3702,7 @@ const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
RHSCst =
cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
- SmallVector<const SCEV *, 2> Operands;
+ SmallVector<SCEVUse, 2> Operands;
Operands.push_back(LHSCst);
append_range(Operands, Mul->operands().drop_front());
LHS = getMulExpr(Operands);
@@ -3703,7 +3716,7 @@ const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
if (Mul->getOperand(i) == RHS) {
- SmallVector<const SCEV *, 2> Operands;
+ SmallVector<SCEVUse, 2> Operands;
append_range(Operands, Mul->operands().take_front(i));
append_range(Operands, Mul->operands().drop_front(i + 1));
return getMulExpr(Operands);
@@ -3715,10 +3728,10 @@ const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
/// Get an add recurrence expression for the specified loop. Simplify the
/// expression as much as possible.
-const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
+const SCEV *ScalarEvolution::getAddRecExpr(SCEVUse Start, SCEVUse Step,
const Loop *L,
SCEV::NoWrapFlags Flags) {
- SmallVector<const SCEV *, 4> Operands;
+ SmallVector<SCEVUse, 4> Operands;
Operands.push_back(Start);
if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
if (StepChrec->getLoop() == L) {
@@ -3732,9 +3745,9 @@ const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
/// Get an add recurrence expression for the specified loop. Simplify the
/// expression as much as possible.
-const SCEV *
-ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
- const Loop *L, SCEV::NoWrapFlags Flags) {
+const SCEV *ScalarEvolution::getAddRecExpr(SmallVectorImpl<SCEVUse> &Operands,
+ const Loop *L,
+ SCEV::NoWrapFlags Flags) {
if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
@@ -3768,7 +3781,7 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
? (L->getLoopDepth() < NestedLoop->getLoopDepth())
: (!NestedLoop->contains(L) &&
DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
- SmallVector<const SCEV *, 4> NestedOperands(NestedAR->operands());
+ SmallVector<SCEVUse, 4> NestedOperands(NestedAR->operands());
Operands[0] = NestedAR->getStart();
// AddRecs require their operands be loop-invariant with respect to their
// loops. Don't perform this transformation if it would break this
@@ -3810,7 +3823,7 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
}
const SCEV *ScalarEvolution::getGEPExpr(GEPOperator *GEP,
- ArrayRef<const SCEV *> IndexExprs) {
+ ArrayRef<SCEVUse> IndexExprs) {
const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
// getSCEV(Base)->getType() has the same address space as Base->getType()
// because SCEV::getType() preserves the address space.
@@ -3829,8 +3842,8 @@ const SCEV *ScalarEvolution::getGEPExpr(GEPOperator *GEP,
return getGEPExpr(BaseExpr, IndexExprs, GEP->getSourceElementType(), NW);
}
-const SCEV *ScalarEvolution::getGEPExpr(const SCEV *BaseExpr,
- ArrayRef<const SCEV *> IndexExprs,
+const SCEV *ScalarEvolution::getGEPExpr(SCEVUse BaseExpr,
+ ArrayRef<SCEVUse> IndexExprs,
Type *SrcElementTy, GEPNoWrapFlags NW) {
SCEV::NoWrapFlags OffsetWrap = SCEV::FlagAnyWrap;
if (NW.hasNoUnsignedSignedWrap())
@@ -3841,8 +3854,8 @@ const SCEV *ScalarEvolution::getGEPExpr(const SCEV *BaseExpr,
Type *CurTy = BaseExpr->getType();
Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType());
bool FirstIter = true;
- SmallVector<const SCEV *, 4> Offsets;
- for (const SCEV *IndexExpr : IndexExprs) {
+ SmallVector<SCEVUse, 4> Offsets;
+ for (SCEVUse IndexExpr : IndexExprs) {
// Compute the (potentially symbolic) offset in bytes for this index.
if (StructType *STy = dyn_cast<StructType>(CurTy)) {
// For a struct, add the member offset.
@@ -3902,13 +3915,23 @@ SCEV *ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType,
return UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
}
+SCEV *ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType,
+ ArrayRef<SCEVUse> Ops) {
+ FoldingSetNodeID ID;
+ ID.AddInteger(SCEVType);
+ for (const SCEV *Op : Ops)
+ ID.AddPointer(Op);
+ void *IP = nullptr;
+ return UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
+}
+
const SCEV *ScalarEvolution::getAbsExpr(const SCEV *Op, bool IsNSW) {
SCEV::NoWrapFlags Flags = IsNSW ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
return getSMaxExpr(Op, getNegativeSCEV(Op, Flags));
}
const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind,
- SmallVectorImpl<const SCEV *> &Ops) {
+ SmallVectorImpl<SCEVUse> &Ops) {
assert(SCEVMinMaxExpr::isMinMaxType(Kind) && "Not a SCEVMinMaxExpr!");
assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
if (Ops.size() == 1) return Ops[0];
@@ -4024,7 +4047,7 @@ const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind,
const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
if (ExistingSCEV)
return ExistingSCEV;
- const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+ SCEVUse *O = SCEVAllocator.Allocate<SCEVUse>(Ops.size());
llvm::uninitialized_copy(Ops, O);
SCEV *S = new (SCEVAllocator)
SCEVMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size());
@@ -4062,8 +4085,11 @@ class SCEVSequentialMinMaxDeduplicatingVisitor final
return S;
auto *NAry = cast<SCEVNAryExpr>(S);
- SmallVector<const SCEV *> NewOps;
- bool Changed = visit(Kind, NAry->operands(), NewOps);
+ SmallVector<SCEVUse, 8> Operands;
+ for (SCEVUse U : NAry->operands())
+ Operands.push_back(U.getPointer());
+ SmallVector<SCEVUse> NewOps;
+ bool Changed = visit(Kind, Operands, NewOps);
if (!Changed)
return S;
@@ -4090,10 +4116,10 @@ class SCEVSequentialMinMaxDeduplicatingVisitor final
SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(
RootKind)) {}
- bool /*Changed*/ visit(SCEVTypes Kind, ArrayRef<const SCEV *> OrigOps,
- SmallVectorImpl<const SCEV *> &NewOps) {
+ bool /*Changed*/ visit(SCEVTypes Kind, ArrayRef<SCEVUse> OrigOps,
+ SmallVectorImpl<SCEVUse> &NewOps) {
bool Changed = false;
- SmallVector<const SCEV *> Ops;
+ SmallVector<SCEVUse> Ops;
Ops.reserve(OrigOps.size());
for (const SCEV *Op : OrigOps) {
@@ -4313,7 +4339,7 @@ bool ScalarEvolution::canReuseInstruction(
const SCEV *
ScalarEvolution::getSequentialMinMaxExpr(SCEVTypes Kind,
- SmallVectorImpl<const SCEV *> &Ops) {
+ SmallVectorImpl<SCEVUse> &Ops) {
assert(SCEVSequentialMinMaxExpr::isSequentialMinMaxType(Kind) &&
"Not a SCEVSequentialMinMaxExpr!");
assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
@@ -4388,7 +4414,7 @@ ScalarEvolution::getSequentialMinMaxExpr(SCEVTypes Kind,
if (::impliesPoison(Ops[i], Ops[i - 1]) ||
isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, Ops[i - 1],
SaturationPoint)) {
- SmallVector<const SCEV *> SeqOps = {Ops[i - 1], Ops[i]};
+ SmallVector<SCEVUse, 2> SeqOps = {Ops[i - 1], Ops[i]};
Ops[i - 1] = getMinMaxExpr(
SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(Kind),
SeqOps);
@@ -4414,7 +4440,7 @@ ScalarEvolution::getSequentialMinMaxExpr(SCEVTypes Kind,
if (ExistingSCEV)
return ExistingSCEV;
- const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+ SCEVUse *O = SCEVAllocator.Allocate<SCEVUse>(Ops.size());
llvm::uninitialized_copy(Ops, O);
SCEV *S = new (SCEVAllocator)
SCEVSequentialMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size());
@@ -4424,41 +4450,40 @@ ScalarEvolution::getSequentialMinMaxExpr(SCEVTypes Kind,
return S;
}
-const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) {
- SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
- return getSMaxExpr(Ops);
+const SCEV *ScalarEvolution::getSMaxExpr(SCEVUse LHS, SCEVUse RHS) {
+ SmallVector<SCEVUse, 2> Ops = {LHS, RHS};
+ return getMinMaxExpr(scSMaxExpr, Ops);
}
-const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
+const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<SCEVUse> &Ops) {
return getMinMaxExpr(scSMaxExpr, Ops);
}
-const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) {
- SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
- return getUMaxExpr(Ops);
+const SCEV *ScalarEvolution::getUMaxExpr(SCEVUse LHS, SCEVUse RHS) {
+ SmallVector<SCEVUse, 2> Ops = {LHS, RHS};
+ return getMinMaxExpr(scUMaxExpr, Ops);
}
-const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
+const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<SCEVUse> &Ops) {
return getMinMaxExpr(scUMaxExpr, Ops);
}
-const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
- const SCEV *RHS) {
- SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
- return getSMinExpr(Ops);
+const SCEV *ScalarEvolution::getSMinExpr(SCEVUse LHS, SCEVUse RHS) {
+ SmallVector<SCEVUse, 2> Ops = {LHS, RHS};
+ return getMinMaxExpr(scSMinExpr, Ops);
}
-const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
+const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<SCEVUse> &Ops) {
return getMinMaxExpr(scSMinExpr, Ops);
}
-const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, const SCEV *RHS,
+const SCEV *ScalarEvolution::getUMinExpr(SCEVUse LHS, SCEVUse RHS,
bool Sequential) {
- SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
- return getUMinExpr(Ops, Sequential);
+ SmallVector<SCEVUse, 2> Ops = {LHS, RHS};
+ return getUMinExpr(Ops, Sequential);
}
-const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops,
+const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<SCEVUse> &Ops,
bool Sequential) {
return Sequential ? getSequentialMinMaxExpr(scSequentialUMinExpr, Ops)
: getMinMaxExpr(scUMinExpr, Ops);
@@ -4681,7 +4706,7 @@ const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
// Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y)
if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) {
auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) {
- SmallVector<const SCEV *, 2> MatchedOperands;
+ SmallVector<SCEVUse, 2> MatchedOperands;
for (const SCEV *Operand : MME->operands()) {
const SCEV *Matched = MatchNotExpr(Operand);
if (!Matched)
@@ -4705,7 +4730,7 @@ const SCEV *ScalarEvolution::removePointerBase(const SCEV *P) {
if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(P)) {
// The base of an AddRec is the first operand.
- SmallVector<const SCEV *> Ops{AddRec->operands()};
+ SmallVector<SCEVUse> Ops{AddRec->operands()};
Ops[0] = removePointerBase(Ops[0]);
// Don't try to transfer nowrap flags for now. We could in some cases
// (for example, if pointer operand of the AddRec is a SCEVUnknown).
@@ -4713,9 +4738,9 @@ const SCEV *ScalarEvolution::removePointerBase(const SCEV *P) {
}
if (auto *Add = dyn_cast<SCEVAddExpr>(P)) {
// The base of an Add is the pointer operand.
- SmallVector<const SCEV *> Ops{Add->operands()};
- const SCEV **PtrOp = nullptr;
- for (const SCEV *&AddOp : Ops) {
+ SmallVector<SCEVUse> Ops{Add->operands()};
+ SCEVUse *PtrOp = nullptr;
+ for (SCEVUse &AddOp : Ops) {
if (AddOp->getType()->isPointerTy()) {
assert(!PtrOp && "Cannot have multiple pointer ops");
PtrOp = &AddOp;
@@ -4730,7 +4755,7 @@ const SCEV *ScalarEvolution::removePointerBase(const SCEV *P) {
return getZero(P->getType());
}
-const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
+const SCEV *ScalarEvolution::getMinusSCEV(SCEVUse LHS, SCEVUse RHS,
SCEV::NoWrapFlags Flags,
unsigned Depth) {
// Fast path: X - X --> 0.
@@ -4868,12 +4893,12 @@ const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
const SCEV *RHS,
bool Sequential) {
- SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
+ SmallVector<SCEVUse, 2> Ops = {LHS, RHS};
return getUMinFromMismatchedTypes(Ops, Sequential);
}
const SCEV *
-ScalarEvolution::getUMinFromMismatchedTypes(SmallVectorImpl<const SCEV *> &Ops,
+ScalarEvolution::getUMinFromMismatchedTypes(SmallVectorImpl<SCEVUse> &Ops,
bool Sequential) {
assert(!Ops.empty() && "At least one operand must be!");
// Trivial case.
@@ -4882,7 +4907,7 @@ ScalarEvolution::getUMinFromMismatchedTypes(SmallVectorImpl<const SCEV *> &Ops,
// Find the max type first.
Type *MaxType = nullptr;
- for (const auto *S : Ops)
+ for (SCEVUse S : Ops)
if (MaxType)
MaxType = getWiderType(MaxType, S->getType());
else
@@ -4890,8 +4915,8 @@ ScalarEvolution::getUMinFromMismatchedTypes(SmallVectorImpl<const SCEV *> &Ops,
assert(MaxType && "Failed to find maximum type!");
// Extend all ops to max type.
- SmallVector<const SCEV *, 2> PromotedOps;
- for (const auto *S : Ops)
+ SmallVector<SCEVUse, 2> PromotedOps;
+ for (SCEVUse S : Ops)
PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType));
// Generate umin.
@@ -5581,7 +5606,7 @@ ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI
return std::nullopt;
// Create an add with everything but the specified operand.
- SmallVector<const SCEV *, 8> Ops;
+ SmallVector<SCEVUse, 8> Ops;
for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
if (i != FoundIndex)
Ops.push_back(Add->getOperand(i));
@@ -5912,7 +5937,7 @@ const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
if (FoundIndex != Add->getNumOperands()) {
// Create an add with everything but the specified operand.
- SmallVector<const SCEV *, 8> Ops;
+ SmallVector<SCEVUse, 8> Ops;
for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
if (i != FoundIndex)
Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
@@ -5959,7 +5984,7 @@ const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
// Okay, for the entire analysis of this edge we assumed the PHI
// to be symbolic. We now need to go back and purge all of the
// entries for the scalars that use the symbolic expression.
- forgetMemoizedResults(SymbolicName);
+ forgetMemoizedResults(SCEVUse(SymbolicName));
insertValueToMap(PN, PHISCEV);
if (auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
@@ -5999,7 +6024,7 @@ const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
// Okay, for the entire analysis of this edge we assumed the PHI
// to be symbolic. We now need to go back and purge all of the
// entries for the scalars that use the symbolic expression.
- forgetMemoizedResults(SymbolicName);
+ forgetMemoizedResults(SCEVUse(SymbolicName));
insertValueToMap(PN, Shifted);
return Shifted;
}
@@ -6369,7 +6394,7 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
assert(GEP->getSourceElementType()->isSized() &&
"GEP source element type must be sized");
- SmallVector<const SCEV *, 4> IndexExprs;
+ SmallVector<SCEVUse, 4> IndexExprs;
for (Value *Index : GEP->indices())
IndexExprs.push_back(getSCEV(Index));
return getGEPExpr(GEP, IndexExprs);
@@ -6654,7 +6679,7 @@ ScalarEvolution::getRangeRefIter(const SCEV *S,
DenseMap<const SCEV *, ConstantRange> &Cache =
SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
: SignedRanges;
- SmallVector<const SCEV *> WorkList;
+ SmallVector<SCEVUse> WorkList;
SmallPtrSet<const SCEV *, 8> Seen;
// Add Expr to the worklist, if Expr is either an N-ary expression or a
@@ -7380,13 +7405,12 @@ ScalarEvolution::getNonTrivialDefiningScopeBound(const SCEV *S) {
return nullptr;
}
-const Instruction *
-ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops,
- bool &Precise) {
+const Instruction *ScalarEvolution::getDefiningScopeBound(ArrayRef<SCEVUse> Ops,
+ bool &Precise) {
Precise = true;
// Do a bounded search of the def relation of the requested SCEVs.
SmallPtrSet<const SCEV *, 16> Visited;
- SmallVector<const SCEV *> Worklist;
+ SmallVector<SCEVUse> Worklist;
auto pushOp = [&](const SCEV *S) {
if (!Visited.insert(S).second)
return;
@@ -7398,17 +7422,17 @@ ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops,
Worklist.push_back(S);
};
- for (const auto *S : Ops)
+ for (SCEVUse S : Ops)
pushOp(S);
const Instruction *Bound = nullptr;
while (!Worklist.empty()) {
- auto *S = Worklist.pop_back_val();
+ SCEVUse S = Worklist.pop_back_val();
if (auto *DefI = getNonTrivialDefiningScopeBound(S)) {
if (!Bound || DT.dominates(Bound, DefI))
Bound = DefI;
} else {
- for (const auto *Op : S->operands())
+ for (SCEVUse Op : S->operands())
pushOp(Op);
}
}
@@ -7416,7 +7440,7 @@ ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops,
}
const Instruction *
-ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops) {
+ScalarEvolution::getDefiningScopeBound(ArrayRef<SCEVUse> Ops) {
bool Discard;
return getDefiningScopeBound(Ops, Discard);
}
@@ -7471,7 +7495,7 @@ bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) {
// executed every time we enter that scope. When the bounding scope is a
// loop (the common case), this is equivalent to proving I executes on every
// iteration of that loop.
- SmallVector<const SCEV *> SCEVOps;
+ SmallVector<SCEVUse> SCEVOps;
for (const Use &Op : I->operands()) {
// I could be an extractvalue from a call to an overflow intrinsic.
// TODO: We can do better here in some cases.
@@ -7834,7 +7858,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
// because it leads to N-1 getAddExpr calls for N ultimate operands.
// Instead, gather up all the operands and make a single getAddExpr call.
// LLVM IR canonical form means we need only traverse the left operands.
- SmallVector<const SCEV *, 4> AddOps;
+ SmallVector<SCEVUse, 4> AddOps;
do {
if (BO->Op) {
if (auto *OpSCEV = getExistingSCEV(BO->Op)) {
@@ -7880,7 +7904,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
}
case Instruction::Mul: {
- SmallVector<const SCEV *, 4> MulOps;
+ SmallVector<SCEVUse, 4> MulOps;
do {
if (BO->Op) {
if (auto *OpSCEV = getExistingSCEV(BO->Op)) {
@@ -7957,7 +7981,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
unsigned MulZeros = OpC->getAPInt().countr_zero();
unsigned GCD = std::min(MulZeros, TZ);
APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD);
- SmallVector<const SCEV*, 4> MulOps;
+ SmallVector<SCEVUse, 4> MulOps;
MulOps.push_back(getConstant(OpC->getAPInt().ashr(GCD)));
append_range(MulOps, LHSMul->operands().drop_front());
auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags());
@@ -8536,7 +8560,7 @@ ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
// only done to produce more precise results.
if (Result.hasAnyInfo()) {
// Invalidate any expression using an addrec in this loop.
- SmallVector<const SCEV *, 8> ToForget;
+ SmallVector<SCEVUse, 8> ToForget;
auto LoopUsersIt = LoopUsers.find(L);
if (LoopUsersIt != LoopUsers.end())
append_range(ToForget, LoopUsersIt->second);
@@ -8583,7 +8607,7 @@ void ScalarEvolution::forgetAllLoops() {
void ScalarEvolution::visitAndClearUsers(
SmallVectorImpl<Instruction *> &Worklist,
SmallPtrSetImpl<Instruction *> &Visited,
- SmallVectorImpl<const SCEV *> &ToForget) {
+ SmallVectorImpl<SCEVUse> &ToForget) {
while (!Worklist.empty()) {
Instruction *I = Worklist.pop_back_val();
if (!isSCEVable(I->getType()) && !isa<WithOverflowInst>(I))
@@ -8606,7 +8630,7 @@ void ScalarEvolution::forgetLoop(const Loop *L) {
SmallVector<const Loop *, 16> LoopWorklist(1, L);
SmallVector<Instruction *, 32> Worklist;
SmallPtrSet<Instruction *, 16> Visited;
- SmallVector<const SCEV *, 16> ToForget;
+ SmallVector<SCEVUse, 16> ToForget;
// Iterate over all the loops and sub-loops to drop SCEV information.
while (!LoopWorklist.empty()) {
@@ -8653,7 +8677,7 @@ void ScalarEvolution::forgetValue(Value *V) {
// Drop information about expressions based on loop-header PHIs.
SmallVector<Instruction *, 16> Worklist;
SmallPtrSet<Instruction *, 8> Visited;
- SmallVector<const SCEV *, 8> ToForget;
+ SmallVector<SCEVUse, 8> ToForget;
Worklist.push_back(I);
Visited.insert(I);
visitAndClearUsers(Worklist, Visited, ToForget);
@@ -8672,7 +8696,7 @@ void ScalarEvolution::forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V) {
if (const SCEV *S = getExistingSCEV(V)) {
struct InvalidationRootCollector {
Loop *L;
- SmallVector<const SCEV *, 8> Roots;
+ SmallVector<SCEVUse, 8> Roots;
InvalidationRootCollector(Loop *L) : L(L) {}
@@ -8721,7 +8745,7 @@ void ScalarEvolution::forgetBlockAndLoopDispositions(Value *V) {
// S's users may change if S's disposition changes (i.e. a user may change to
// loop-invariant, if S changes to loop invariant), so also invalidate
// dispositions of S's users recursively.
- SmallVector<const SCEV *, 8> Worklist = {S};
+ SmallVector<SCEVUse, 8> Worklist = {S};
SmallPtrSet<const SCEV *, 8> Seen = {S};
while (!Worklist.empty()) {
const SCEV *Curr = Worklist.pop_back_val();
@@ -8757,7 +8781,7 @@ const SCEV *ScalarEvolution::BackedgeTakenInfo::getExact(
// All exiting blocks we have gathered dominate loop's latch, so exact trip
// count is simply a minimum out of all these calculated exit counts.
- SmallVector<const SCEV *, 2> Ops;
+ SmallVector<SCEVUse, 2> Ops;
for (const auto &ENT : ExitNotTaken) {
const SCEV *BECount = ENT.ExactNotTaken;
assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!");
@@ -8825,7 +8849,7 @@ const SCEV *ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(
// merge the max and exact information to approximate a version of
// getConstantMaxBackedgeTakenCount which isn't restricted to just
// constants.
- SmallVector<const SCEV *, 4> ExitCounts;
+ SmallVector<SCEVUse, 4> ExitCounts;
for (const auto &ENT : ExitNotTaken) {
const SCEV *ExitCount = ENT.SymbolicMaxNotTaken;
@@ -9354,7 +9378,7 @@ ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
/*OrNegative=*/true)) {
auto Flags = AR->getNoWrapFlags();
Flags = setFlags(Flags, SCEV::FlagNW);
- SmallVector<const SCEV *> Operands{AR->operands()};
+ SmallVector<SCEVUse> Operands{AR->operands()};
Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), Flags);
}
@@ -9372,7 +9396,7 @@ ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
isKnownPositive(AR->getStepRecurrence(*this))) {
auto Flags = AR->getNoWrapFlags();
Flags = setFlags(Flags, WrapType);
- SmallVector<const SCEV*> Operands{AR->operands()};
+ SmallVector<SCEVUse> Operands{AR->operands()};
Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), Flags);
}
@@ -10053,9 +10077,8 @@ static Constant *BuildConstantFromSCEV(const SCEV *V) {
llvm_unreachable("Unknown SCEV kind!");
}
-const SCEV *
-ScalarEvolution::getWithOperands(const SCEV *S,
- SmallVectorImpl<const SCEV *> &NewOps) {
+const SCEV *ScalarEvolution::getWithOperands(const SCEV *S,
+ SmallVectorImpl<SCEVUse> &NewOps) {
switch (S->getSCEVType()) {
case scTruncate:
case scZeroExtend:
@@ -10109,7 +10132,7 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
// Okay, at least one of these operands is loop variant but might be
// foldable. Build a new instance of the folded commutative expression.
- SmallVector<const SCEV *, 8> NewOps;
+ SmallVector<SCEVUse, 8> NewOps;
NewOps.reserve(AddRec->getNumOperands());
append_range(NewOps, AddRec->operands().take_front(i));
NewOps.push_back(OpAtScope);
@@ -10155,21 +10178,22 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
case scUMinExpr:
case scSMinExpr:
case scSequentialUMinExpr: {
- ArrayRef<const SCEV *> Ops = V->operands();
+ ArrayRef<SCEVUse> Ops = V->operands();
// Avoid performing the look-up in the common case where the specified
// expression has no loop-variant portions.
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
- const SCEV *OpAtScope = getSCEVAtScope(Ops[i], L);
- if (OpAtScope != Ops[i]) {
+ const SCEV *OpAtScope = getSCEVAtScope(Ops[i].getPointer(), L);
+ if (OpAtScope != Ops[i].getPointer()) {
// Okay, at least one of these operands is loop variant but might be
// foldable. Build a new instance of the folded commutative expression.
- SmallVector<const SCEV *, 8> NewOps;
+ SmallVector<SCEVUse, 8> NewOps;
NewOps.reserve(Ops.size());
- append_range(NewOps, Ops.take_front(i));
+ for (unsigned j = 0; j < i; ++j)
+ NewOps.push_back(Ops[j].getPointer());
NewOps.push_back(OpAtScope);
for (++i; i != e; ++i) {
- OpAtScope = getSCEVAtScope(Ops[i], L);
+ OpAtScope = getSCEVAtScope(Ops[i].getPointer(), L);
NewOps.push_back(OpAtScope);
}
@@ -11463,7 +11487,7 @@ ScalarEvolution::getLoopInvariantExitCondDuringFirstIterations(
// work, try the following trick: if the a predicate is invariant for X, it
// is also invariant for umin(X, ...). So try to find something that works
// among subexpressions of MaxIter expressed as umin.
- for (auto *Op : UMin->operands())
+ for (SCEVUse Op : UMin->operands())
if (auto LIP = getLoopInvariantExitCondDuringFirstIterationsImpl(
Pred, LHS, RHS, L, CtxI, Op))
return LIP;
@@ -12779,9 +12803,9 @@ bool ScalarEvolution::isImpliedViaOperations(CmpPredicate Pred, const SCEV *LHS,
if (P != ICmpInst::ICMP_SGT)
return false;
- auto GetOpFromSExt = [&](const SCEV *S) {
+ auto GetOpFromSExt = [&](const SCEV *S) -> const SCEV * {
if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
- return Ext->getOperand();
+ return Ext->getOperand().getPointer();
// TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
// the constant in some cases.
return S;
@@ -12813,8 +12837,8 @@ bool ScalarEvolution::isImpliedViaOperations(CmpPredicate Pred, const SCEV *LHS,
if (!LHSAddExpr->hasNoSignedWrap())
return false;
- auto *LL = LHSAddExpr->getOperand(0);
- auto *LR = LHSAddExpr->getOperand(1);
+ SCEVUse LL = LHSAddExpr->getOperand(0);
+ SCEVUse LR = LHSAddExpr->getOperand(1);
auto *MinusOne = getMinusOne(RHS->getType());
// Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
@@ -13671,7 +13695,7 @@ const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
// If the start is a non-zero constant, shift the range to simplify things.
if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
if (!SC->getValue()->isZero()) {
- SmallVector<const SCEV *, 4> Operands(operands());
+ SmallVector<SCEVUse, 4> Operands(operands());
Operands[0] = SE.getZero(SC->getType());
const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
getNoWrapFlags(FlagNW));
@@ -13743,7 +13767,7 @@ SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const {
// simplification: it is legal to return ({rec1} + {rec2}). For example, it
// may happen if we reach arithmetic depth limit while simplifying. So we
// construct the returned value explicitly.
- SmallVector<const SCEV *, 3> Ops;
+ SmallVector<SCEVUse, 3> Ops;
// If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and
// (this + Step) is {A+B,+,B+C,+...,+,N}.
for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
@@ -14234,7 +14258,7 @@ ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
// This recurrence is variant w.r.t. L if any of its operands
// are variant.
- for (const auto *Op : AR->operands())
+ for (SCEVUse Op : AR->operands())
if (!isLoopInvariant(Op, L))
return LoopVariant;
@@ -14255,7 +14279,7 @@ ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
case scSMinExpr:
case scSequentialUMinExpr: {
bool HasVarying = false;
- for (const auto *Op : S->operands()) {
+ for (SCEVUse Op : S->operands()) {
LoopDisposition D = getLoopDisposition(Op, L);
if (D == LoopVariant)
return LoopVariant;
@@ -14393,9 +14417,9 @@ void ScalarEvolution::forgetBackedgeTakenCounts(const Loop *L,
}
}
-void ScalarEvolution::forgetMemoizedResults(ArrayRef<const SCEV *> SCEVs) {
+void ScalarEvolution::forgetMemoizedResults(ArrayRef<SCEVUse> SCEVs) {
SmallPtrSet<const SCEV *, 8> ToForget(llvm::from_range, SCEVs);
- SmallVector<const SCEV *, 8> Worklist(ToForget.begin(), ToForget.end());
+ SmallVector<SCEVUse, 8> Worklist(ToForget.begin(), ToForget.end());
while (!Worklist.empty()) {
const SCEV *Curr = Worklist.pop_back_val();
@@ -14681,7 +14705,7 @@ void ScalarEvolution::verify() const {
// Verify integrity of SCEV users.
for (const auto &S : UniqueSCEVs) {
- for (const auto *Op : S.operands()) {
+ for (SCEVUse Op : S.operands()) {
// We do not store dependencies of constants.
if (isa<SCEVConstant>(Op))
continue;
@@ -15107,7 +15131,8 @@ const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates(
continue;
ExitCount = getTruncateOrSignExtend(ExitCount, Step->getType());
- const SCEV *Add = getAddExpr(AddRecToCheck->getStart(), ExitCount);
+ const SCEV *Add =
+ getAddExpr(AddRecToCheck->getStart().getPointer(), ExitCount);
if (isKnownPredicate(CmpInst::ICMP_SLT, Add, AddRecToCheck->getStart()))
return nullptr;
}
@@ -15320,6 +15345,15 @@ void ScalarEvolution::registerUser(const SCEV *User,
SCEVUsers[Op].insert(User);
}
+void ScalarEvolution::registerUser(const SCEV *User, ArrayRef<SCEVUse> Ops) {
+ for (const SCEV *Op : Ops)
+ // We do not expect that forgetting cached data for SCEVConstants will ever
+ // open any prospects for sharpening or introduce any correctness issues,
+ // so we don't bother storing their dependencies.
+ if (!isa<SCEVConstant>(Op))
+ SCEVUsers[Op].insert(User);
+}
+
const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
const SCEV *Expr = SE.getSCEV(V);
return getPredicatedSCEV(Expr);
@@ -15553,7 +15587,7 @@ void ScalarEvolution::LoopGuards::collectFromPHI(
}
if (P.first) {
const SCEV *LHS = SE.getSCEV(const_cast<PHINode *>(&Phi));
- SmallVector<const SCEV *, 2> Ops({P.first, LHS});
+ SmallVector<SCEVUse, 2> Ops({P.first, LHS});
const SCEV *RHS = SE.getMinMaxExpr(P.second, Ops);
Guards.RewriteMap.insert({LHS, RHS});
}
@@ -15658,7 +15692,7 @@ static const SCEV *applyDivisibilityOnMinMaxExpr(const SCEV *MinMaxExpr,
auto *DivisibleExpr =
IsMin ? getPreviousSCEVDivisibleByDivisor(MinMaxLHS, Divisor, SE)
: getNextSCEVDivisibleByDivisor(MinMaxLHS, Divisor, SE);
- SmallVector<const SCEV *> Ops = {
+ SmallVector<SCEVUse> Ops = {
applyDivisibilityOnMinMaxExpr(MinMaxRHS, Divisor, SE), DivisibleExpr};
return SE.getMinMaxExpr(SCTy, Ops);
}
@@ -15670,7 +15704,7 @@ void ScalarEvolution::LoopGuards::collectFromBlock(
assert(SE.DT.isReachableFromEntry(Block) && SE.DT.isReachableFromEntry(Pred));
- SmallVector<const SCEV *> ExprsToRewrite;
+ SmallVector<SCEVUse> ExprsToRewrite;
auto CollectCondition = [&](ICmpInst::Predicate Predicate, const SCEV *LHS,
const SCEV *RHS,
DenseMap<const SCEV *, const SCEV *> &RewriteMap,
@@ -15785,7 +15819,7 @@ void ScalarEvolution::LoopGuards::collectFromBlock(
break;
}
- SmallVector<const SCEV *, 16> Worklist(1, LHS);
+ SmallVector<SCEVUse, 16> Worklist(1, LHS);
SmallPtrSet<const SCEV *, 16> Visited;
auto EnqueueOperands = [&Worklist](const SCEVNAryExpr *S) {
@@ -16117,14 +16151,14 @@ const SCEV *ScalarEvolution::LoopGuards::rewrite(const SCEV *Expr) const {
SE.getAddExpr(Expr->getOperand(1), Expr->getOperand(2));
if (const SCEV *Rewritten = RewriteSubtraction(Add))
return SE.getAddExpr(
- Expr->getOperand(0), Rewritten,
+ Expr->getOperand(0).getPointer(), Rewritten,
ScalarEvolution::maskFlags(Expr->getNoWrapFlags(), FlagMask));
if (const SCEV *S = Map.lookup(Add))
- return SE.getAddExpr(Expr->getOperand(0), S);
+ return SE.getAddExpr(Expr->getOperand(0).getPointer(), S);
}
- SmallVector<const SCEV *, 2> Operands;
+ SmallVector<SCEVUse, 2> Operands;
bool Changed = false;
- for (const auto *Op : Expr->operands()) {
+ for (SCEVUse Op : Expr->operands()) {
Operands.push_back(
SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visit(Op));
Changed |= Op != Operands.back();
@@ -16138,9 +16172,9 @@ const SCEV *ScalarEvolution::LoopGuards::rewrite(const SCEV *Expr) const {
}
const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
+ SmallVector<SCEVUse, 2> Operands;
bool Changed = false;
- for (const auto *Op : Expr->operands()) {
+ for (SCEVUse Op : Expr->operands()) {
Operands.push_back(
SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visit(Op));
Changed |= Op != Operands.back();
diff --git a/llvm/lib/Analysis/ScalarEvolutionDivision.cpp b/llvm/lib/Analysis/ScalarEvolutionDivision.cpp
index 52d82785f6b9c..710473191f8c6 100644
--- a/llvm/lib/Analysis/ScalarEvolutionDivision.cpp
+++ b/llvm/lib/Analysis/ScalarEvolutionDivision.cpp
@@ -149,7 +149,7 @@ void SCEVDivision::visitAddRecExpr(const SCEVAddRecExpr *Numerator) {
}
void SCEVDivision::visitAddExpr(const SCEVAddExpr *Numerator) {
- SmallVector<const SCEV *, 2> Qs, Rs;
+ SmallVector<SCEVUse, 2> Qs, Rs;
Type *Ty = Denominator->getType();
for (const SCEV *Op : Numerator->operands()) {
@@ -175,7 +175,7 @@ void SCEVDivision::visitAddExpr(const SCEVAddExpr *Numerator) {
}
void SCEVDivision::visitMulExpr(const SCEVMulExpr *Numerator) {
- SmallVector<const SCEV *, 2> Qs;
+ SmallVector<SCEVUse, 2> Qs;
Type *Ty = Denominator->getType();
bool FoundDenominatorTerm = false;
diff --git a/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp b/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp
index cfc5b84554548..bfc2d6aafdbfc 100644
--- a/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp
+++ b/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp
@@ -46,10 +46,10 @@ struct NormalizeDenormalizeRewriter
const SCEV *
NormalizeDenormalizeRewriter::visitAddRecExpr(const SCEVAddRecExpr *AR) {
- SmallVector<const SCEV *, 8> Operands;
+ SmallVector<SCEVUse, 8> Operands;
transform(AR->operands(), std::back_inserter(Operands),
- [&](const SCEV *Op) { return visit(Op); });
+ [&](SCEVUse Op) { return visit(Op.getPointer()); });
if (!Pred(AR))
return SE.getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagAnyWrap);
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
index bcf4d6286b4d3..d0f3fa86ef007 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -2785,7 +2785,7 @@ void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
bool Runtime = true;
if (ST->hasLOB()) {
if (SE.hasLoopInvariantBackedgeTakenCount(L)) {
- const auto *BETC = SE.getBackedgeTakenCount(L);
+ const SCEV *BETC = SE.getBackedgeTakenCount(L);
auto *Outer = L->getOutermostLoop();
if ((L != Outer && Outer != L->getParentLoop()) ||
(L != Outer && BETC && !SE.isLoopInvariant(BETC, Outer))) {
diff --git a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
index 305fe05c4f180..385ef0e9be551 100644
--- a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -1395,10 +1395,11 @@ createReplacement(ICmpInst *ICmp, const Loop *L, BasicBlock *ExitingBB,
// wrap". getLoopInvariantExitCondDuringFirstIterations knows how to deal
// with umin in a smart way, but umin(a, b) - 1 will likely not simplify.
// So we manually construct umin(a - 1, b - 1).
- SmallVector<const SCEV *, 4> Elements;
+ SmallVector<SCEVUse, 4> Elements;
if (auto *UMin = dyn_cast<SCEVUMinExpr>(MaxIter)) {
- for (const SCEV *Op : UMin->operands())
- Elements.push_back(SE->getMinusSCEV(Op, SE->getOne(Op->getType())));
+ for (SCEVUse Op : UMin->operands())
+ Elements.push_back(SE->getMinusSCEV(
+ Op.getPointer(), SE->getOne(Op.getPointer()->getType())));
MaxIter = SE->getUMinFromMismatchedTypes(Elements);
} else
MaxIter = SE->getMinusSCEV(MaxIter, SE->getOne(MaxIter->getType()));
diff --git a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
index deff79b403883..97ecbeaae5e37 100644
--- a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
@@ -423,7 +423,7 @@ bool InductiveRangeCheck::reassociateSubLHS(
auto getExprScaledIfOverflow = [&](Instruction::BinaryOps BinOp,
const SCEV *LHS,
const SCEV *RHS) -> const SCEV * {
- const SCEV *(ScalarEvolution::*Operation)(const SCEV *, const SCEV *,
+    const SCEV *(ScalarEvolution::*Operation)(SCEVUse, SCEVUse,
SCEV::NoWrapFlags, unsigned);
switch (BinOp) {
default:
diff --git a/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp b/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp
index 6f0d8dcb05e04..b58ed17e87b8f 100644
--- a/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp
@@ -391,9 +391,10 @@ bool LoopDataPrefetch::runOnLoop(Loop *L) {
BasicBlock *BB = P.InsertPt->getParent();
SCEVExpander SCEVE(*SE, "prefaddr");
- const SCEV *NextLSCEV = SE->getAddExpr(P.LSCEVAddRec, SE->getMulExpr(
- SE->getConstant(P.LSCEVAddRec->getType(), ItersAhead),
- P.LSCEVAddRec->getStepRecurrence(*SE)));
+ const SCEV *NextLSCEV = SE->getAddExpr(
+ P.LSCEVAddRec,
+ SE->getMulExpr(SE->getConstant(P.LSCEVAddRec->getType(), ItersAhead),
+ P.LSCEVAddRec->getStepRecurrence(*SE).getPointer()));
if (!SCEVE.isSafeToExpand(NextLSCEV))
continue;
diff --git a/llvm/lib/Transforms/Scalar/LoopFuse.cpp b/llvm/lib/Transforms/Scalar/LoopFuse.cpp
index b28e548e59a47..e8067d22924d7 100644
--- a/llvm/lib/Transforms/Scalar/LoopFuse.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopFuse.cpp
@@ -1132,9 +1132,10 @@ struct LoopFuser {
const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
const Loop *ExprL = Expr->getLoop();
- SmallVector<const SCEV *, 2> Operands;
+ SmallVector<SCEVUse, 2> Operands;
if (ExprL == &OldL) {
- append_range(Operands, Expr->operands());
+ for (SCEVUse Op : Expr->operands())
+ Operands.push_back(Op.getPointer());
return SE.getAddRecExpr(Operands, &NewL, Expr->getNoWrapFlags());
}
@@ -1144,11 +1145,11 @@ struct LoopFuser {
Valid = false;
return Expr;
}
- return visit(Expr->getStart());
+ return visit(Expr->getStart().getPointer());
}
- for (const SCEV *Op : Expr->operands())
- Operands.push_back(visit(Op));
+ for (SCEVUse Op : Expr->operands())
+ Operands.push_back(visit(Op.getPointer()));
return SE.getAddRecExpr(Operands, ExprL, Expr->getNoWrapFlags());
}
diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index 074cc73d53080..d6200b1909226 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -889,7 +889,7 @@ bool LoopIdiomRecognize::processLoopMemSet(MemSetInst *MSI,
return false;
}
- const SCEV *MemsetSizeSCEV = SE->getSCEV(MSI->getLength());
+ SCEVUse MemsetSizeSCEV = SE->getSCEV(MSI->getLength());
bool IsNegStride = false;
const bool IsConstantSize = isa<ConstantInt>(MSI->getLength());
@@ -928,9 +928,9 @@ bool LoopIdiomRecognize::processLoopMemSet(MemSetInst *MSI,
// Compare positive direction PointerStrideSCEV with MemsetSizeSCEV
IsNegStride = PointerStrideSCEV->isNonConstantNegative();
- const SCEV *PositiveStrideSCEV =
- IsNegStride ? SE->getNegativeSCEV(PointerStrideSCEV)
- : PointerStrideSCEV;
+ SCEVUse PositiveStrideSCEV =
+ IsNegStride ? SCEVUse(SE->getNegativeSCEV(PointerStrideSCEV))
+ : SCEVUse(PointerStrideSCEV);
LLVM_DEBUG(dbgs() << " MemsetSizeSCEV: " << *MemsetSizeSCEV << "\n"
<< " PositiveStrideSCEV: " << *PositiveStrideSCEV
<< "\n");
diff --git a/llvm/lib/Transforms/Scalar/LoopPredication.cpp b/llvm/lib/Transforms/Scalar/LoopPredication.cpp
index ea261491b7f94..26be4159784f0 100644
--- a/llvm/lib/Transforms/Scalar/LoopPredication.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopPredication.cpp
@@ -1006,9 +1006,9 @@ static const SCEV *getMinAnalyzeableBackedgeTakenCount(ScalarEvolution &SE,
SmallVector<BasicBlock *, 16> ExitingBlocks;
L->getExitingBlocks(ExitingBlocks);
- SmallVector<const SCEV *, 4> ExitCounts;
+ SmallVector<SCEVUse, 4> ExitCounts;
for (BasicBlock *ExitingBB : ExitingBlocks) {
- const SCEV *ExitCount = SE.getExitCount(L, ExitingBB);
+ SCEVUse ExitCount = SE.getExitCount(L, ExitingBB);
if (isa<SCEVCouldNotCompute>(ExitCount))
continue;
assert(DT.dominates(ExitingBB, L->getLoopLatch()) &&
@@ -1018,7 +1018,10 @@ static const SCEV *getMinAnalyzeableBackedgeTakenCount(ScalarEvolution &SE,
}
if (ExitCounts.size() < 2)
return SE.getCouldNotCompute();
- return SE.getUMinFromMismatchedTypes(ExitCounts);
+ SmallVector<SCEVUse, 4> ExitCountPtrs;
+ for (SCEVUse U : ExitCounts)
+ ExitCountPtrs.push_back(U);
+ return SE.getUMinFromMismatchedTypes(ExitCountPtrs);
}
/// This implements an analogous, but entirely distinct transform from the main
diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 839942fac6716..a4ca0de522f86 100644
--- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -541,9 +541,8 @@ struct Formula {
/// Recursion helper for initialMatch.
static void DoInitialMatch(const SCEV *S, Loop *L,
- SmallVectorImpl<const SCEV *> &Good,
- SmallVectorImpl<const SCEV *> &Bad,
- ScalarEvolution &SE) {
+ SmallVectorImpl<SCEVUse> &Good,
+ SmallVectorImpl<SCEVUse> &Bad, ScalarEvolution &SE) {
// Collect expressions which properly dominate the loop header.
if (SE.properlyDominates(S, L->getHeader())) {
Good.push_back(S);
@@ -574,11 +573,11 @@ static void DoInitialMatch(const SCEV *S, Loop *L,
// Handle a multiplication by -1 (negation) if it didn't fold.
if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S))
if (Mul->getOperand(0)->isAllOnesValue()) {
- SmallVector<const SCEV *, 4> Ops(drop_begin(Mul->operands()));
+ SmallVector<SCEVUse, 4> Ops(drop_begin(Mul->operands()));
const SCEV *NewMul = SE.getMulExpr(Ops);
- SmallVector<const SCEV *, 4> MyGood;
- SmallVector<const SCEV *, 4> MyBad;
+ SmallVector<SCEVUse, 4> MyGood;
+ SmallVector<SCEVUse, 4> MyBad;
DoInitialMatch(NewMul, L, MyGood, MyBad, SE);
const SCEV *NegOne = SE.getSCEV(ConstantInt::getAllOnesValue(
SE.getEffectiveSCEVType(NewMul->getType())));
@@ -597,8 +596,8 @@ static void DoInitialMatch(const SCEV *S, Loop *L,
/// Incorporate loop-variant parts of S into this Formula, attempting to keep
/// all loop-invariant and loop-computable values in a single base register.
void Formula::initialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE) {
- SmallVector<const SCEV *, 4> Good;
- SmallVector<const SCEV *, 4> Bad;
+ SmallVector<SCEVUse, 4> Good;
+ SmallVector<SCEVUse, 4> Bad;
DoInitialMatch(S, L, Good, Bad, SE);
if (!Good.empty()) {
const SCEV *Sum = SE.getAddExpr(Good);
@@ -877,7 +876,7 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
// Distribute the sdiv over add operands, if the add doesn't overflow.
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(LHS)) {
if (IgnoreSignificantBits || isAddSExtable(Add, SE)) {
- SmallVector<const SCEV *, 8> Ops;
+ SmallVector<SCEVUse, 8> Ops;
for (const SCEV *S : Add->operands()) {
const SCEV *Op = getExactSDiv(S, RHS, SE, IgnoreSignificantBits);
if (!Op) return nullptr;
@@ -906,7 +905,7 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
}
}
- SmallVector<const SCEV *, 4> Ops;
+ SmallVector<SCEVUse, 4> Ops;
bool Found = false;
for (const SCEV *S : Mul->operands()) {
if (!Found)
@@ -928,7 +927,7 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
/// If S involves the addition of a constant integer value, return that integer
/// value, and mutate S to point to a new SCEV with that value excluded.
-static Immediate ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) {
+static Immediate ExtractImmediate(SCEVUse &S, ScalarEvolution &SE) {
const APInt *C;
if (match(S, m_scev_APInt(C))) {
if (C->getSignificantBits() <= 64) {
@@ -936,13 +935,13 @@ static Immediate ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) {
return Immediate::getFixed(C->getSExtValue());
}
} else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
- SmallVector<const SCEV *, 8> NewOps(Add->operands());
+ SmallVector<SCEVUse, 8> NewOps(Add->operands());
Immediate Result = ExtractImmediate(NewOps.front(), SE);
if (Result.isNonZero())
S = SE.getAddExpr(NewOps);
return Result;
} else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
- SmallVector<const SCEV *, 8> NewOps(AR->operands());
+ SmallVector<SCEVUse, 8> NewOps(AR->operands());
Immediate Result = ExtractImmediate(NewOps.front(), SE);
if (Result.isNonZero())
S = SE.getAddRecExpr(NewOps, AR->getLoop(),
@@ -959,21 +958,25 @@ static Immediate ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) {
/// If S involves the addition of a GlobalValue address, return that symbol, and
/// mutate S to point to a new SCEV with that value excluded.
-static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) {
+static GlobalValue *ExtractSymbol(SCEVUse &S, ScalarEvolution &SE) {
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) {
S = SE.getConstant(GV->getType(), 0);
return GV;
}
} else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
- SmallVector<const SCEV *, 8> NewOps(Add->operands());
- GlobalValue *Result = ExtractSymbol(NewOps.back(), SE);
+ SmallVector<SCEVUse, 8> NewOps(Add->operands());
+ SCEVUse Back = NewOps.back();
+ GlobalValue *Result = ExtractSymbol(Back, SE);
+ NewOps.back() = Back;
if (Result)
S = SE.getAddExpr(NewOps);
return Result;
} else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
- SmallVector<const SCEV *, 8> NewOps(AR->operands());
- GlobalValue *Result = ExtractSymbol(NewOps.front(), SE);
+ SmallVector<SCEVUse, 8> NewOps(AR->operands());
+ SCEVUse Front = NewOps.front();
+ GlobalValue *Result = ExtractSymbol(Front, SE);
+ NewOps.front() = Front;
if (Result)
S = SE.getAddRecExpr(NewOps, AR->getLoop(),
// FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
@@ -2042,11 +2045,13 @@ static bool isAlwaysFoldable(const TargetTransformInfo &TTI,
// Conservatively, create an address with an immediate and a
// base and a scale.
- Immediate BaseOffset = ExtractImmediate(S, SE);
- GlobalValue *BaseGV = ExtractSymbol(S, SE);
+ SCEVUse SCopy = S;
+ Immediate BaseOffset = ExtractImmediate(SCopy, SE);
+ GlobalValue *BaseGV = ExtractSymbol(SCopy, SE);
// If there's anything else involved, it's not foldable.
- if (!S->isZero()) return false;
+ if (!SCopy->isZero())
+ return false;
// Fast-path: zero is always foldable.
if (BaseOffset.isZero() && !BaseGV)
@@ -2816,7 +2821,9 @@ std::pair<size_t, Immediate> LSRInstance::getUse(const SCEV *&Expr,
LSRUse::KindType Kind,
MemAccessTy AccessTy) {
const SCEV *Copy = Expr;
- Immediate Offset = ExtractImmediate(Expr, SE);
+ SCEVUse ExprUse = Expr;
+ Immediate Offset = ExtractImmediate(ExprUse, SE);
+ Expr = ExprUse;
// Basic uses can't accept any offset, for example.
if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ nullptr,
@@ -3958,7 +3965,7 @@ void LSRInstance::GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx,
continue;
// Collect all operands except *J.
- SmallVector<const SCEV *, 8> InnerAddOps(std::as_const(AddOps).begin(), J);
+ SmallVector<SCEVUse, 8> InnerAddOps(std::as_const(AddOps).begin(), J);
InnerAddOps.append(std::next(J), std::as_const(AddOps).end());
// Don't leave just a constant behind in a register if the constant could
@@ -4049,7 +4056,7 @@ void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
// Flatten the representation, i.e., reg1 + 1*reg2 => reg1 + reg2, before
// processing the formula.
Base.unscale();
- SmallVector<const SCEV *, 4> Ops;
+ SmallVector<SCEVUse, 4> Ops;
Formula NewBase = Base;
NewBase.BaseRegs.clear();
Type *CombinedIntegerType = nullptr;
@@ -4086,7 +4093,7 @@ void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
// If we collected at least two registers, generate a formula combining them.
if (Ops.size() > 1) {
- SmallVector<const SCEV *, 4> OpsCopy(Ops); // Don't let SE modify Ops.
+ SmallVector<SCEVUse, 4> OpsCopy(Ops); // Don't let SE modify Ops.
GenerateFormula(SE.getAddExpr(OpsCopy));
}
@@ -4105,7 +4112,7 @@ void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
void LSRInstance::GenerateSymbolicOffsetsImpl(LSRUse &LU, unsigned LUIdx,
const Formula &Base, size_t Idx,
bool IsScaledReg) {
- const SCEV *G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];
+ SCEVUse G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];
GlobalValue *GV = ExtractSymbol(G, SE);
if (G->isZero() || !GV)
return;
@@ -4165,7 +4172,7 @@ void LSRInstance::GenerateConstantOffsetsImpl(
}
};
- const SCEV *G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];
+ SCEVUse G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];
// With constant offsets and constant steps, we can generate pre-inc
// accesses by having the offset equal the step. So, for access #0 with a
@@ -4522,7 +4529,7 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
DenseMap<const SCEV *, SmallBitVector> UsedByIndicesMap;
SmallVector<const SCEV *, 8> Sequence;
for (const SCEV *Use : RegUses) {
- const SCEV *Reg = Use; // Make a copy for ExtractImmediate to modify.
+ SCEVUse Reg = Use; // Make a copy for ExtractImmediate to modify.
Immediate Imm = ExtractImmediate(Reg, SE);
auto Pair = Map.try_emplace(Reg);
if (Pair.second)
@@ -5716,7 +5723,7 @@ Value *LSRInstance::Expand(const LSRUse &LU, const LSRFixup &LF,
Type *IntTy = SE.getEffectiveSCEVType(Ty);
// Build up a list of operands to add together to form the full base.
- SmallVector<const SCEV *, 8> Ops;
+ SmallVector<SCEVUse, 8> Ops;
// Expand the BaseRegs portion.
for (const SCEV *Reg : F.BaseRegs) {
diff --git a/llvm/lib/Transforms/Scalar/NaryReassociate.cpp b/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
index ec145f2f48bea..af6f2eb45e130 100644
--- a/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
+++ b/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
@@ -225,7 +225,7 @@ bool NaryReassociatePass::doOneIteration(Function &F) {
for (const auto Node : depth_first(DT)) {
BasicBlock *BB = Node->getBlock();
for (Instruction &OrigI : *BB) {
- const SCEV *OrigSCEV = nullptr;
+ SCEVUse OrigSCEV = nullptr;
if (Instruction *NewI = tryReassociate(&OrigI, OrigSCEV)) {
Changed = true;
OrigI.replaceAllUsesWith(NewI);
@@ -234,7 +234,7 @@ bool NaryReassociatePass::doOneIteration(Function &F) {
DeadInsts.push_back(WeakTrackingVH(&OrigI));
// Add the rewritten instruction to SeenExprs; the original
// instruction is deleted.
- const SCEV *NewSCEV = SE->getSCEV(NewI);
+ SCEVUse NewSCEV = SE->getSCEV(NewI);
SeenExprs[NewSCEV].push_back(WeakTrackingVH(NewI));
// Ideally, NewSCEV should equal OldSCEV because tryReassociate(I)
@@ -273,7 +273,7 @@ bool NaryReassociatePass::doOneIteration(Function &F) {
template <typename PredT>
Instruction *
NaryReassociatePass::matchAndReassociateMinOrMax(Instruction *I,
- const SCEV *&OrigSCEV) {
+ SCEVUse &OrigSCEV) {
Value *LHS = nullptr;
Value *RHS = nullptr;
@@ -292,8 +292,8 @@ NaryReassociatePass::matchAndReassociateMinOrMax(Instruction *I,
return nullptr;
}
-Instruction *NaryReassociatePass::tryReassociate(Instruction * I,
- const SCEV *&OrigSCEV) {
+Instruction *NaryReassociatePass::tryReassociate(Instruction *I,
+ SCEVUse &OrigSCEV) {
if (!SE->isSCEVable(I->getType()))
return nullptr;
@@ -397,7 +397,7 @@ NaryReassociatePass::tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
Value *RHS, Type *IndexedType) {
// Look for GEP's closest dominator that has the same SCEV as GEP except that
// the I-th index is replaced with LHS.
- SmallVector<const SCEV *, 4> IndexExprs;
+ SmallVector<SCEVUse, 4> IndexExprs;
for (Use &Index : GEP->indices())
IndexExprs.push_back(SE->getSCEV(Index));
// Replace the I-th index with LHS.
@@ -414,8 +414,10 @@ NaryReassociatePass::tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
// @reassociate_gep_assume for an example of this canonicalization.
IndexExprs[I] = SE->getZeroExtendExpr(IndexExprs[I], GEPArgType);
}
- const SCEV *CandidateExpr = SE->getGEPExpr(cast<GEPOperator>(GEP),
- IndexExprs);
+ SmallVector<SCEVUse, 4> IndexPtrs;
+ for (SCEVUse U : IndexExprs)
+ IndexPtrs.push_back(U);
+ SCEVUse CandidateExpr = SE->getGEPExpr(cast<GEPOperator>(GEP), IndexPtrs);
Value *Candidate = findClosestMatchingDominator(CandidateExpr, GEP);
if (Candidate == nullptr)
@@ -481,8 +483,8 @@ Instruction *NaryReassociatePass::tryReassociateBinaryOp(Value *LHS, Value *RHS,
if (LHS->hasOneUse() && matchTernaryOp(I, LHS, A, B)) {
// I = (A op B) op RHS
// = (A op RHS) op B or (B op RHS) op A
- const SCEV *AExpr = SE->getSCEV(A), *BExpr = SE->getSCEV(B);
- const SCEV *RHSExpr = SE->getSCEV(RHS);
+ SCEVUse AExpr = SE->getSCEV(A), BExpr = SE->getSCEV(B);
+ SCEVUse RHSExpr = SE->getSCEV(RHS);
if (BExpr != RHSExpr) {
if (auto *NewI =
tryReassociatedBinaryOp(getBinarySCEV(I, AExpr, RHSExpr), B, I))
@@ -497,7 +499,7 @@ Instruction *NaryReassociatePass::tryReassociateBinaryOp(Value *LHS, Value *RHS,
return nullptr;
}
-Instruction *NaryReassociatePass::tryReassociatedBinaryOp(const SCEV *LHSExpr,
+Instruction *NaryReassociatePass::tryReassociatedBinaryOp(SCEVUse LHSExpr,
Value *RHS,
BinaryOperator *I) {
// Look for the closest dominator LHS of I that computes LHSExpr, and replace
@@ -535,9 +537,8 @@ bool NaryReassociatePass::matchTernaryOp(BinaryOperator *I, Value *V,
return false;
}
-const SCEV *NaryReassociatePass::getBinarySCEV(BinaryOperator *I,
- const SCEV *LHS,
- const SCEV *RHS) {
+SCEVUse NaryReassociatePass::getBinarySCEV(BinaryOperator *I, SCEVUse LHS,
+ SCEVUse RHS) {
switch (I->getOpcode()) {
case Instruction::Add:
return SE->getAddExpr(LHS, RHS);
@@ -550,7 +551,7 @@ const SCEV *NaryReassociatePass::getBinarySCEV(BinaryOperator *I,
}
Instruction *
-NaryReassociatePass::findClosestMatchingDominator(const SCEV *CandidateExpr,
+NaryReassociatePass::findClosestMatchingDominator(SCEVUse CandidateExpr,
Instruction *Dominatee) {
auto Pos = SeenExprs.find(CandidateExpr);
if (Pos == SeenExprs.end())
@@ -622,12 +623,11 @@ Value *NaryReassociatePass::tryReassociateMinOrMax(Instruction *I,
}))
return nullptr;
- auto tryCombination = [&](Value *A, const SCEV *AExpr, Value *B,
- const SCEV *BExpr, Value *C,
- const SCEV *CExpr) -> Value * {
- SmallVector<const SCEV *, 2> Ops1{BExpr, AExpr};
+ auto tryCombination = [&](Value *A, SCEVUse AExpr, Value *B, SCEVUse BExpr,
+ Value *C, SCEVUse CExpr) -> Value * {
+ SmallVector<SCEVUse, 2> Ops1{BExpr, AExpr};
const SCEVTypes SCEVType = convertToSCEVype(m_MaxMin);
- const SCEV *R1Expr = SE->getMinMaxExpr(SCEVType, Ops1);
+ SCEVUse R1Expr = SE->getMinMaxExpr(SCEVType, Ops1);
Instruction *R1MinMax = findClosestMatchingDominator(R1Expr, I);
@@ -636,9 +636,8 @@ Value *NaryReassociatePass::tryReassociateMinOrMax(Instruction *I,
LLVM_DEBUG(dbgs() << "NARY: Found common sub-expr: " << *R1MinMax << "\n");
- SmallVector<const SCEV *, 2> Ops2{SE->getUnknown(C),
- SE->getUnknown(R1MinMax)};
- const SCEV *R2Expr = SE->getMinMaxExpr(SCEVType, Ops2);
+ SmallVector<SCEVUse, 2> Ops2{SE->getUnknown(C), SE->getUnknown(R1MinMax)};
+ SCEVUse R2Expr = SE->getMinMaxExpr(SCEVType, Ops2);
SCEVExpander Expander(*SE, "nary-reassociate");
Value *NewMinMax = Expander.expandCodeFor(R2Expr, I->getType(), I);
@@ -649,9 +648,9 @@ Value *NaryReassociatePass::tryReassociateMinOrMax(Instruction *I,
return NewMinMax;
};
- const SCEV *AExpr = SE->getSCEV(A);
- const SCEV *BExpr = SE->getSCEV(B);
- const SCEV *RHSExpr = SE->getSCEV(RHS);
+ SCEVUse AExpr = SE->getSCEV(A);
+ SCEVUse BExpr = SE->getSCEV(B);
+ SCEVUse RHSExpr = SE->getSCEV(RHS);
if (BExpr != RHSExpr) {
// Try (A op RHS) op B
diff --git a/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
index c371a9d2f6416..74d06351eba20 100644
--- a/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
@@ -1086,7 +1086,7 @@ void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForGEP(
if (GEP->getType()->isVectorTy())
return;
- SmallVector<const SCEV *, 4> IndexExprs;
+ SmallVector<SCEVUse, 4> IndexExprs;
for (Use &Idx : GEP->indices())
IndexExprs.push_back(SE->getSCEV(Idx));
@@ -1095,12 +1095,15 @@ void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForGEP(
if (GTI.isStruct())
continue;
- const SCEV *OrigIndexExpr = IndexExprs[I - 1];
- IndexExprs[I - 1] = SE->getZero(OrigIndexExpr->getType());
+ SCEVUse OrigIndexExpr = IndexExprs[I - 1];
+ IndexExprs[I - 1] = SE->getZero(OrigIndexExpr.getPointer()->getType());
// The base of this candidate is GEP's base plus the offsets of all
// indices except this current one.
- const SCEV *BaseExpr = SE->getGEPExpr(cast<GEPOperator>(GEP), IndexExprs);
+ SmallVector<SCEVUse, 4> IndexPtrs;
+ for (SCEVUse U : IndexExprs)
+ IndexPtrs.push_back(U);
+ SCEVUse BaseExpr = SE->getGEPExpr(cast<GEPOperator>(GEP), IndexPtrs);
Value *ArrayIdx = GEP->getOperand(I);
uint64_t ElementSize = GTI.getSequentialElementStride(*DL);
IntegerType *PtrIdxTy = cast<IntegerType>(DL->getIndexType(GEP->getType()));
diff --git a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
index 5dd388e95accb..ac60837584763 100644
--- a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
@@ -563,7 +563,7 @@ Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
if (isa<PointerType>(Sum->getType())) {
// The running sum expression is a pointer. Try to form a getelementptr
// at this level with that as the base.
- SmallVector<const SCEV *, 4> NewOps;
+ SmallVector<SCEVUse, 4> NewOps;
for (; I != E && I->first == CurLoop; ++I) {
// If the operand is SCEVUnknown and not instructions, peek through
// it, to enable more of it to be folded into the GEP.
@@ -1336,7 +1336,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
if (CanonicalIV &&
SE.getTypeSizeInBits(CanonicalIV->getType()) > SE.getTypeSizeInBits(Ty) &&
!S->getType()->isPointerTy()) {
- SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
+ SmallVector<SCEVUse, 4> NewOps(S->getNumOperands());
for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
NewOps[i] = SE.getAnyExtendExpr(S->getOperand(i), CanonicalIV->getType());
Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
@@ -1360,7 +1360,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
S->getNoWrapFlags(SCEV::FlagNUW));
}
- SmallVector<const SCEV *, 4> NewOps(S->operands());
+ SmallVector<SCEVUse, 4> NewOps(S->operands());
NewOps[0] = SE.getConstant(Ty, 0);
const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
S->getNoWrapFlags(SCEV::FlagNW));
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
index bd9cf3077a052..ea18cddd3df73 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
@@ -537,7 +537,8 @@ class SCEVAddRecForUniformityRewriter
const SCEV *NewStep =
SE.getMulExpr(Step, SE.getConstant(Ty, StepMultiplier));
const SCEV *ScaledOffset = SE.getMulExpr(Step, SE.getConstant(Ty, Offset));
- const SCEV *NewStart = SE.getAddExpr(Expr->getStart(), ScaledOffset);
+ const SCEV *NewStart =
+ SE.getAddExpr(Expr->getStart(), SCEVUse(ScaledOffset));
return SE.getAddRecExpr(NewStart, NewStep, TheLoop, SCEV::FlagAnyWrap);
}
diff --git a/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp b/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp
index 821a4f7911bb8..6614e7cc3a7e9 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp
@@ -149,11 +149,10 @@ const SCEV *vputils::getSCEVExprForVPValue(const VPValue *V,
}
// Helper to create SCEVs for binary and unary operations.
- auto CreateSCEV =
- [&](ArrayRef<VPValue *> Ops,
- function_ref<const SCEV *(ArrayRef<const SCEV *>)> CreateFn)
+ auto CreateSCEV = [&](ArrayRef<VPValue *> Ops,
+ function_ref<const SCEV *(ArrayRef<SCEVUse>)> CreateFn)
-> const SCEV * {
- SmallVector<const SCEV *, 2> SCEVOps;
+ SmallVector<SCEVUse, 2> SCEVOps;
for (VPValue *Op : Ops) {
const SCEV *S = getSCEVExprForVPValue(Op, PSE, L);
if (isa<SCEVCouldNotCompute>(S))
@@ -165,46 +164,46 @@ const SCEV *vputils::getSCEVExprForVPValue(const VPValue *V,
VPValue *LHSVal, *RHSVal;
if (match(V, m_Add(m_VPValue(LHSVal), m_VPValue(RHSVal))))
- return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<const SCEV *> Ops) {
+ return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<SCEVUse> Ops) {
return SE.getAddExpr(Ops[0], Ops[1], SCEV::FlagAnyWrap, 0);
});
if (match(V, m_Sub(m_VPValue(LHSVal), m_VPValue(RHSVal))))
- return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<const SCEV *> Ops) {
+ return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<SCEVUse> Ops) {
return SE.getMinusSCEV(Ops[0], Ops[1], SCEV::FlagAnyWrap, 0);
});
if (match(V, m_Not(m_VPValue(LHSVal)))) {
// not X = xor X, -1 = -1 - X
- return CreateSCEV({LHSVal}, [&](ArrayRef<const SCEV *> Ops) {
+ return CreateSCEV({LHSVal}, [&](ArrayRef<SCEVUse> Ops) {
return SE.getMinusSCEV(SE.getMinusOne(Ops[0]->getType()), Ops[0]);
});
}
if (match(V, m_Mul(m_VPValue(LHSVal), m_VPValue(RHSVal))))
- return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<const SCEV *> Ops) {
+ return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<SCEVUse> Ops) {
return SE.getMulExpr(Ops[0], Ops[1], SCEV::FlagAnyWrap, 0);
});
if (match(V,
m_Binary<Instruction::UDiv>(m_VPValue(LHSVal), m_VPValue(RHSVal))))
- return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<const SCEV *> Ops) {
+ return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<SCEVUse> Ops) {
return SE.getUDivExpr(Ops[0], Ops[1]);
});
// Handle AND with constant mask: x & (2^n - 1) can be represented as x % 2^n.
const APInt *Mask;
if (match(V, m_c_BinaryAnd(m_VPValue(LHSVal), m_APInt(Mask))) &&
(*Mask + 1).isPowerOf2())
- return CreateSCEV({LHSVal}, [&](ArrayRef<const SCEV *> Ops) {
+ return CreateSCEV({LHSVal}, [&](ArrayRef<SCEVUse> Ops) {
return SE.getURemExpr(Ops[0], SE.getConstant(*Mask + 1));
});
if (match(V, m_Trunc(m_VPValue(LHSVal)))) {
const VPlan *Plan = V->getDefiningRecipe()->getParent()->getPlan();
Type *DestTy = VPTypeAnalysis(*Plan).inferScalarType(V);
- return CreateSCEV({LHSVal}, [&](ArrayRef<const SCEV *> Ops) {
+ return CreateSCEV({LHSVal}, [&](ArrayRef<SCEVUse> Ops) {
return SE.getTruncateExpr(Ops[0], DestTy);
});
}
if (match(V, m_ZExt(m_VPValue(LHSVal)))) {
const VPlan *Plan = V->getDefiningRecipe()->getParent()->getPlan();
Type *DestTy = VPTypeAnalysis(*Plan).inferScalarType(V);
- return CreateSCEV({LHSVal}, [&](ArrayRef<const SCEV *> Ops) {
+ return CreateSCEV({LHSVal}, [&](ArrayRef<SCEVUse> Ops) {
return SE.getZeroExtendExpr(Ops[0], DestTy);
});
}
@@ -225,35 +224,35 @@ const SCEV *vputils::getSCEVExprForVPValue(const VPValue *V,
SE.getSignExtendExpr(V2, DestTy), SCEV::FlagNSW);
}
- return CreateSCEV({LHSVal}, [&](ArrayRef<const SCEV *> Ops) {
+ return CreateSCEV({LHSVal}, [&](ArrayRef<SCEVUse> Ops) {
return SE.getSignExtendExpr(Ops[0], DestTy);
});
}
if (match(V,
m_Intrinsic<Intrinsic::umax>(m_VPValue(LHSVal), m_VPValue(RHSVal))))
- return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<const SCEV *> Ops) {
+ return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<SCEVUse> Ops) {
return SE.getUMaxExpr(Ops[0], Ops[1]);
});
if (match(V,
m_Intrinsic<Intrinsic::smax>(m_VPValue(LHSVal), m_VPValue(RHSVal))))
- return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<const SCEV *> Ops) {
+ return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<SCEVUse> Ops) {
return SE.getSMaxExpr(Ops[0], Ops[1]);
});
if (match(V,
m_Intrinsic<Intrinsic::umin>(m_VPValue(LHSVal), m_VPValue(RHSVal))))
- return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<const SCEV *> Ops) {
+ return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<SCEVUse> Ops) {
return SE.getUMinExpr(Ops[0], Ops[1]);
});
if (match(V,
m_Intrinsic<Intrinsic::smin>(m_VPValue(LHSVal), m_VPValue(RHSVal))))
- return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<const SCEV *> Ops) {
+ return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<SCEVUse> Ops) {
return SE.getSMinExpr(Ops[0], Ops[1]);
});
ArrayRef<VPValue *> Ops;
Type *SourceElementType;
if (match(V, m_GetElementPtr(SourceElementType, Ops))) {
- const SCEV *GEPExpr = CreateSCEV(Ops, [&](ArrayRef<const SCEV *> Ops) {
+ const SCEV *GEPExpr = CreateSCEV(Ops, [&](ArrayRef<SCEVUse> Ops) {
return SE.getGEPExpr(Ops.front(), Ops.drop_front(), SourceElementType);
});
return PSE.getPredicatedSCEV(GEPExpr);
diff --git a/llvm/unittests/Analysis/ScalarEvolutionTest.cpp b/llvm/unittests/Analysis/ScalarEvolutionTest.cpp
index b17e83c0e5dca..97f6fed571ac0 100644
--- a/llvm/unittests/Analysis/ScalarEvolutionTest.cpp
+++ b/llvm/unittests/Analysis/ScalarEvolutionTest.cpp
@@ -251,12 +251,12 @@ TEST_F(ScalarEvolutionsTest, CommutativeExprOperandOrder) {
EXPECT_EQ(SE.getMulExpr(B, C), SE.getMulExpr(C, B));
EXPECT_EQ(SE.getMulExpr(A, C), SE.getMulExpr(C, A));
- SmallVector<const SCEV *, 3> Ops0 = {A, B, C};
- SmallVector<const SCEV *, 3> Ops1 = {A, C, B};
- SmallVector<const SCEV *, 3> Ops2 = {B, A, C};
- SmallVector<const SCEV *, 3> Ops3 = {B, C, A};
- SmallVector<const SCEV *, 3> Ops4 = {C, B, A};
- SmallVector<const SCEV *, 3> Ops5 = {C, A, B};
+ SmallVector<SCEVUse, 3> Ops0 = {A, B, C};
+ SmallVector<SCEVUse, 3> Ops1 = {A, C, B};
+ SmallVector<SCEVUse, 3> Ops2 = {B, A, C};
+ SmallVector<SCEVUse, 3> Ops3 = {B, C, A};
+ SmallVector<SCEVUse, 3> Ops4 = {C, B, A};
+ SmallVector<SCEVUse, 3> Ops5 = {C, A, B};
const SCEV *Mul0 = SE.getMulExpr(Ops0);
const SCEV *Mul1 = SE.getMulExpr(Ops1);
@@ -542,13 +542,14 @@ TEST_F(ScalarEvolutionsTest, SCEVNormalization) {
auto *L1 = *std::next(LI.begin());
auto *L0 = *std::next(LI.begin(), 2);
- auto GetAddRec = [&SE](const Loop *L, std::initializer_list<const SCEV *> Ops) {
- SmallVector<const SCEV *, 4> OpsCopy(Ops);
+ auto GetAddRec = [&SE](const Loop *L,
+ std::initializer_list<const SCEV *> Ops) {
+ SmallVector<SCEVUse, 4> OpsCopy(Ops.begin(), Ops.end());
return SE.getAddRecExpr(OpsCopy, L, SCEV::FlagAnyWrap);
};
auto GetAdd = [&SE](std::initializer_list<const SCEV *> Ops) {
- SmallVector<const SCEV *, 4> OpsCopy(Ops);
+ SmallVector<SCEVUse, 4> OpsCopy(Ops.begin(), Ops.end());
return SE.getAddExpr(OpsCopy, SCEV::FlagAnyWrap);
};
@@ -1729,7 +1730,7 @@ TEST_F(ScalarEvolutionsTest, ComplexityComparatorIsStrictWeakOrdering2) {
const SCEV *M0 = SE.getNegativeSCEV(P0);
const SCEV *M2 = SE.getNegativeSCEV(P2);
- SmallVector<const SCEV *, 6> Ops = {M2, P0, M0, P1, P2};
+ SmallVector<SCEVUse, 6> Ops = {M2, P0, M0, P1, P2};
// When _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_DEBUG, this will
// crash if the comparator has the specific caching bug.
SE.getAddExpr(Ops);
diff --git a/llvm/unittests/Transforms/Utils/ScalarEvolutionExpanderTest.cpp b/llvm/unittests/Transforms/Utils/ScalarEvolutionExpanderTest.cpp
index 144a28a358f6e..e97d00dda7f8c 100644
--- a/llvm/unittests/Transforms/Utils/ScalarEvolutionExpanderTest.cpp
+++ b/llvm/unittests/Transforms/Utils/ScalarEvolutionExpanderTest.cpp
@@ -873,8 +873,8 @@ TEST_F(ScalarEvolutionExpanderTest, SCEVExpandNonAffineAddRec) {
// Expand {5,+,1,+,1}
auto GetAR3 = [&](ScalarEvolution &SE, Loop *L) -> const SCEVAddRecExpr * {
- SmallVector<const SCEV *, 3> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
- SE.getOne(ARType), SE.getOne(ARType)};
+ SmallVector<SCEVUse, 3> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
+ SE.getOne(ARType), SE.getOne(ARType)};
return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, L, SCEV::FlagAnyWrap));
};
TestNoCanonicalIV(GetAR3);
@@ -883,9 +883,9 @@ TEST_F(ScalarEvolutionExpanderTest, SCEVExpandNonAffineAddRec) {
// Expand {5,+,1,+,1,+,1}
auto GetAR4 = [&](ScalarEvolution &SE, Loop *L) -> const SCEVAddRecExpr * {
- SmallVector<const SCEV *, 4> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
- SE.getOne(ARType), SE.getOne(ARType),
- SE.getOne(ARType)};
+ SmallVector<SCEVUse, 4> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
+ SE.getOne(ARType), SE.getOne(ARType),
+ SE.getOne(ARType)};
return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, L, SCEV::FlagAnyWrap));
};
TestNoCanonicalIV(GetAR4);
@@ -894,9 +894,9 @@ TEST_F(ScalarEvolutionExpanderTest, SCEVExpandNonAffineAddRec) {
// Expand {5,+,1,+,1,+,1,+,1}
auto GetAR5 = [&](ScalarEvolution &SE, Loop *L) -> const SCEVAddRecExpr * {
- SmallVector<const SCEV *, 5> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
- SE.getOne(ARType), SE.getOne(ARType),
- SE.getOne(ARType), SE.getOne(ARType)};
+ SmallVector<SCEVUse, 5> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
+ SE.getOne(ARType), SE.getOne(ARType),
+ SE.getOne(ARType), SE.getOne(ARType)};
return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, L, SCEV::FlagAnyWrap));
};
TestNoCanonicalIV(GetAR5);
diff --git a/polly/include/polly/CodeGen/BlockGenerators.h b/polly/include/polly/CodeGen/BlockGenerators.h
index 401e80eb0fece..338256d9e1dc3 100644
--- a/polly/include/polly/CodeGen/BlockGenerators.h
+++ b/polly/include/polly/CodeGen/BlockGenerators.h
@@ -35,7 +35,6 @@ using llvm::Instruction;
using llvm::LoadInst;
using llvm::Loop;
using llvm::LoopInfo;
-using llvm::LoopToScevMapT;
using llvm::MapVector;
using llvm::PHINode;
using llvm::ScalarEvolution;
diff --git a/polly/include/polly/Support/ScopHelper.h b/polly/include/polly/Support/ScopHelper.h
index 38b731a9f7d8d..fa36acf458823 100644
--- a/polly/include/polly/Support/ScopHelper.h
+++ b/polly/include/polly/Support/ScopHelper.h
@@ -14,6 +14,7 @@
#define POLLY_SUPPORT_IRHELPER_H
#include "llvm/ADT/SetVector.h"
+#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/ValueHandle.h"
@@ -37,7 +38,7 @@ class Scop;
class ScopStmt;
/// Same as llvm/Analysis/ScalarEvolutionExpressions.h
-using LoopToScevMapT = llvm::DenseMap<const llvm::Loop *, const llvm::SCEV *>;
+using LoopToScevMapT = llvm::DenseMap<const llvm::Loop *, llvm::SCEVUse>;
/// Enumeration of assumptions Polly can take.
enum AssumptionKind {
diff --git a/polly/lib/Analysis/ScopDetection.cpp b/polly/lib/Analysis/ScopDetection.cpp
index b9fd4b8be7027..99269aafb3d14 100644
--- a/polly/lib/Analysis/ScopDetection.cpp
+++ b/polly/lib/Analysis/ScopDetection.cpp
@@ -886,7 +886,7 @@ ScopDetection::getDelinearizationTerms(DetectionContext &Context,
if (auto *AF2 = dyn_cast<SCEVAddRecExpr>(Op))
collectParametricTerms(SE, AF2, Terms);
if (auto *AF2 = dyn_cast<SCEVMulExpr>(Op)) {
- SmallVector<const SCEV *, 0> Operands;
+ SmallVector<SCEVUse, 0> Operands;
for (const SCEV *MulOp : AF2->operands()) {
if (auto *Const = dyn_cast<SCEVConstant>(MulOp))
diff --git a/polly/lib/Support/SCEVValidator.cpp b/polly/lib/Support/SCEVValidator.cpp
index 5c3b10cc08ded..291853cde8308 100644
--- a/polly/lib/Support/SCEVValidator.cpp
+++ b/polly/lib/Support/SCEVValidator.cpp
@@ -700,7 +700,7 @@ polly::extractConstantFactor(const SCEV *S, ScalarEvolution &SE) {
}
if (auto *Add = dyn_cast<SCEVAddExpr>(S)) {
- SmallVector<const SCEV *, 4> LeftOvers;
+ SmallVector<SCEVUse, 4> LeftOvers;
auto Op0Pair = extractConstantFactor(Add->getOperand(0), SE);
auto *Factor = Op0Pair.first;
if (SE.isKnownNegative(Factor)) {
@@ -729,7 +729,7 @@ polly::extractConstantFactor(const SCEV *S, ScalarEvolution &SE) {
if (!Mul)
return std::make_pair(ConstPart, S);
- SmallVector<const SCEV *, 4> LeftOvers;
+ SmallVector<SCEVUse, 4> LeftOvers;
for (const SCEV *Op : Mul->operands())
if (isa<SCEVConstant>(Op))
ConstPart = cast<SCEVConstant>(SE.getMulExpr(ConstPart, Op));
diff --git a/polly/lib/Support/ScopHelper.cpp b/polly/lib/Support/ScopHelper.cpp
index fe9b8bb1ffea1..fb80edcf1d307 100644
--- a/polly/lib/Support/ScopHelper.cpp
+++ b/polly/lib/Support/ScopHelper.cpp
@@ -243,7 +243,7 @@ struct ScopExpander final : SCEVVisitor<ScopExpander, const SCEV *> {
explicit ScopExpander(const Region &R, ScalarEvolution &SE, Function *GenFn,
ScalarEvolution &GenSE, const char *Name,
- ValueMapT *VMap, LoopToScevMapT *LoopMap,
+ ValueMapT *VMap, polly::LoopToScevMapT *LoopMap,
BasicBlock *RTCBB)
: Expander(GenSE, Name, /*PreserveLCSSA=*/false), Name(Name), R(R),
VMap(VMap), LoopMap(LoopMap), RTCBB(RTCBB), GenSE(GenSE), GenFn(GenFn) {
@@ -272,7 +272,7 @@ struct ScopExpander final : SCEVVisitor<ScopExpander, const SCEV *> {
const char *Name;
const Region &R;
ValueMapT *VMap;
- LoopToScevMapT *LoopMap;
+ polly::LoopToScevMapT *LoopMap;
BasicBlock *RTCBB;
DenseMap<const SCEV *, const SCEV *> SCEVCache;
@@ -389,50 +389,50 @@ struct ScopExpander final : SCEVVisitor<ScopExpander, const SCEV *> {
return GenSE.getUDivExpr(visit(E->getLHS()), RHSScev);
}
const SCEV *visitAddExpr(const SCEVAddExpr *E) {
- SmallVector<const SCEV *, 4> NewOps;
+ SmallVector<SCEVUse, 4> NewOps;
for (const SCEV *Op : E->operands())
NewOps.push_back(visit(Op));
return GenSE.getAddExpr(NewOps);
}
const SCEV *visitMulExpr(const SCEVMulExpr *E) {
- SmallVector<const SCEV *, 4> NewOps;
+ SmallVector<SCEVUse, 4> NewOps;
for (const SCEV *Op : E->operands())
NewOps.push_back(visit(Op));
return GenSE.getMulExpr(NewOps);
}
const SCEV *visitUMaxExpr(const SCEVUMaxExpr *E) {
- SmallVector<const SCEV *, 4> NewOps;
- for (const SCEV *Op : E->operands())
+ SmallVector<SCEVUse, 4> NewOps;
+ for (SCEVUse Op : E->operands())
NewOps.push_back(visit(Op));
return GenSE.getUMaxExpr(NewOps);
}
const SCEV *visitSMaxExpr(const SCEVSMaxExpr *E) {
- SmallVector<const SCEV *, 4> NewOps;
- for (const SCEV *Op : E->operands())
+ SmallVector<SCEVUse, 4> NewOps;
+ for (SCEVUse Op : E->operands())
NewOps.push_back(visit(Op));
return GenSE.getSMaxExpr(NewOps);
}
const SCEV *visitUMinExpr(const SCEVUMinExpr *E) {
- SmallVector<const SCEV *, 4> NewOps;
- for (const SCEV *Op : E->operands())
+ SmallVector<SCEVUse, 4> NewOps;
+ for (SCEVUse Op : E->operands())
NewOps.push_back(visit(Op));
return GenSE.getUMinExpr(NewOps);
}
const SCEV *visitSMinExpr(const SCEVSMinExpr *E) {
- SmallVector<const SCEV *, 4> NewOps;
- for (const SCEV *Op : E->operands())
+ SmallVector<SCEVUse, 4> NewOps;
+ for (SCEVUse Op : E->operands())
NewOps.push_back(visit(Op));
return GenSE.getSMinExpr(NewOps);
}
const SCEV *visitSequentialUMinExpr(const SCEVSequentialUMinExpr *E) {
- SmallVector<const SCEV *, 4> NewOps;
- for (const SCEV *Op : E->operands())
+ SmallVector<SCEVUse, 4> NewOps;
+ for (SCEVUse Op : E->operands())
NewOps.push_back(visit(Op));
return GenSE.getUMinExpr(NewOps, /*Sequential=*/true);
}
const SCEV *visitAddRecExpr(const SCEVAddRecExpr *E) {
- SmallVector<const SCEV *, 4> NewOps;
- for (const SCEV *Op : E->operands())
+ SmallVector<SCEVUse, 4> NewOps;
+ for (SCEVUse Op : E->operands())
NewOps.push_back(visit(Op));
const Loop *L = E->getLoop();
>From 5260019122706dca6c3e42a48ebdf73be0fcbbd2 Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Thu, 26 Feb 2026 21:08:56 +0000
Subject: [PATCH 2/4] !fixup address latest comments, thanks
---
.../Analysis/ScalarEvolutionExpressions.h | 38 +++++++++----------
llvm/lib/Transforms/Scalar/IndVarSimplify.cpp | 3 +-
.../Transforms/Scalar/LoopDataPrefetch.cpp | 2 +-
llvm/lib/Transforms/Scalar/LoopFuse.cpp | 6 +--
.../lib/Transforms/Scalar/LoopPredication.cpp | 5 +--
.../lib/Transforms/Scalar/NaryReassociate.cpp | 5 +--
.../Scalar/StraightLineStrengthReduce.cpp | 5 +--
7 files changed, 26 insertions(+), 38 deletions(-)
diff --git a/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h b/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
index 4a8a8e1b9c035..b5928a5583c85 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
@@ -219,10 +219,10 @@ class SCEVNAryExpr : public SCEV {
// arrays with its SCEVAllocator, so this class just needs a simple
// pointer rather than a more elaborate vector-like data structure.
// This also avoids the need for a non-trivial destructor.
- SCEVUse const *Operands;
+ const SCEVUse *Operands;
size_t NumOperands;
- SCEVNAryExpr(const FoldingSetNodeIDRef ID, enum SCEVTypes T, SCEVUse const *O,
+ SCEVNAryExpr(const FoldingSetNodeIDRef ID, enum SCEVTypes T, const SCEVUse *O,
size_t N)
: SCEV(ID, T, computeExpressionSize(ArrayRef(O, N))), Operands(O),
NumOperands(N) {}
@@ -266,7 +266,7 @@ class SCEVNAryExpr : public SCEV {
class SCEVCommutativeExpr : public SCEVNAryExpr {
protected:
SCEVCommutativeExpr(const FoldingSetNodeIDRef ID, enum SCEVTypes T,
- SCEVUse const *O, size_t N)
+ const SCEVUse *O, size_t N)
: SCEVNAryExpr(ID, T, O, N) {}
public:
@@ -287,7 +287,7 @@ class SCEVAddExpr : public SCEVCommutativeExpr {
Type *Ty;
- SCEVAddExpr(const FoldingSetNodeIDRef ID, SCEVUse const *O, size_t N)
+ SCEVAddExpr(const FoldingSetNodeIDRef ID, const SCEVUse *O, size_t N)
: SCEVCommutativeExpr(ID, scAddExpr, O, N) {
auto *FirstPointerTypedOp = find_if(
operands(), [](SCEVUse Op) { return Op->getType()->isPointerTy(); });
@@ -309,7 +309,7 @@ class SCEVAddExpr : public SCEVCommutativeExpr {
class SCEVMulExpr : public SCEVCommutativeExpr {
friend class ScalarEvolution;
- SCEVMulExpr(const FoldingSetNodeIDRef ID, SCEVUse const *O, size_t N)
+ SCEVMulExpr(const FoldingSetNodeIDRef ID, const SCEVUse *O, size_t N)
: SCEVCommutativeExpr(ID, scMulExpr, O, N) {}
public:
@@ -369,7 +369,7 @@ class SCEVAddRecExpr : public SCEVNAryExpr {
const Loop *L;
- SCEVAddRecExpr(const FoldingSetNodeIDRef ID, SCEVUse const *O, size_t N,
+ SCEVAddRecExpr(const FoldingSetNodeIDRef ID, const SCEVUse *O, size_t N,
const Loop *l)
: SCEVNAryExpr(ID, scAddRecExpr, O, N), L(l) {}
@@ -453,7 +453,7 @@ class SCEVMinMaxExpr : public SCEVCommutativeExpr {
protected:
/// Note: Constructing subclasses via this constructor is allowed
SCEVMinMaxExpr(const FoldingSetNodeIDRef ID, enum SCEVTypes T,
- SCEVUse const *O, size_t N)
+ const SCEVUse *O, size_t N)
: SCEVCommutativeExpr(ID, T, O, N) {
assert(isMinMaxType(T));
// Min and max never overflow
@@ -485,7 +485,7 @@ class SCEVMinMaxExpr : public SCEVCommutativeExpr {
class SCEVSMaxExpr : public SCEVMinMaxExpr {
friend class ScalarEvolution;
- SCEVSMaxExpr(const FoldingSetNodeIDRef ID, SCEVUse const *O, size_t N)
+ SCEVSMaxExpr(const FoldingSetNodeIDRef ID, const SCEVUse *O, size_t N)
: SCEVMinMaxExpr(ID, scSMaxExpr, O, N) {}
public:
@@ -497,7 +497,7 @@ class SCEVSMaxExpr : public SCEVMinMaxExpr {
class SCEVUMaxExpr : public SCEVMinMaxExpr {
friend class ScalarEvolution;
- SCEVUMaxExpr(const FoldingSetNodeIDRef ID, SCEVUse const *O, size_t N)
+ SCEVUMaxExpr(const FoldingSetNodeIDRef ID, const SCEVUse *O, size_t N)
: SCEVMinMaxExpr(ID, scUMaxExpr, O, N) {}
public:
@@ -509,7 +509,7 @@ class SCEVUMaxExpr : public SCEVMinMaxExpr {
class SCEVSMinExpr : public SCEVMinMaxExpr {
friend class ScalarEvolution;
- SCEVSMinExpr(const FoldingSetNodeIDRef ID, SCEVUse const *O, size_t N)
+ SCEVSMinExpr(const FoldingSetNodeIDRef ID, const SCEVUse *O, size_t N)
: SCEVMinMaxExpr(ID, scSMinExpr, O, N) {}
public:
@@ -521,7 +521,7 @@ class SCEVSMinExpr : public SCEVMinMaxExpr {
class SCEVUMinExpr : public SCEVMinMaxExpr {
friend class ScalarEvolution;
- SCEVUMinExpr(const FoldingSetNodeIDRef ID, SCEVUse const *O, size_t N)
+ SCEVUMinExpr(const FoldingSetNodeIDRef ID, const SCEVUse *O, size_t N)
: SCEVMinMaxExpr(ID, scUMinExpr, O, N) {}
public:
@@ -548,7 +548,7 @@ class SCEVSequentialMinMaxExpr : public SCEVNAryExpr {
protected:
/// Note: Constructing subclasses via this constructor is allowed
SCEVSequentialMinMaxExpr(const FoldingSetNodeIDRef ID, enum SCEVTypes T,
- SCEVUse const *O, size_t N)
+ const SCEVUse *O, size_t N)
: SCEVNAryExpr(ID, T, O, N) {
assert(isSequentialMinMaxType(T));
// Min and max never overflow
@@ -582,7 +582,7 @@ class SCEVSequentialMinMaxExpr : public SCEVNAryExpr {
class SCEVSequentialUMinExpr : public SCEVSequentialMinMaxExpr {
friend class ScalarEvolution;
- SCEVSequentialUMinExpr(const FoldingSetNodeIDRef ID, SCEVUse const *O,
+ SCEVSequentialUMinExpr(const FoldingSetNodeIDRef ID, const SCEVUse *O,
size_t N)
: SCEVSequentialMinMaxExpr(ID, scSequentialUMinExpr, O, N) {}
@@ -969,18 +969,16 @@ class SCEVLoopAddRecRewriter
}
const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
+ SmallVector<SCEVUse, 2> Operands;
+ for (SCEVUse Op : Expr->operands())
+ Operands.push_back(visit(Op));
+
const Loop *L = Expr->getLoop();
auto It = Map.find(L);
if (It == Map.end()) {
- SmallVector<SCEVUse, 2> Operands2;
- for (const SCEV *Op : Expr->operands())
- Operands2.push_back(visit(Op));
- return SE.getAddRecExpr(Operands2, L, Expr->getNoWrapFlags());
+ return SE.getAddRecExpr(Operands, L, Expr->getNoWrapFlags());
}
- SmallVector<SCEVUse, 2> Operands;
- for (const SCEV *Op : Expr->operands())
- Operands.push_back(visit(Op));
return SCEVAddRecExpr::evaluateAtIteration(Operands, It->second, SE);
}
diff --git a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
index 385ef0e9be551..745880f1ca0cb 100644
--- a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -1398,8 +1398,7 @@ createReplacement(ICmpInst *ICmp, const Loop *L, BasicBlock *ExitingBB,
SmallVector<SCEVUse, 4> Elements;
if (auto *UMin = dyn_cast<SCEVUMinExpr>(MaxIter)) {
for (SCEVUse Op : UMin->operands())
- Elements.push_back(SE->getMinusSCEV(
- Op.getPointer(), SE->getOne(Op.getPointer()->getType())));
+ Elements.push_back(SE->getMinusSCEV(Op, SE->getOne(Op->getType())));
MaxIter = SE->getUMinFromMismatchedTypes(Elements);
} else
MaxIter = SE->getMinusSCEV(MaxIter, SE->getOne(MaxIter->getType()));
diff --git a/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp b/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp
index b58ed17e87b8f..fe20c24318e12 100644
--- a/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp
@@ -394,7 +394,7 @@ bool LoopDataPrefetch::runOnLoop(Loop *L) {
const SCEV *NextLSCEV = SE->getAddExpr(
P.LSCEVAddRec,
SE->getMulExpr(SE->getConstant(P.LSCEVAddRec->getType(), ItersAhead),
- P.LSCEVAddRec->getStepRecurrence(*SE).getPointer()));
+ P.LSCEVAddRec->getStepRecurrence(*SE)));
if (!SCEVE.isSafeToExpand(NextLSCEV))
continue;
diff --git a/llvm/lib/Transforms/Scalar/LoopFuse.cpp b/llvm/lib/Transforms/Scalar/LoopFuse.cpp
index e8067d22924d7..0dcc72bf4c106 100644
--- a/llvm/lib/Transforms/Scalar/LoopFuse.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopFuse.cpp
@@ -1135,7 +1135,7 @@ struct LoopFuser {
SmallVector<SCEVUse, 2> Operands;
if (ExprL == &OldL) {
for (SCEVUse Op : Expr->operands())
- Operands.push_back(Op.getPointer());
+ Operands.push_back(Op);
return SE.getAddRecExpr(Operands, &NewL, Expr->getNoWrapFlags());
}
@@ -1145,11 +1145,11 @@ struct LoopFuser {
Valid = false;
return Expr;
}
- return visit(Expr->getStart().getPointer());
+ return visit(Expr->getStart());
}
for (SCEVUse Op : Expr->operands())
- Operands.push_back(visit(Op.getPointer()));
+ Operands.push_back(visit(Op));
return SE.getAddRecExpr(Operands, ExprL, Expr->getNoWrapFlags());
}
diff --git a/llvm/lib/Transforms/Scalar/LoopPredication.cpp b/llvm/lib/Transforms/Scalar/LoopPredication.cpp
index 26be4159784f0..b14900d6dd635 100644
--- a/llvm/lib/Transforms/Scalar/LoopPredication.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopPredication.cpp
@@ -1018,10 +1018,7 @@ static const SCEV *getMinAnalyzeableBackedgeTakenCount(ScalarEvolution &SE,
}
if (ExitCounts.size() < 2)
return SE.getCouldNotCompute();
- SmallVector<SCEVUse, 4> ExitCountPtrs;
- for (SCEVUse U : ExitCounts)
- ExitCountPtrs.push_back(U);
- return SE.getUMinFromMismatchedTypes(ExitCountPtrs);
+ return SE.getUMinFromMismatchedTypes(ExitCounts);
}
/// This implements an analogous, but entirely distinct transform from the main
diff --git a/llvm/lib/Transforms/Scalar/NaryReassociate.cpp b/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
index af6f2eb45e130..b0a33710c25bc 100644
--- a/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
+++ b/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
@@ -414,10 +414,7 @@ NaryReassociatePass::tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
// @reassociate_gep_assume for an example of this canonicalization.
IndexExprs[I] = SE->getZeroExtendExpr(IndexExprs[I], GEPArgType);
}
- SmallVector<SCEVUse, 4> IndexPtrs;
- for (SCEVUse U : IndexExprs)
- IndexPtrs.push_back(U);
- SCEVUse CandidateExpr = SE->getGEPExpr(cast<GEPOperator>(GEP), IndexPtrs);
+ SCEVUse CandidateExpr = SE->getGEPExpr(cast<GEPOperator>(GEP), IndexExprs);
Value *Candidate = findClosestMatchingDominator(CandidateExpr, GEP);
if (Candidate == nullptr)
diff --git a/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
index 74d06351eba20..ed43fb4b63f87 100644
--- a/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
@@ -1100,10 +1100,7 @@ void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForGEP(
// The base of this candidate is GEP's base plus the offsets of all
// indices except this current one.
- SmallVector<SCEVUse, 4> IndexPtrs;
- for (SCEVUse U : IndexExprs)
- IndexPtrs.push_back(U);
- SCEVUse BaseExpr = SE->getGEPExpr(cast<GEPOperator>(GEP), IndexPtrs);
+ SCEVUse BaseExpr = SE->getGEPExpr(cast<GEPOperator>(GEP), IndexExprs);
Value *ArrayIdx = GEP->getOperand(I);
uint64_t ElementSize = GTI.getSequentialElementStride(*DL);
IntegerType *PtrIdxTy = cast<IntegerType>(DL->getIndexType(GEP->getType()));
>From d38b24af9ab1316e3e3a5811c72f7fe954f4f9ed Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Fri, 27 Feb 2026 10:52:30 +0000
Subject: [PATCH 3/4] !fixup more cleanups
---
llvm/include/llvm/Analysis/ScalarEvolution.h | 6 +-
llvm/lib/Analysis/ScalarEvolution.cpp | 55 ++++++++-----------
.../Scalar/InductiveRangeCheckElimination.cpp | 2 +-
llvm/lib/Transforms/Scalar/LoopFuse.cpp | 3 +-
.../Transforms/Scalar/LoopStrengthReduce.cpp | 8 +--
5 files changed, 30 insertions(+), 44 deletions(-)
diff --git a/llvm/include/llvm/Analysis/ScalarEvolution.h b/llvm/include/llvm/Analysis/ScalarEvolution.h
index b8f2f02255506..7346d9fc28178 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -121,14 +121,16 @@ template <> struct DenseMapInfo<SCEVUse> {
return PointerLikeTypeTraits<SCEVUse>::getFromVoidPointer((void *)Val);
}
- static unsigned getHashValue(SCEVUse U) { return hash_value(U.getRawPointer()); }
+ static unsigned getHashValue(SCEVUse U) {
+ return hash_value(U.getRawPointer());
+ }
static bool isEqual(const SCEVUse LHS, const SCEVUse RHS) {
return LHS.getRawPointer() == RHS.getRawPointer();
}
};
-template<> struct simplify_type<SCEVUse> {
+template <> struct simplify_type<SCEVUse> {
using SimpleType = const SCEV *;
static SimpleType getSimplifiedValue(SCEVUse &Val) {
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 9f0ecdfc0aa9d..89dfb20bade47 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -577,8 +577,7 @@ SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID, SCEVUse op,
void SCEVUnknown::deleted() {
// Clear this SCEVUnknown from various maps.
- SCEVUse U(this);
- SE->forgetMemoizedResults(U);
+ SE->forgetMemoizedResults({this});
// Remove this SCEVUnknown from the uniquing map.
SE->UniqueSCEVs.RemoveNode(this);
@@ -589,8 +588,7 @@ void SCEVUnknown::deleted() {
void SCEVUnknown::allUsesReplacedWith(Value *New) {
// Clear this SCEVUnknown from various maps.
- SCEVUse U(this);
- SE->forgetMemoizedResults(U);
+ SE->forgetMemoizedResults({this});
// Remove this SCEVUnknown from the uniquing map.
SE->UniqueSCEVs.RemoveNode(this);
@@ -2193,7 +2191,7 @@ const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
Op, Ty);
UniqueSCEVs.InsertNode(S, IP);
- registerUser(S, ArrayRef<SCEVUse>(SCEVUse(Op)));
+ registerUser(S, Op);
return S;
}
@@ -2850,8 +2848,8 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<SCEVUse> &Ops,
append_range(MulOps, Mul->operands().drop_front(MulOp + 1));
InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
}
- SmallVector<SCEVUse, 2> TwoOps = {getOne(Ty), InnerMul};
- const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
+ const SCEV *AddOne =
+ getAddExpr(getOne(Ty), InnerMul, SCEV::FlagAnyWrap, Depth + 1);
const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
SCEV::FlagAnyWrap, Depth + 1);
if (Ops.size() == 2) return OuterMul;
@@ -2890,9 +2888,8 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<SCEVUse> &Ops,
append_range(MulOps, OtherMul->operands().drop_front(OMulOp+1));
InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
}
- SmallVector<SCEVUse, 2> TwoOps = {InnerMul1, InnerMul2};
const SCEV *InnerMulSum =
- getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
+ getAddExpr(InnerMul1, InnerMul2, SCEV::FlagAnyWrap, Depth + 1);
const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum,
SCEV::FlagAnyWrap, Depth + 1);
if (Ops.size() == 2) return OuterMul;
@@ -2951,8 +2948,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<SCEVUse> &Ops,
// in the outer scope.
SCEV::NoWrapFlags AddFlags = Flags;
if (AddFlags != SCEV::FlagAnyWrap) {
- SmallVector<SCEVUse, 8> LIOpPtrs(LIOps.begin(), LIOps.end());
- auto *DefI = getDefiningScopeBound(LIOpPtrs);
+ auto *DefI = getDefiningScopeBound(LIOps);
auto *ReachI = &*AddRecLoop->getHeader()->begin();
if (!isGuaranteedToTransferExecutionTo(DefI, ReachI))
AddFlags = SCEV::FlagAnyWrap;
@@ -3002,9 +2998,9 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<SCEVUse> &Ops,
append_range(AddRecOps, OtherAddRec->operands().drop_front(i));
break;
}
- SmallVector<SCEVUse, 2> TwoOps = {AddRecOps[i],
- OtherAddRec->getOperand(i)};
- AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
+ AddRecOps[i] =
+ getAddExpr(AddRecOps[i], OtherAddRec->getOperand(i),
+ SCEV::FlagAnyWrap, Depth + 1);
}
Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
}
@@ -3316,10 +3312,8 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<SCEVUse> &Ops,
for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
// Scan all of the other operands to this mul and add them to the vector
// if they are loop invariant w.r.t. the recurrence.
- const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[Idx]);
- if (!AddRec)
- break;
SmallVector<SCEVUse, 8> LIOps;
+ const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
if (isAvailableAtLoopEntry(Ops[i], AddRec->getLoop())) {
LIOps.push_back(Ops[i]);
@@ -3342,7 +3336,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<SCEVUse> &Ops,
AddRec->getNoWrapFlags(ComputeFlags({Scale, AddRec}));
for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
- NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i).getPointer(),
+ NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
SCEV::FlagAnyWrap, Depth + 1));
if (hasFlags(Flags, SCEV::FlagNSW) && !hasFlags(Flags, SCEV::FlagNUW)) {
@@ -3653,7 +3647,7 @@ const SCEV *ScalarEvolution::getUDivExpr(SCEVUse LHS, SCEVUse RHS) {
SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
LHS, RHS);
UniqueSCEVs.InsertNode(S, IP);
- registerUser(S, ArrayRef<SCEVUse>({SCEVUse(LHS), SCEVUse(RHS)}));
+ registerUser(S, ArrayRef<SCEVUse>({LHS, RHS}));
return S;
}
@@ -4085,11 +4079,8 @@ class SCEVSequentialMinMaxDeduplicatingVisitor final
return S;
auto *NAry = cast<SCEVNAryExpr>(S);
- SmallVector<SCEVUse, 8> Operands;
- for (SCEVUse U : NAry->operands())
- Operands.push_back(U.getPointer());
SmallVector<SCEVUse> NewOps;
- bool Changed = visit(Kind, Operands, NewOps);
+ bool Changed = visit(Kind, NAry->operands(), NewOps);
if (!Changed)
return S;
@@ -4480,7 +4471,7 @@ const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<SCEVUse> &Ops) {
const SCEV *ScalarEvolution::getUMinExpr(SCEVUse LHS, SCEVUse RHS,
bool Sequential) {
SmallVector<SCEVUse, 2> Ops = {LHS, RHS};
- return getMinMaxExpr(scUMinExpr, Ops);
+ return getUMinExpr(Ops, Sequential);
}
const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<SCEVUse> &Ops,
@@ -5984,7 +5975,7 @@ const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
// Okay, for the entire analysis of this edge we assumed the PHI
// to be symbolic. We now need to go back and purge all of the
// entries for the scalars that use the symbolic expression.
- forgetMemoizedResults(SCEVUse(SymbolicName));
+ forgetMemoizedResults({SymbolicName});
insertValueToMap(PN, PHISCEV);
if (auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
@@ -6024,7 +6015,7 @@ const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
// Okay, for the entire analysis of this edge we assumed the PHI
// to be symbolic. We now need to go back and purge all of the
// entries for the scalars that use the symbolic expression.
- forgetMemoizedResults(SCEVUse(SymbolicName));
+ forgetMemoizedResults({SymbolicName});
insertValueToMap(PN, Shifted);
return Shifted;
}
@@ -10188,8 +10179,7 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
// foldable. Build a new instance of the folded commutative expression.
SmallVector<SCEVUse, 8> NewOps;
NewOps.reserve(Ops.size());
- for (unsigned j = 0; j < i; ++j)
- NewOps.push_back(Ops[j].getPointer());
+ append_range(NewOps, Ops.take_front(i));
NewOps.push_back(OpAtScope);
for (++i; i != e; ++i) {
@@ -12805,7 +12795,7 @@ bool ScalarEvolution::isImpliedViaOperations(CmpPredicate Pred, const SCEV *LHS,
auto GetOpFromSExt = [&](const SCEV *S) -> const SCEV * {
if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
- return Ext->getOperand().getPointer();
+ return Ext->getOperand();
// TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
// the constant in some cases.
return S;
@@ -15131,8 +15121,7 @@ const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates(
continue;
ExitCount = getTruncateOrSignExtend(ExitCount, Step->getType());
- const SCEV *Add =
- getAddExpr(AddRecToCheck->getStart().getPointer(), ExitCount);
+ const SCEV *Add = getAddExpr(AddRecToCheck->getStart(), ExitCount);
if (isKnownPredicate(CmpInst::ICMP_SLT, Add, AddRecToCheck->getStart()))
return nullptr;
}
@@ -16151,10 +16140,10 @@ const SCEV *ScalarEvolution::LoopGuards::rewrite(const SCEV *Expr) const {
SE.getAddExpr(Expr->getOperand(1), Expr->getOperand(2));
if (const SCEV *Rewritten = RewriteSubtraction(Add))
return SE.getAddExpr(
- Expr->getOperand(0).getPointer(), Rewritten,
+ Expr->getOperand(0), Rewritten,
ScalarEvolution::maskFlags(Expr->getNoWrapFlags(), FlagMask));
if (const SCEV *S = Map.lookup(Add))
- return SE.getAddExpr(Expr->getOperand(0).getPointer(), S);
+ return SE.getAddExpr(Expr->getOperand(0), S);
}
SmallVector<SCEVUse, 2> Operands;
bool Changed = false;
diff --git a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
index 97ecbeaae5e37..308c267969113 100644
--- a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
@@ -423,7 +423,7 @@ bool InductiveRangeCheck::reassociateSubLHS(
auto getExprScaledIfOverflow = [&](Instruction::BinaryOps BinOp,
const SCEV *LHS,
const SCEV *RHS) -> const SCEV * {
- const SCEV *(ScalarEvolution::*Operation)(SCEVUse, SCEVUse ,
+ const SCEV *(ScalarEvolution::*Operation)(SCEVUse, SCEVUse,
SCEV::NoWrapFlags, unsigned);
switch (BinOp) {
default:
diff --git a/llvm/lib/Transforms/Scalar/LoopFuse.cpp b/llvm/lib/Transforms/Scalar/LoopFuse.cpp
index 0dcc72bf4c106..abe03cf1270ab 100644
--- a/llvm/lib/Transforms/Scalar/LoopFuse.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopFuse.cpp
@@ -1134,8 +1134,7 @@ struct LoopFuser {
const Loop *ExprL = Expr->getLoop();
SmallVector<SCEVUse, 2> Operands;
if (ExprL == &OldL) {
- for (SCEVUse Op : Expr->operands())
- Operands.push_back(Op);
+ append_range(Operands, Expr->operands());
return SE.getAddRecExpr(Operands, &NewL, Expr->getNoWrapFlags());
}
diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index a4ca0de522f86..299aa7bfb4b9e 100644
--- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -966,17 +966,13 @@ static GlobalValue *ExtractSymbol(SCEVUse &S, ScalarEvolution &SE) {
}
} else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
SmallVector<SCEVUse, 8> NewOps(Add->operands());
- SCEVUse Back = NewOps.back();
- GlobalValue *Result = ExtractSymbol(Back, SE);
- NewOps.back() = Back;
+ GlobalValue *Result = ExtractSymbol(NewOps.back(), SE);
if (Result)
S = SE.getAddExpr(NewOps);
return Result;
} else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
SmallVector<SCEVUse, 8> NewOps(AR->operands());
- SCEVUse Front = NewOps.front();
- GlobalValue *Result = ExtractSymbol(Front, SE);
- NewOps.front() = Front;
+ GlobalValue *Result = ExtractSymbol(NewOps.front(), SE);
if (Result)
S = SE.getAddRecExpr(NewOps, AR->getLoop(),
// FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
>From c722f26faba66e998775352a9db59beaa245887b Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Wed, 4 Mar 2026 17:03:10 +0000
Subject: [PATCH 4/4] !fixup address comments, thanks
---
llvm/include/llvm/Analysis/ScalarEvolution.h | 2 --
.../llvm/Analysis/ScalarEvolutionExpressions.h | 11 +++--------
2 files changed, 3 insertions(+), 10 deletions(-)
diff --git a/llvm/include/llvm/Analysis/ScalarEvolution.h b/llvm/include/llvm/Analysis/ScalarEvolution.h
index 7346d9fc28178..c59a652509e27 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -109,8 +109,6 @@ template <> struct PointerLikeTypeTraits<SCEVUse> {
};
template <> struct DenseMapInfo<SCEVUse> {
- static constexpr uintptr_t Log2MaxAlign = 12;
-
static inline SCEVUse getEmptyKey() {
uintptr_t Val = static_cast<uintptr_t>(-1);
return PointerLikeTypeTraits<SCEVUse>::getFromVoidPointer((void *)Val);
diff --git a/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h b/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
index b5928a5583c85..5199bbede7f84 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
@@ -58,10 +58,6 @@ enum SCEVTypes : unsigned short {
scCouldNotCompute
};
-inline SmallVector<SCEVUse> toSCEV(ArrayRef<SCEVUse> Ops) {
- return SmallVector<SCEVUse>(Ops.begin(), Ops.end());
-}
-
/// This class represents a constant integer value.
class SCEVConstant : public SCEV {
friend class ScalarEvolution;
@@ -385,8 +381,8 @@ class SCEVAddRecExpr : public SCEVNAryExpr {
SCEVUse getStepRecurrence(ScalarEvolution &SE) const {
if (isAffine())
return getOperand(1);
- return SE.getAddRecExpr(toSCEV(operands().drop_front()), getLoop(),
- FlagAnyWrap);
+ return SE.getAddRecExpr(SmallVector<SCEVUse, 3>(operands().drop_front()),
+ getLoop(), FlagAnyWrap);
}
/// Return true if this represents an expression A + B*x where A
@@ -975,9 +971,8 @@ class SCEVLoopAddRecRewriter
const Loop *L = Expr->getLoop();
auto It = Map.find(L);
- if (It == Map.end()) {
+ if (It == Map.end())
return SE.getAddRecExpr(Operands, L, Expr->getNoWrapFlags());
- }
return SCEVAddRecExpr::evaluateAtIteration(Operands, It->second, SE);
}
More information about the llvm-commits
mailing list