[polly] f8734a5 - [SCEV] Introduce SCEVUse, use it instead of const SCEV * (NFCI). (#91961)

via llvm-commits llvm-commits at lists.llvm.org
Fri Mar 13 09:23:12 PDT 2026


Author: Florian Hahn
Date: 2026-03-13T16:23:06Z
New Revision: f8734a5e107f603871e4e327ddc41fb8a6058f3e

URL: https://github.com/llvm/llvm-project/commit/f8734a5e107f603871e4e327ddc41fb8a6058f3e
DIFF: https://github.com/llvm/llvm-project/commit/f8734a5e107f603871e4e327ddc41fb8a6058f3e.diff

LOG: [SCEV] Introduce SCEVUse, use it instead of const SCEV * (NFCI). (#91961)

This patch introduces SCEVUse, which is a tagged pointer containing the
used const SCEV *, plus extra bits to store NUW/NSW flags that are only
valid at the specific use.

This was suggested by @nikic as an alternative
to https://github.com/llvm/llvm-project/pull/90742.

This patch just updates most SCEV infrastructure to operate on SCEVUse
instead of const SCEV *. It does not introduce any code that makes use
of the use-specific flags yet which I'll share as follow-ups.

Compile-time impact: https://llvm-compile-time-tracker.com/compare.php?from=ee34eb6edccdebc2a752ffecdde5faae6b0d5593&to=5a7727d7819414d2acbc5b6ab740f0fc2363e842&stat=instructions%3Au

Added: 
    

Modified: 
    llvm/include/llvm/Analysis/ScalarEvolution.h
    llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
    llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h
    llvm/include/llvm/Transforms/Scalar/NaryReassociate.h
    llvm/lib/Analysis/Delinearization.cpp
    llvm/lib/Analysis/ScalarEvolution.cpp
    llvm/lib/Analysis/ScalarEvolutionDivision.cpp
    llvm/lib/Analysis/ScalarEvolutionNormalization.cpp
    llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
    llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
    llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
    llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp
    llvm/lib/Transforms/Scalar/LoopFuse.cpp
    llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
    llvm/lib/Transforms/Scalar/LoopPredication.cpp
    llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
    llvm/lib/Transforms/Scalar/NaryReassociate.cpp
    llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
    llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
    llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
    llvm/lib/Transforms/Vectorize/VPlanUtils.cpp
    llvm/unittests/Analysis/ScalarEvolutionTest.cpp
    llvm/unittests/Transforms/Utils/ScalarEvolutionExpanderTest.cpp
    polly/include/polly/CodeGen/BlockGenerators.h
    polly/include/polly/Support/ScopHelper.h
    polly/lib/Analysis/ScopDetection.cpp
    polly/lib/Support/SCEVValidator.cpp
    polly/lib/Support/ScopHelper.cpp

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/Analysis/ScalarEvolution.h b/llvm/include/llvm/Analysis/ScalarEvolution.h
index 6aff53a1e7b70..c59a652509e27 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -66,6 +66,76 @@ enum SCEVTypes : unsigned short;
 
 LLVM_ABI extern bool VerifySCEV;
 
+class SCEV;
+
+struct SCEVUse : PointerIntPair<const SCEV *, 2> {
+  SCEVUse() : PointerIntPair() { setFromOpaqueValue(nullptr); }
+  SCEVUse(const SCEV *S) : PointerIntPair() { setFromOpaqueValue((void *)S); }
+  SCEVUse(const SCEV *S, unsigned Flags) : PointerIntPair(S, Flags) {}
+
+  operator const SCEV *() const { return getPointer(); }
+  const SCEV *operator->() const { return getPointer(); }
+
+  void *getRawPointer() const { return getOpaqueValue(); }
+
+  unsigned getFlags() const { return getInt(); }
+
+  bool operator==(const SCEVUse &RHS) const {
+    return getRawPointer() == RHS.getRawPointer();
+  }
+
+  bool operator==(const SCEV *RHS) const { return getRawPointer() == RHS; }
+
+  /// Print out the internal representation of this scalar to the specified
+  /// stream.  This should really only be used for debugging purposes.
+  void print(raw_ostream &OS) const;
+
+  /// This method is used for debugging.
+  void dump() const;
+};
+
+/// Provide PointerLikeTypeTraits for SCEVUse, so it can be used with
+/// SmallPtrSet, among others.
+template <> struct PointerLikeTypeTraits<SCEVUse> {
+  static inline void *getAsVoidPointer(SCEVUse U) { return U.getOpaqueValue(); }
+  static inline SCEVUse getFromVoidPointer(void *P) {
+    SCEVUse U;
+    U.setFromOpaqueValue(P);
+    return U;
+  }
+
+  /// The Low bits are used by the PointerIntPair.
+  static constexpr int NumLowBitsAvailable = 0;
+};
+
+template <> struct DenseMapInfo<SCEVUse> {
+  static inline SCEVUse getEmptyKey() {
+    uintptr_t Val = static_cast<uintptr_t>(-1);
+    return PointerLikeTypeTraits<SCEVUse>::getFromVoidPointer((void *)Val);
+  }
+
+  static inline SCEVUse getTombstoneKey() {
+    uintptr_t Val = static_cast<uintptr_t>(-2);
+    return PointerLikeTypeTraits<SCEVUse>::getFromVoidPointer((void *)Val);
+  }
+
+  static unsigned getHashValue(SCEVUse U) {
+    return hash_value(U.getRawPointer());
+  }
+
+  static bool isEqual(const SCEVUse LHS, const SCEVUse RHS) {
+    return LHS.getRawPointer() == RHS.getRawPointer();
+  }
+};
+
+template <> struct simplify_type<SCEVUse> {
+  using SimpleType = const SCEV *;
+
+  static SimpleType getSimplifiedValue(SCEVUse &Val) {
+    return Val.getPointer();
+  }
+};
+
 /// This class represents an analyzed expression in the program.  These are
 /// opaque objects that the client is not allowed to do much with directly.
 ///
@@ -144,7 +214,7 @@ class SCEV : public FoldingSetNode {
   LLVM_ABI Type *getType() const;
 
   /// Return operands of this SCEV expression.
-  LLVM_ABI ArrayRef<const SCEV *> operands() const;
+  LLVM_ABI ArrayRef<SCEVUse> operands() const;
 
   /// Return true if the expression is a constant zero.
   LLVM_ABI bool isZero() const;
@@ -554,6 +624,7 @@ class ScalarEvolution {
 
   /// Notify this ScalarEvolution that \p User directly uses SCEVs in \p Ops.
   LLVM_ABI void registerUser(const SCEV *User, ArrayRef<const SCEV *> Ops);
+  LLVM_ABI void registerUser(const SCEV *User, ArrayRef<SCEVUse> Ops);
 
   /// Return true if the SCEV expression contains an undef value.
   LLVM_ABI bool containsUndefs(const SCEV *S) const;
@@ -592,46 +663,47 @@ class ScalarEvolution {
                                              unsigned Depth = 0);
   LLVM_ABI const SCEV *getCastExpr(SCEVTypes Kind, const SCEV *Op, Type *Ty);
   LLVM_ABI const SCEV *getAnyExtendExpr(const SCEV *Op, Type *Ty);
-  LLVM_ABI const SCEV *getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
+
+  LLVM_ABI const SCEV *getAddExpr(SmallVectorImpl<SCEVUse> &Ops,
                                   SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
                                   unsigned Depth = 0);
-  const SCEV *getAddExpr(const SCEV *LHS, const SCEV *RHS,
+  const SCEV *getAddExpr(SCEVUse LHS, SCEVUse RHS,
                          SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
                          unsigned Depth = 0) {
-    SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
+    SmallVector<SCEVUse, 2> Ops = {LHS, RHS};
     return getAddExpr(Ops, Flags, Depth);
   }
-  const SCEV *getAddExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
+  const SCEV *getAddExpr(SCEVUse Op0, SCEVUse Op1, SCEVUse Op2,
                          SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
                          unsigned Depth = 0) {
-    SmallVector<const SCEV *, 3> Ops = {Op0, Op1, Op2};
+    SmallVector<SCEVUse, 3> Ops = {Op0, Op1, Op2};
     return getAddExpr(Ops, Flags, Depth);
   }
-  LLVM_ABI const SCEV *getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
+  LLVM_ABI const SCEV *getMulExpr(SmallVectorImpl<SCEVUse> &Ops,
                                   SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
                                   unsigned Depth = 0);
-  const SCEV *getMulExpr(const SCEV *LHS, const SCEV *RHS,
+  const SCEV *getMulExpr(SCEVUse LHS, SCEVUse RHS,
                          SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
                          unsigned Depth = 0) {
-    SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
+    SmallVector<SCEVUse, 2> Ops = {LHS, RHS};
     return getMulExpr(Ops, Flags, Depth);
   }
-  const SCEV *getMulExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
+  const SCEV *getMulExpr(SCEVUse Op0, SCEVUse Op1, SCEVUse Op2,
                          SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
                          unsigned Depth = 0) {
-    SmallVector<const SCEV *, 3> Ops = {Op0, Op1, Op2};
+    SmallVector<SCEVUse, 3> Ops = {Op0, Op1, Op2};
     return getMulExpr(Ops, Flags, Depth);
   }
-  LLVM_ABI const SCEV *getUDivExpr(const SCEV *LHS, const SCEV *RHS);
-  LLVM_ABI const SCEV *getUDivExactExpr(const SCEV *LHS, const SCEV *RHS);
-  LLVM_ABI const SCEV *getURemExpr(const SCEV *LHS, const SCEV *RHS);
-  LLVM_ABI const SCEV *getAddRecExpr(const SCEV *Start, const SCEV *Step,
-                                     const Loop *L, SCEV::NoWrapFlags Flags);
-  LLVM_ABI const SCEV *getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
+  LLVM_ABI const SCEV *getUDivExpr(SCEVUse LHS, SCEVUse RHS);
+  LLVM_ABI const SCEV *getUDivExactExpr(SCEVUse LHS, SCEVUse RHS);
+  LLVM_ABI const SCEV *getURemExpr(SCEVUse LHS, SCEVUse RHS);
+  LLVM_ABI const SCEV *getAddRecExpr(SCEVUse Start, SCEVUse Step, const Loop *L,
+                                     SCEV::NoWrapFlags Flags);
+  LLVM_ABI const SCEV *getAddRecExpr(SmallVectorImpl<SCEVUse> &Operands,
                                      const Loop *L, SCEV::NoWrapFlags Flags);
-  const SCEV *getAddRecExpr(const SmallVectorImpl<const SCEV *> &Operands,
+  const SCEV *getAddRecExpr(const SmallVectorImpl<SCEVUse> &Operands,
                             const Loop *L, SCEV::NoWrapFlags Flags) {
-    SmallVector<const SCEV *, 4> NewOp(Operands.begin(), Operands.end());
+    SmallVector<SCEVUse, 4> NewOp(Operands.begin(), Operands.end());
     return getAddRecExpr(NewOp, L, Flags);
   }
 
@@ -649,26 +721,25 @@ class ScalarEvolution {
   /// instead we use IndexExprs.
   /// \p IndexExprs The expressions for the indices.
   LLVM_ABI const SCEV *getGEPExpr(GEPOperator *GEP,
-                                  ArrayRef<const SCEV *> IndexExprs);
-  LLVM_ABI const SCEV *getGEPExpr(const SCEV *BaseExpr,
-                                  ArrayRef<const SCEV *> IndexExprs,
+                                  ArrayRef<SCEVUse> IndexExprs);
+  LLVM_ABI const SCEV *getGEPExpr(SCEVUse BaseExpr,
+                                  ArrayRef<SCEVUse> IndexExprs,
                                   Type *SrcElementTy,
                                   GEPNoWrapFlags NW = GEPNoWrapFlags::none());
   LLVM_ABI const SCEV *getAbsExpr(const SCEV *Op, bool IsNSW);
   LLVM_ABI const SCEV *getMinMaxExpr(SCEVTypes Kind,
-                                     SmallVectorImpl<const SCEV *> &Operands);
+                                     SmallVectorImpl<SCEVUse> &Operands);
   LLVM_ABI const SCEV *
-  getSequentialMinMaxExpr(SCEVTypes Kind,
-                          SmallVectorImpl<const SCEV *> &Operands);
-  LLVM_ABI const SCEV *getSMaxExpr(const SCEV *LHS, const SCEV *RHS);
-  LLVM_ABI const SCEV *getSMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
-  LLVM_ABI const SCEV *getUMaxExpr(const SCEV *LHS, const SCEV *RHS);
-  LLVM_ABI const SCEV *getUMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
-  LLVM_ABI const SCEV *getSMinExpr(const SCEV *LHS, const SCEV *RHS);
-  LLVM_ABI const SCEV *getSMinExpr(SmallVectorImpl<const SCEV *> &Operands);
-  LLVM_ABI const SCEV *getUMinExpr(const SCEV *LHS, const SCEV *RHS,
+  getSequentialMinMaxExpr(SCEVTypes Kind, SmallVectorImpl<SCEVUse> &Operands);
+  LLVM_ABI const SCEV *getSMaxExpr(SCEVUse LHS, SCEVUse RHS);
+  LLVM_ABI const SCEV *getSMaxExpr(SmallVectorImpl<SCEVUse> &Operands);
+  LLVM_ABI const SCEV *getUMaxExpr(SCEVUse LHS, SCEVUse RHS);
+  LLVM_ABI const SCEV *getUMaxExpr(SmallVectorImpl<SCEVUse> &Operands);
+  LLVM_ABI const SCEV *getSMinExpr(SCEVUse LHS, SCEVUse RHS);
+  LLVM_ABI const SCEV *getSMinExpr(SmallVectorImpl<SCEVUse> &Operands);
+  LLVM_ABI const SCEV *getUMinExpr(SCEVUse LHS, SCEVUse RHS,
                                    bool Sequential = false);
-  LLVM_ABI const SCEV *getUMinExpr(SmallVectorImpl<const SCEV *> &Operands,
+  LLVM_ABI const SCEV *getUMinExpr(SmallVectorImpl<SCEVUse> &Operands,
                                    bool Sequential = false);
   LLVM_ABI const SCEV *getUnknown(Value *V);
   LLVM_ABI const SCEV *getCouldNotCompute();
@@ -717,7 +788,7 @@ class ScalarEvolution {
   /// To compute the difference between two unrelated pointers, you can
   /// explicitly convert the arguments using getPtrToIntExpr(), for pointer
   /// types that support it.
-  LLVM_ABI const SCEV *getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
+  LLVM_ABI const SCEV *getMinusSCEV(SCEVUse LHS, SCEVUse RHS,
                                     SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
                                     unsigned Depth = 0);
 
@@ -773,9 +844,8 @@ class ScalarEvolution {
 
   /// Promote the operands to the wider of the types using zero-extension, and
   /// then perform a umin operation with them. N-ary function.
-  LLVM_ABI const SCEV *
-  getUMinFromMismatchedTypes(SmallVectorImpl<const SCEV *> &Ops,
-                             bool Sequential = false);
+  LLVM_ABI const SCEV *getUMinFromMismatchedTypes(SmallVectorImpl<SCEVUse> &Ops,
+                                                  bool Sequential = false);
 
   /// Transitively follow the chain of pointer-type operands until reaching a
   /// SCEV that does not have a single pointer operand. This returns a
@@ -2177,7 +2247,7 @@ class ScalarEvolution {
   void forgetBackedgeTakenCounts(const Loop *L, bool Predicated);
 
   /// Drop memoized information for all \p SCEVs.
-  void forgetMemoizedResults(ArrayRef<const SCEV *> SCEVs);
+  void forgetMemoizedResults(ArrayRef<SCEVUse> SCEVs);
 
   /// Helper for forgetMemoizedResults.
   void forgetMemoizedResultsImpl(const SCEV *S);
@@ -2186,7 +2256,7 @@ class ScalarEvolution {
   /// from ValueExprMap and collect SCEV expressions in \p ToForget
   void visitAndClearUsers(SmallVectorImpl<Instruction *> &Worklist,
                           SmallPtrSetImpl<Instruction *> &Visited,
-                          SmallVectorImpl<const SCEV *> &ToForget);
+                          SmallVectorImpl<SCEVUse> &ToForget);
 
   /// Erase Value from ValueExprMap and ExprValueMap.
   void eraseValueFromMap(Value *V);
@@ -2236,12 +2306,12 @@ class ScalarEvolution {
   /// Return a scope which provides an upper bound on the defining scope for
   /// a SCEV with the operands in Ops.  The outparam Precise is set if the
   /// bound found is a precise bound (i.e. must be the defining scope.)
-  const Instruction *getDefiningScopeBound(ArrayRef<const SCEV *> Ops,
+  const Instruction *getDefiningScopeBound(ArrayRef<SCEVUse> Ops,
                                            bool &Precise);
 
   /// Wrapper around the above for cases which don't care if the bound
   /// is precise.
-  const Instruction *getDefiningScopeBound(ArrayRef<const SCEV *> Ops);
+  const Instruction *getDefiningScopeBound(ArrayRef<SCEVUse> Ops);
 
   /// Given two instructions in the same function, return true if we can
   /// prove B must execute given A executes.
@@ -2315,16 +2385,16 @@ class ScalarEvolution {
   bool canIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, bool IsSigned);
 
   /// Get add expr already created or create a new one.
-  const SCEV *getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
+  const SCEV *getOrCreateAddExpr(ArrayRef<SCEVUse> Ops,
                                  SCEV::NoWrapFlags Flags);
 
   /// Get mul expr already created or create a new one.
-  const SCEV *getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
+  const SCEV *getOrCreateMulExpr(ArrayRef<SCEVUse> Ops,
                                  SCEV::NoWrapFlags Flags);
 
   // Get addrec expr already created or create a new one.
-  const SCEV *getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
-                                    const Loop *L, SCEV::NoWrapFlags Flags);
+  const SCEV *getOrCreateAddRecExpr(ArrayRef<SCEVUse> Ops, const Loop *L,
+                                    SCEV::NoWrapFlags Flags);
 
   /// Return x if \p Val is f(x) where f is a 1-1 function.
   const SCEV *stripInjectiveFunctions(const SCEV *Val) const;
@@ -2337,6 +2407,7 @@ class ScalarEvolution {
   /// Look for a SCEV expression with type `SCEVType` and operands `Ops` in
   /// `UniqueSCEVs`.  Return if found, else nullptr.
   SCEV *findExistingSCEVInCache(SCEVTypes SCEVType, ArrayRef<const SCEV *> Ops);
+  SCEV *findExistingSCEVInCache(SCEVTypes SCEVType, ArrayRef<SCEVUse> Ops);
 
   /// Get reachable blocks in this function, making limited use of SCEV
   /// reasoning about conditions.
@@ -2345,8 +2416,7 @@ class ScalarEvolution {
 
   /// Return the given SCEV expression with a new set of operands.
   /// This preserves the origial nowrap flags.
-  const SCEV *getWithOperands(const SCEV *S,
-                              SmallVectorImpl<const SCEV *> &NewOps);
+  const SCEV *getWithOperands(const SCEV *S, SmallVectorImpl<SCEVUse> &NewOps);
 
   FoldingSet<SCEV> UniqueSCEVs;
   FoldingSet<SCEVPredicate> UniquePreds;

diff --git a/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h b/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
index 2fd25dbb9062d..5199bbede7f84 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
@@ -94,9 +94,9 @@ class SCEVVScale : public SCEV {
   static bool classof(const SCEV *S) { return S->getSCEVType() == scVScale; }
 };
 
-inline unsigned short computeExpressionSize(ArrayRef<const SCEV *> Args) {
+inline unsigned short computeExpressionSize(ArrayRef<SCEVUse> Args) {
   APInt Size(16, 1);
-  for (const auto *Arg : Args)
+  for (const SCEV *Arg : Args)
     Size = Size.uadd_sat(APInt(16, Arg->getExpressionSize()));
   return (unsigned short)Size.getZExtValue();
 }
@@ -104,19 +104,19 @@ inline unsigned short computeExpressionSize(ArrayRef<const SCEV *> Args) {
 /// This is the base class for unary cast operator classes.
 class SCEVCastExpr : public SCEV {
 protected:
-  const SCEV *Op;
+  SCEVUse Op;
   Type *Ty;
 
   LLVM_ABI SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
-                        const SCEV *op, Type *ty);
+                        SCEVUse op, Type *ty);
 
 public:
-  const SCEV *getOperand() const { return Op; }
-  const SCEV *getOperand(unsigned i) const {
+  SCEVUse getOperand() const { return Op; }
+  SCEVUse getOperand(unsigned i) const {
     assert(i == 0 && "Operand index out of range!");
     return Op;
   }
-  ArrayRef<const SCEV *> operands() const { return Op; }
+  ArrayRef<SCEVUse> operands() const { return Op; }
   size_t getNumOperands() const { return 1; }
   Type *getType() const { return Ty; }
 
@@ -133,11 +133,12 @@ class SCEVCastExpr : public SCEV {
 class SCEVPtrToIntExpr : public SCEVCastExpr {
   friend class ScalarEvolution;
 
-  SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op, Type *ITy);
+  SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, SCEVUse Op, Type *ITy);
 
 public:
   /// Methods for support type inquiry through isa, cast, and dyn_cast:
   static bool classof(const SCEV *S) { return S->getSCEVType() == scPtrToInt; }
+  static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
 };
 
 /// This class represents a cast from a pointer to a pointer-sized integer
@@ -156,7 +157,7 @@ class SCEVPtrToAddrExpr : public SCEVCastExpr {
 class SCEVIntegralCastExpr : public SCEVCastExpr {
 protected:
   LLVM_ABI SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
-                                const SCEV *op, Type *ty);
+                                SCEVUse op, Type *ty);
 
 public:
   /// Methods for support type inquiry through isa, cast, and dyn_cast:
@@ -171,7 +172,7 @@ class SCEVIntegralCastExpr : public SCEVCastExpr {
 class SCEVTruncateExpr : public SCEVIntegralCastExpr {
   friend class ScalarEvolution;
 
-  SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op, Type *ty);
+  SCEVTruncateExpr(const FoldingSetNodeIDRef ID, SCEVUse op, Type *ty);
 
 public:
   /// Methods for support type inquiry through isa, cast, and dyn_cast:
@@ -183,7 +184,7 @@ class SCEVTruncateExpr : public SCEVIntegralCastExpr {
 class SCEVZeroExtendExpr : public SCEVIntegralCastExpr {
   friend class ScalarEvolution;
 
-  SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID, const SCEV *op, Type *ty);
+  SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID, SCEVUse op, Type *ty);
 
 public:
   /// Methods for support type inquiry through isa, cast, and dyn_cast:
@@ -197,7 +198,7 @@ class SCEVZeroExtendExpr : public SCEVIntegralCastExpr {
 class SCEVSignExtendExpr : public SCEVIntegralCastExpr {
   friend class ScalarEvolution;
 
-  SCEVSignExtendExpr(const FoldingSetNodeIDRef ID, const SCEV *op, Type *ty);
+  SCEVSignExtendExpr(const FoldingSetNodeIDRef ID, SCEVUse op, Type *ty);
 
 public:
   /// Methods for support type inquiry through isa, cast, and dyn_cast:
@@ -214,25 +215,23 @@ class SCEVNAryExpr : public SCEV {
   // arrays with its SCEVAllocator, so this class just needs a simple
   // pointer rather than a more elaborate vector-like data structure.
   // This also avoids the need for a non-trivial destructor.
-  const SCEV *const *Operands;
+  const SCEVUse *Operands;
   size_t NumOperands;
 
-  SCEVNAryExpr(const FoldingSetNodeIDRef ID, enum SCEVTypes T,
-               const SCEV *const *O, size_t N)
+  SCEVNAryExpr(const FoldingSetNodeIDRef ID, enum SCEVTypes T, const SCEVUse *O,
+               size_t N)
       : SCEV(ID, T, computeExpressionSize(ArrayRef(O, N))), Operands(O),
         NumOperands(N) {}
 
 public:
   size_t getNumOperands() const { return NumOperands; }
 
-  const SCEV *getOperand(unsigned i) const {
+  SCEVUse getOperand(unsigned i) const {
     assert(i < NumOperands && "Operand index out of range!");
     return Operands[i];
   }
 
-  ArrayRef<const SCEV *> operands() const {
-    return ArrayRef(Operands, NumOperands);
-  }
+  ArrayRef<SCEVUse> operands() const { return ArrayRef(Operands, NumOperands); }
 
   NoWrapFlags getNoWrapFlags(NoWrapFlags Mask = NoWrapMask) const {
     return (NoWrapFlags)(SubclassData & Mask);
@@ -256,13 +255,14 @@ class SCEVNAryExpr : public SCEV {
            S->getSCEVType() == scSequentialUMinExpr ||
            S->getSCEVType() == scAddRecExpr;
   }
+  static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
 };
 
 /// This node is the base class for n'ary commutative operators.
 class SCEVCommutativeExpr : public SCEVNAryExpr {
 protected:
   SCEVCommutativeExpr(const FoldingSetNodeIDRef ID, enum SCEVTypes T,
-                      const SCEV *const *O, size_t N)
+                      const SCEVUse *O, size_t N)
       : SCEVNAryExpr(ID, T, O, N) {}
 
 public:
@@ -283,11 +283,10 @@ class SCEVAddExpr : public SCEVCommutativeExpr {
 
   Type *Ty;
 
-  SCEVAddExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
+  SCEVAddExpr(const FoldingSetNodeIDRef ID, const SCEVUse *O, size_t N)
       : SCEVCommutativeExpr(ID, scAddExpr, O, N) {
-    auto *FirstPointerTypedOp = find_if(operands(), [](const SCEV *Op) {
-      return Op->getType()->isPointerTy();
-    });
+    auto *FirstPointerTypedOp = find_if(
+        operands(), [](SCEVUse Op) { return Op->getType()->isPointerTy(); });
     if (FirstPointerTypedOp != operands().end())
       Ty = (*FirstPointerTypedOp)->getType();
     else
@@ -299,13 +298,14 @@ class SCEVAddExpr : public SCEVCommutativeExpr {
 
   /// Methods for support type inquiry through isa, cast, and dyn_cast:
   static bool classof(const SCEV *S) { return S->getSCEVType() == scAddExpr; }
+  static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
 };
 
 /// This node represents multiplication of some number of SCEVs.
 class SCEVMulExpr : public SCEVCommutativeExpr {
   friend class ScalarEvolution;
 
-  SCEVMulExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
+  SCEVMulExpr(const FoldingSetNodeIDRef ID, const SCEVUse *O, size_t N)
       : SCEVCommutativeExpr(ID, scMulExpr, O, N) {}
 
 public:
@@ -313,30 +313,31 @@ class SCEVMulExpr : public SCEVCommutativeExpr {
 
   /// Methods for support type inquiry through isa, cast, and dyn_cast:
   static bool classof(const SCEV *S) { return S->getSCEVType() == scMulExpr; }
+  static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
 };
 
 /// This class represents a binary unsigned division operation.
 class SCEVUDivExpr : public SCEV {
   friend class ScalarEvolution;
 
-  std::array<const SCEV *, 2> Operands;
+  std::array<SCEVUse, 2> Operands;
 
-  SCEVUDivExpr(const FoldingSetNodeIDRef ID, const SCEV *lhs, const SCEV *rhs)
+  SCEVUDivExpr(const FoldingSetNodeIDRef ID, SCEVUse lhs, SCEVUse rhs)
       : SCEV(ID, scUDivExpr, computeExpressionSize({lhs, rhs})) {
     Operands[0] = lhs;
     Operands[1] = rhs;
   }
 
 public:
-  const SCEV *getLHS() const { return Operands[0]; }
-  const SCEV *getRHS() const { return Operands[1]; }
+  SCEVUse getLHS() const { return Operands[0]; }
+  SCEVUse getRHS() const { return Operands[1]; }
   size_t getNumOperands() const { return 2; }
-  const SCEV *getOperand(unsigned i) const {
+  SCEVUse getOperand(unsigned i) const {
     assert((i == 0 || i == 1) && "Operand index out of range!");
     return i == 0 ? getLHS() : getRHS();
   }
 
-  ArrayRef<const SCEV *> operands() const { return Operands; }
+  ArrayRef<SCEVUse> operands() const { return Operands; }
 
   Type *getType() const {
     // In most cases the types of LHS and RHS will be the same, but in some
@@ -364,25 +365,24 @@ class SCEVAddRecExpr : public SCEVNAryExpr {
 
   const Loop *L;
 
-  SCEVAddRecExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N,
+  SCEVAddRecExpr(const FoldingSetNodeIDRef ID, const SCEVUse *O, size_t N,
                  const Loop *l)
       : SCEVNAryExpr(ID, scAddRecExpr, O, N), L(l) {}
 
 public:
   Type *getType() const { return getStart()->getType(); }
-  const SCEV *getStart() const { return Operands[0]; }
+  SCEVUse getStart() const { return Operands[0]; }
   const Loop *getLoop() const { return L; }
 
   /// Constructs and returns the recurrence indicating how much this
   /// expression steps by.  If this is a polynomial of degree N, it
   /// returns a chrec of degree N-1.  We cannot determine whether
   /// the step recurrence has self-wraparound.
-  const SCEV *getStepRecurrence(ScalarEvolution &SE) const {
+  SCEVUse getStepRecurrence(ScalarEvolution &SE) const {
     if (isAffine())
       return getOperand(1);
-    return SE.getAddRecExpr(
-        SmallVector<const SCEV *, 3>(operands().drop_front()), getLoop(),
-        FlagAnyWrap);
+    return SE.getAddRecExpr(SmallVector<SCEVUse, 3>(operands().drop_front()),
+                            getLoop(), FlagAnyWrap);
   }
 
   /// Return true if this represents an expression A + B*x where A
@@ -414,9 +414,9 @@ class SCEVAddRecExpr : public SCEVNAryExpr {
 
   /// Return the value of this chain of recurrences at the specified iteration
   /// number. Takes an explicit list of operands to represent an AddRec.
-  LLVM_ABI static const SCEV *
-  evaluateAtIteration(ArrayRef<const SCEV *> Operands, const SCEV *It,
-                      ScalarEvolution &SE);
+  LLVM_ABI static const SCEV *evaluateAtIteration(ArrayRef<SCEVUse> Operands,
+                                                  const SCEV *It,
+                                                  ScalarEvolution &SE);
 
   /// Return the number of iterations of this loop that produce
   /// values in the specified constant range.  Another way of
@@ -449,7 +449,7 @@ class SCEVMinMaxExpr : public SCEVCommutativeExpr {
 protected:
   /// Note: Constructing subclasses via this constructor is allowed
   SCEVMinMaxExpr(const FoldingSetNodeIDRef ID, enum SCEVTypes T,
-                 const SCEV *const *O, size_t N)
+                 const SCEVUse *O, size_t N)
       : SCEVCommutativeExpr(ID, T, O, N) {
     assert(isMinMaxType(T));
     // Min and max never overflow
@@ -481,7 +481,7 @@ class SCEVMinMaxExpr : public SCEVCommutativeExpr {
 class SCEVSMaxExpr : public SCEVMinMaxExpr {
   friend class ScalarEvolution;
 
-  SCEVSMaxExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
+  SCEVSMaxExpr(const FoldingSetNodeIDRef ID, const SCEVUse *O, size_t N)
       : SCEVMinMaxExpr(ID, scSMaxExpr, O, N) {}
 
 public:
@@ -493,7 +493,7 @@ class SCEVSMaxExpr : public SCEVMinMaxExpr {
 class SCEVUMaxExpr : public SCEVMinMaxExpr {
   friend class ScalarEvolution;
 
-  SCEVUMaxExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
+  SCEVUMaxExpr(const FoldingSetNodeIDRef ID, const SCEVUse *O, size_t N)
       : SCEVMinMaxExpr(ID, scUMaxExpr, O, N) {}
 
 public:
@@ -505,7 +505,7 @@ class SCEVUMaxExpr : public SCEVMinMaxExpr {
 class SCEVSMinExpr : public SCEVMinMaxExpr {
   friend class ScalarEvolution;
 
-  SCEVSMinExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
+  SCEVSMinExpr(const FoldingSetNodeIDRef ID, const SCEVUse *O, size_t N)
       : SCEVMinMaxExpr(ID, scSMinExpr, O, N) {}
 
 public:
@@ -517,7 +517,7 @@ class SCEVSMinExpr : public SCEVMinMaxExpr {
 class SCEVUMinExpr : public SCEVMinMaxExpr {
   friend class ScalarEvolution;
 
-  SCEVUMinExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
+  SCEVUMinExpr(const FoldingSetNodeIDRef ID, const SCEVUse *O, size_t N)
       : SCEVMinMaxExpr(ID, scUMinExpr, O, N) {}
 
 public:
@@ -544,7 +544,7 @@ class SCEVSequentialMinMaxExpr : public SCEVNAryExpr {
 protected:
   /// Note: Constructing subclasses via this constructor is allowed
   SCEVSequentialMinMaxExpr(const FoldingSetNodeIDRef ID, enum SCEVTypes T,
-                           const SCEV *const *O, size_t N)
+                           const SCEVUse *O, size_t N)
       : SCEVNAryExpr(ID, T, O, N) {
     assert(isSequentialMinMaxType(T));
     // Min and max never overflow
@@ -571,13 +571,14 @@ class SCEVSequentialMinMaxExpr : public SCEVNAryExpr {
   static bool classof(const SCEV *S) {
     return isSequentialMinMaxType(S->getSCEVType());
   }
+  static bool classof(const SCEVUse *U) { return classof(U->getPointer()); }
 };
 
 /// This class represents a sequential/in-order unsigned minimum selection.
 class SCEVSequentialUMinExpr : public SCEVSequentialMinMaxExpr {
   friend class ScalarEvolution;
 
-  SCEVSequentialUMinExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O,
+  SCEVSequentialUMinExpr(const FoldingSetNodeIDRef ID, const SCEVUse *O,
                          size_t N)
       : SCEVSequentialMinMaxExpr(ID, scSequentialUMinExpr, O, N) {}
 
@@ -715,7 +716,7 @@ template <typename SV> class SCEVTraversal {
       case scUMinExpr:
       case scSequentialUMinExpr:
       case scAddRecExpr:
-        for (const auto *Op : S->operands()) {
+        for (const SCEV *Op : S->operands()) {
           push(Op);
           if (Visitor.isDone())
             break;
@@ -825,9 +826,9 @@ class SCEVRewriteVisitor : public SCEVVisitor<SC, const SCEV *> {
   }
 
   const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
-    SmallVector<const SCEV *, 2> Operands;
+    SmallVector<SCEVUse, 2> Operands;
     bool Changed = false;
-    for (const auto *Op : Expr->operands()) {
+    for (const SCEV *Op : Expr->operands()) {
       Operands.push_back(((SC *)this)->visit(Op));
       Changed |= Op != Operands.back();
     }
@@ -835,9 +836,9 @@ class SCEVRewriteVisitor : public SCEVVisitor<SC, const SCEV *> {
   }
 
   const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
-    SmallVector<const SCEV *, 2> Operands;
+    SmallVector<SCEVUse, 2> Operands;
     bool Changed = false;
-    for (const auto *Op : Expr->operands()) {
+    for (const SCEV *Op : Expr->operands()) {
       Operands.push_back(((SC *)this)->visit(Op));
       Changed |= Op != Operands.back();
     }
@@ -852,9 +853,9 @@ class SCEVRewriteVisitor : public SCEVVisitor<SC, const SCEV *> {
   }
 
   const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
-    SmallVector<const SCEV *, 2> Operands;
+    SmallVector<SCEVUse, 2> Operands;
     bool Changed = false;
-    for (const auto *Op : Expr->operands()) {
+    for (const SCEV *Op : Expr->operands()) {
       Operands.push_back(((SC *)this)->visit(Op));
       Changed |= Op != Operands.back();
     }
@@ -864,9 +865,9 @@ class SCEVRewriteVisitor : public SCEVVisitor<SC, const SCEV *> {
   }
 
   const SCEV *visitSMaxExpr(const SCEVSMaxExpr *Expr) {
-    SmallVector<const SCEV *, 2> Operands;
+    SmallVector<SCEVUse, 2> Operands;
     bool Changed = false;
-    for (const auto *Op : Expr->operands()) {
+    for (const SCEV *Op : Expr->operands()) {
       Operands.push_back(((SC *)this)->visit(Op));
       Changed |= Op != Operands.back();
     }
@@ -874,9 +875,9 @@ class SCEVRewriteVisitor : public SCEVVisitor<SC, const SCEV *> {
   }
 
   const SCEV *visitUMaxExpr(const SCEVUMaxExpr *Expr) {
-    SmallVector<const SCEV *, 2> Operands;
+    SmallVector<SCEVUse, 2> Operands;
     bool Changed = false;
-    for (const auto *Op : Expr->operands()) {
+    for (const SCEV *Op : Expr->operands()) {
       Operands.push_back(((SC *)this)->visit(Op));
       Changed |= Op != Operands.back();
     }
@@ -884,9 +885,9 @@ class SCEVRewriteVisitor : public SCEVVisitor<SC, const SCEV *> {
   }
 
   const SCEV *visitSMinExpr(const SCEVSMinExpr *Expr) {
-    SmallVector<const SCEV *, 2> Operands;
+    SmallVector<SCEVUse, 2> Operands;
     bool Changed = false;
-    for (const auto *Op : Expr->operands()) {
+    for (const SCEV *Op : Expr->operands()) {
       Operands.push_back(((SC *)this)->visit(Op));
       Changed |= Op != Operands.back();
     }
@@ -894,9 +895,9 @@ class SCEVRewriteVisitor : public SCEVVisitor<SC, const SCEV *> {
   }
 
   const SCEV *visitUMinExpr(const SCEVUMinExpr *Expr) {
-    SmallVector<const SCEV *, 2> Operands;
+    SmallVector<SCEVUse, 2> Operands;
     bool Changed = false;
-    for (const auto *Op : Expr->operands()) {
+    for (const SCEV *Op : Expr->operands()) {
       Operands.push_back(((SC *)this)->visit(Op));
       Changed |= Op != Operands.back();
     }
@@ -904,9 +905,9 @@ class SCEVRewriteVisitor : public SCEVVisitor<SC, const SCEV *> {
   }
 
   const SCEV *visitSequentialUMinExpr(const SCEVSequentialUMinExpr *Expr) {
-    SmallVector<const SCEV *, 2> Operands;
+    SmallVector<SCEVUse, 2> Operands;
     bool Changed = false;
-    for (const auto *Op : Expr->operands()) {
+    for (const SCEV *Op : Expr->operands()) {
       Operands.push_back(((SC *)this)->visit(Op));
       Changed |= Op != Operands.back();
     }
@@ -964,8 +965,8 @@ class SCEVLoopAddRecRewriter
   }
 
   const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
-    SmallVector<const SCEV *, 2> Operands;
-    for (const SCEV *Op : Expr->operands())
+    SmallVector<SCEVUse, 2> Operands;
+    for (SCEVUse Op : Expr->operands())
       Operands.push_back(visit(Op));
 
     const Loop *L = Expr->getLoop();

diff  --git a/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h b/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h
index 7b00d0109a68c..0b47357434782 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h
@@ -13,6 +13,7 @@
 #ifndef LLVM_ANALYSIS_SCALAREVOLUTIONPATTERNMATCH_H
 #define LLVM_ANALYSIS_SCALAREVOLUTIONPATTERNMATCH_H
 
+#include "llvm/Analysis/ScalarEvolution.h"
 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
 
 namespace llvm {
@@ -22,6 +23,11 @@ template <typename Pattern> bool match(const SCEV *S, const Pattern &P) {
   return P.match(S);
 }
 
+template <typename Pattern> bool match(const SCEVUse U, const Pattern &P) {
+  const SCEV *S = U.getPointer();
+  return const_cast<Pattern &>(P).match(S);
+}
+
 template <typename Predicate> struct cst_pred_ty : public Predicate {
   cst_pred_ty() = default;
   cst_pred_ty(uint64_t V) : Predicate(V) {}
@@ -159,7 +165,8 @@ template <typename SCEVTy, typename Op0_t> struct SCEVUnaryExpr_match {
 
   bool match(const SCEV *S) const {
     auto *E = dyn_cast<SCEVTy>(S);
-    return E && E->getNumOperands() == 1 && Op0.match(E->getOperand(0));
+    return E && E->getNumOperands() == 1 &&
+           Op0.match(E->getOperand(0).getPointer());
   }
 };
 
@@ -215,9 +222,10 @@ struct SCEVBinaryExpr_match {
 
     auto *E = dyn_cast<SCEVTy>(S);
     return E && E->getNumOperands() == 2 &&
-           ((Op0.match(E->getOperand(0)) && Op1.match(E->getOperand(1))) ||
-            (Commutable && Op0.match(E->getOperand(1)) &&
-             Op1.match(E->getOperand(0))));
+           ((Op0.match(E->getOperand(0).getPointer()) &&
+             Op1.match(E->getOperand(1).getPointer())) ||
+            (Commutable && Op0.match(E->getOperand(1).getPointer()) &&
+             Op1.match(E->getOperand(0).getPointer())));
   }
 };
 

diff  --git a/llvm/include/llvm/Transforms/Scalar/NaryReassociate.h b/llvm/include/llvm/Transforms/Scalar/NaryReassociate.h
index f0474bc4352e3..417801d470800 100644
--- a/llvm/include/llvm/Transforms/Scalar/NaryReassociate.h
+++ b/llvm/include/llvm/Transforms/Scalar/NaryReassociate.h
@@ -80,6 +80,7 @@
 
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/ScalarEvolution.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/IR/ValueHandle.h"
 
@@ -92,8 +93,6 @@ class DominatorTree;
 class Function;
 class GetElementPtrInst;
 class Instruction;
-class ScalarEvolution;
-class SCEV;
 class TargetLibraryInfo;
 class TargetTransformInfo;
 class Type;
@@ -114,7 +113,7 @@ class NaryReassociatePass : public PassInfoMixin<NaryReassociatePass> {
   bool doOneIteration(Function &F);
 
   // Reassociates I for better CSE.
-  Instruction *tryReassociate(Instruction *I, const SCEV *&OrigSCEV);
+  Instruction *tryReassociate(Instruction *I, SCEVUse &OrigSCEV);
 
   // Reassociate GEP for better CSE.
   Instruction *tryReassociateGEP(GetElementPtrInst *GEP);
@@ -143,19 +142,18 @@ class NaryReassociatePass : public PassInfoMixin<NaryReassociatePass> {
   Instruction *tryReassociateBinaryOp(Value *LHS, Value *RHS,
                                       BinaryOperator *I);
   // Rewrites I to (LHS op RHS) if LHS is computed already.
-  Instruction *tryReassociatedBinaryOp(const SCEV *LHS, Value *RHS,
+  Instruction *tryReassociatedBinaryOp(SCEVUse LHS, Value *RHS,
                                        BinaryOperator *I);
 
   // Tries to match Op1 and Op2 by using V.
   bool matchTernaryOp(BinaryOperator *I, Value *V, Value *&Op1, Value *&Op2);
 
   // Gets SCEV for (LHS op RHS).
-  const SCEV *getBinarySCEV(BinaryOperator *I, const SCEV *LHS,
-                            const SCEV *RHS);
+  SCEVUse getBinarySCEV(BinaryOperator *I, SCEVUse LHS, SCEVUse RHS);
 
   // Returns the closest dominator of \c Dominatee that computes
   // \c CandidateExpr. Returns null if not found.
-  Instruction *findClosestMatchingDominator(const SCEV *CandidateExpr,
+  Instruction *findClosestMatchingDominator(SCEVUse CandidateExpr,
                                             Instruction *Dominatee);
 
   // Try to match \p I as signed/unsigned Min/Max and reassociate it. \p
@@ -163,8 +161,7 @@ class NaryReassociatePass : public PassInfoMixin<NaryReassociatePass> {
   // done or not. If reassociation was successful newly generated instruction is
   // returned, otherwise nullptr.
   template <typename PredT>
-  Instruction *matchAndReassociateMinOrMax(Instruction *I,
-                                           const SCEV *&OrigSCEV);
+  Instruction *matchAndReassociateMinOrMax(Instruction *I, SCEVUse &OrigSCEV);
 
   // Reassociate Min/Max.
   template <typename MaxMinT>

diff  --git a/llvm/lib/Analysis/Delinearization.cpp b/llvm/lib/Analysis/Delinearization.cpp
index 5e670b5a5f456..b6125c47f0835 100644
--- a/llvm/lib/Analysis/Delinearization.cpp
+++ b/llvm/lib/Analysis/Delinearization.cpp
@@ -133,7 +133,7 @@ struct SCEVCollectAddRecMultiplies {
   bool follow(const SCEV *S) {
     if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
       bool HasAddRec = false;
-      SmallVector<const SCEV *, 0> Operands;
+      SmallVector<SCEVUse, 0> Operands;
       for (const SCEV *Op : Mul->operands()) {
         const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
         if (Unknown && !isa<CallInst>(Unknown->getValue())) {
@@ -207,7 +207,7 @@ static bool findArrayDimensionsRec(ScalarEvolution &SE,
   // End of recursion.
   if (Last == 0) {
     if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) {
-      SmallVector<const SCEV *, 2> Qs;
+      SmallVector<SCEVUse, 2> Qs;
       for (const SCEV *Op : M->operands())
         if (!isa<SCEVConstant>(Op))
           Qs.push_back(Op);
@@ -266,7 +266,7 @@ static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) {
     return T;
 
   if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) {
-    SmallVector<const SCEV *, 2> Factors;
+    SmallVector<SCEVUse, 2> Factors;
     for (const SCEV *Op : M->operands())
       if (!isa<SCEVConstant>(Op))
         Factors.push_back(Op);

diff  --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index ba7d94f4b8bdd..53c01e68744d7 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -258,6 +258,22 @@ static cl::opt<bool> UseContextForNoWrapFlagInference(
 //                           SCEV class definitions
 //===----------------------------------------------------------------------===//
 
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD void SCEVUse::dump() const {
+  print(dbgs());
+  dbgs() << '\n';
+}
+#endif
+
+void SCEVUse::print(raw_ostream &OS) const {
+  getPointer()->print(OS);
+  SCEV::NoWrapFlags Flags = static_cast<SCEV::NoWrapFlags>(getInt());
+  if (Flags & SCEV::FlagNUW)
+    OS << "(u nuw)";
+  if (Flags & SCEV::FlagNSW)
+    OS << "(u nsw)";
+}
+
 //===----------------------------------------------------------------------===//
 // Implementation of the SCEV class.
 //
@@ -417,7 +433,7 @@ Type *SCEV::getType() const {
   llvm_unreachable("Unknown SCEV kind!");
 }
 
-ArrayRef<const SCEV *> SCEV::operands() const {
+ArrayRef<SCEVUse> SCEV::operands() const {
   switch (getSCEVType()) {
   case scConstant:
   case scVScale:
@@ -516,7 +532,7 @@ const SCEV *ScalarEvolution::getElementCount(Type *Ty, ElementCount EC,
 }
 
 SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
-                           const SCEV *op, Type *ty)
+                           SCEVUse op, Type *ty)
     : SCEV(ID, SCEVTy, computeExpressionSize(op)), Op(op), Ty(ty) {}
 
 SCEVPtrToAddrExpr::SCEVPtrToAddrExpr(const FoldingSetNodeIDRef ID,
@@ -526,7 +542,7 @@ SCEVPtrToAddrExpr::SCEVPtrToAddrExpr(const FoldingSetNodeIDRef ID,
          "Must be a non-bit-width-changing pointer-to-integer cast!");
 }
 
-SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op,
+SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, SCEVUse Op,
                                    Type *ITy)
     : SCEVCastExpr(ID, scPtrToInt, Op, ITy) {
   assert(getOperand()->getType()->isPointerTy() && Ty->isIntegerTy() &&
@@ -534,26 +550,26 @@ SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op,
 }
 
 SCEVIntegralCastExpr::SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID,
-                                           SCEVTypes SCEVTy, const SCEV *op,
+                                           SCEVTypes SCEVTy, SCEVUse op,
                                            Type *ty)
     : SCEVCastExpr(ID, SCEVTy, op, ty) {}
 
-SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op,
+SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, SCEVUse op,
                                    Type *ty)
     : SCEVIntegralCastExpr(ID, scTruncate, op, ty) {
   assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
          "Cannot truncate non-integer value!");
 }
 
-SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
-                                       const SCEV *op, Type *ty)
+SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID, SCEVUse op,
+                                       Type *ty)
     : SCEVIntegralCastExpr(ID, scZeroExtend, op, ty) {
   assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
          "Cannot zero extend non-integer value!");
 }
 
-SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
-                                       const SCEV *op, Type *ty)
+SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID, SCEVUse op,
+                                       Type *ty)
     : SCEVIntegralCastExpr(ID, scSignExtend, op, ty) {
   assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
          "Cannot sign extend non-integer value!");
@@ -561,7 +577,7 @@ SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
 
 void SCEVUnknown::deleted() {
   // Clear this SCEVUnknown from various maps.
-  SE->forgetMemoizedResults(this);
+  SE->forgetMemoizedResults({this});
 
   // Remove this SCEVUnknown from the uniquing map.
   SE->UniqueSCEVs.RemoveNode(this);
@@ -572,7 +588,7 @@ void SCEVUnknown::deleted() {
 
 void SCEVUnknown::allUsesReplacedWith(Value *New) {
   // Clear this SCEVUnknown from various maps.
-  SE->forgetMemoizedResults(this);
+  SE->forgetMemoizedResults({this});
 
   // Remove this SCEVUnknown from the uniquing map.
   SE->UniqueSCEVs.RemoveNode(this);
@@ -748,8 +764,8 @@ CompareSCEVComplexity(const LoopInfo *const LI, const SCEV *LHS,
   case scSMinExpr:
   case scUMinExpr:
   case scSequentialUMinExpr: {
-    ArrayRef<const SCEV *> LOps = LHS->operands();
-    ArrayRef<const SCEV *> ROps = RHS->operands();
+    ArrayRef<SCEVUse> LOps = LHS->operands();
+    ArrayRef<SCEVUse> ROps = RHS->operands();
 
     // Lexicographically compare n-ary-like expressions.
     unsigned LNumOps = LOps.size(), RNumOps = ROps.size();
@@ -757,7 +773,8 @@ CompareSCEVComplexity(const LoopInfo *const LI, const SCEV *LHS,
       return (int)LNumOps - (int)RNumOps;
 
     for (unsigned i = 0; i != LNumOps; ++i) {
-      auto X = CompareSCEVComplexity(LI, LOps[i], ROps[i], DT, Depth + 1);
+      auto X = CompareSCEVComplexity(LI, LOps[i].getPointer(),
+                                     ROps[i].getPointer(), DT, Depth + 1);
       if (X != 0)
         return X;
     }
@@ -779,28 +796,27 @@ CompareSCEVComplexity(const LoopInfo *const LI, const SCEV *LHS,
 /// results from this routine.  In other words, we don't want the results of
 /// this to depend on where the addresses of various SCEV objects happened to
 /// land in memory.
-static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
-                              LoopInfo *LI, DominatorTree &DT) {
+static void GroupByComplexity(SmallVectorImpl<SCEVUse> &Ops, LoopInfo *LI,
+                              DominatorTree &DT) {
   if (Ops.size() < 2) return;  // Noop
 
   // Whether LHS has provably less complexity than RHS.
-  auto IsLessComplex = [&](const SCEV *LHS, const SCEV *RHS) {
+  auto IsLessComplex = [&](SCEVUse LHS, SCEVUse RHS) {
     auto Complexity = CompareSCEVComplexity(LI, LHS, RHS, DT);
     return Complexity && *Complexity < 0;
   };
   if (Ops.size() == 2) {
     // This is the common case, which also happens to be trivially simple.
     // Special case it.
-    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
+    SCEVUse &LHS = Ops[0], &RHS = Ops[1];
     if (IsLessComplex(RHS, LHS))
       std::swap(LHS, RHS);
     return;
   }
 
   // Do the rough sort by complexity.
-  llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
-    return IsLessComplex(LHS, RHS);
-  });
+  llvm::stable_sort(
+      Ops, [&](SCEVUse LHS, SCEVUse RHS) { return IsLessComplex(LHS, RHS); });
 
   // Now that we are sorted by complexity, group elements of the same
   // complexity.  Note that this is, at worst, N^2, but the vector is likely to
@@ -825,7 +841,7 @@ static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
 
 /// Returns true if \p Ops contains a huge SCEV (the subtree of S contains at
 /// least HugeExprThreshold nodes).
-static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
+static bool hasHugeExpression(ArrayRef<SCEVUse> Ops) {
   return any_of(Ops, [](const SCEV *S) {
     return S->getExpressionSize() >= HugeExprThreshold;
   });
@@ -842,7 +858,7 @@ static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
 template <typename FoldT, typename IsIdentityT, typename IsAbsorberT>
 static const SCEV *
 constantFoldAndGroupOps(ScalarEvolution &SE, LoopInfo &LI, DominatorTree &DT,
-                        SmallVectorImpl<const SCEV *> &Ops, FoldT Fold,
+                        SmallVectorImpl<SCEVUse> &Ops, FoldT Fold,
                         IsIdentityT IsIdentity, IsAbsorberT IsAbsorber) {
   const SCEVConstant *Folded = nullptr;
   for (unsigned Idx = 0; Idx < Ops.size();) {
@@ -997,11 +1013,11 @@ const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
   return evaluateAtIteration(operands(), It, SE);
 }
 
-const SCEV *
-SCEVAddRecExpr::evaluateAtIteration(ArrayRef<const SCEV *> Operands,
-                                    const SCEV *It, ScalarEvolution &SE) {
+const SCEV *SCEVAddRecExpr::evaluateAtIteration(ArrayRef<SCEVUse> Operands,
+                                                const SCEV *It,
+                                                ScalarEvolution &SE) {
   assert(Operands.size() > 0);
-  const SCEV *Result = Operands[0];
+  const SCEV *Result = Operands[0].getPointer();
   for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
     // The computation is correct in the face of overflow provided that the
     // multiplication is performed _after_ the evaluation of the binomial
@@ -1010,7 +1026,8 @@ SCEVAddRecExpr::evaluateAtIteration(ArrayRef<const SCEV *> Operands,
     if (isa<SCEVCouldNotCompute>(Coeff))
       return Coeff;
 
-    Result = SE.getAddExpr(Result, SE.getMulExpr(Operands[i], Coeff));
+    Result =
+        SE.getAddExpr(Result, SE.getMulExpr(Operands[i].getPointer(), Coeff));
   }
   return Result;
 }
@@ -1055,21 +1072,21 @@ class SCEVCastSinkingRewriter
   const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
     // Preserve wrap flags on rewritten SCEVAddExpr, which the default
     // implementation drops.
-    SmallVector<const SCEV *, 2> Operands;
+    SmallVector<SCEVUse, 2> Operands;
     bool Changed = false;
-    for (const auto *Op : Expr->operands()) {
-      Operands.push_back(visit(Op));
-      Changed |= Op != Operands.back();
+    for (SCEVUse Op : Expr->operands()) {
+      Operands.push_back(visit(Op.getPointer()));
+      Changed |= Op.getPointer() != Operands.back();
     }
     return !Changed ? Expr : SE.getAddExpr(Operands, Expr->getNoWrapFlags());
   }
 
   const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
-    SmallVector<const SCEV *, 2> Operands;
+    SmallVector<SCEVUse, 2> Operands;
     bool Changed = false;
-    for (const auto *Op : Expr->operands()) {
-      Operands.push_back(visit(Op));
-      Changed |= Op != Operands.back();
+    for (SCEVUse Op : Expr->operands()) {
+      Operands.push_back(visit(Op.getPointer()));
+      Changed |= Op.getPointer() != Operands.back();
     }
     return !Changed ? Expr : SE.getMulExpr(Operands, Expr->getNoWrapFlags());
   }
@@ -1215,7 +1232,7 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
   // that replace other casts.
   if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
     auto *CommOp = cast<SCEVCommutativeExpr>(Op);
-    SmallVector<const SCEV *, 4> Operands;
+    SmallVector<SCEVUse, 4> Operands;
     unsigned numTruncs = 0;
     for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
          ++i) {
@@ -1241,7 +1258,7 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
 
   // If the input value is a chrec scev, truncate the chrec's operands.
   if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
-    SmallVector<const SCEV *, 4> Operands;
+    SmallVector<SCEVUse, 4> Operands;
     for (const SCEV *Op : AddRec->operands())
       Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
     return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
@@ -1375,7 +1392,7 @@ static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
   // subtraction is expensive. For this purpose, perform a quick and dirty
   // difference, by checking for Step in the operand list. Note, that
   // SA might have repeated ops, like %a + %a + ..., so only remove one.
-  SmallVector<const SCEV *, 4> DiffOps(SA->operands());
+  SmallVector<SCEVUse, 4> DiffOps(SA->operands());
   for (auto It = DiffOps.begin(); It != DiffOps.end(); ++It)
     if (*It == Step) {
       DiffOps.erase(It);
@@ -1819,8 +1836,8 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
     if (SA->hasNoUnsignedWrap()) {
       // If the addition does not unsign overflow then we can, by definition,
       // commute the zero extension with the addition operation.
-      SmallVector<const SCEV *, 4> Ops;
-      for (const auto *Op : SA->operands())
+      SmallVector<SCEVUse, 4> Ops;
+      for (SCEVUse Op : SA->operands())
         Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
       return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
     }
@@ -1852,8 +1869,8 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
     if (SM->hasNoUnsignedWrap()) {
       // If the multiply does not unsign overflow then we can, by definition,
       // commute the zero extension with the multiply operation.
-      SmallVector<const SCEV *, 4> Ops;
-      for (const auto *Op : SM->operands())
+      SmallVector<SCEVUse, 4> Ops;
+      for (SCEVUse Op : SM->operands())
         Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
       return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1);
     }
@@ -1889,8 +1906,8 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
   // zext(umax(x, y)) -> umax(zext(x), zext(y))
   if (isa<SCEVUMinExpr>(Op) || isa<SCEVUMaxExpr>(Op)) {
     auto *MinMax = cast<SCEVMinMaxExpr>(Op);
-    SmallVector<const SCEV *, 4> Operands;
-    for (auto *Operand : MinMax->operands())
+    SmallVector<SCEVUse, 4> Operands;
+    for (SCEVUse Operand : MinMax->operands())
       Operands.push_back(getZeroExtendExpr(Operand, Ty));
     if (isa<SCEVUMinExpr>(MinMax))
       return getUMinExpr(Operands);
@@ -1900,8 +1917,8 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
   // zext(umin_seq(x, y)) -> umin_seq(zext(x), zext(y))
   if (auto *MinMax = dyn_cast<SCEVSequentialMinMaxExpr>(Op)) {
     assert(isa<SCEVSequentialUMinExpr>(MinMax) && "Not supported!");
-    SmallVector<const SCEV *, 4> Operands;
-    for (auto *Operand : MinMax->operands())
+    SmallVector<SCEVUse, 4> Operands;
+    for (SCEVUse Operand : MinMax->operands())
       Operands.push_back(getZeroExtendExpr(Operand, Ty));
     return getUMinExpr(Operands, /*Sequential*/ true);
   }
@@ -1990,8 +2007,8 @@ const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
     if (SA->hasNoSignedWrap()) {
       // If the addition does not sign overflow then we can, by definition,
       // commute the sign extension with the addition operation.
-      SmallVector<const SCEV *, 4> Ops;
-      for (const auto *Op : SA->operands())
+      SmallVector<SCEVUse, 4> Ops;
+      for (SCEVUse Op : SA->operands())
         Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1));
       return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1);
     }
@@ -2160,8 +2177,8 @@ const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
   // sext(smax(x, y)) -> smax(sext(x), sext(y))
   if (isa<SCEVSMinExpr>(Op) || isa<SCEVSMaxExpr>(Op)) {
     auto *MinMax = cast<SCEVMinMaxExpr>(Op);
-    SmallVector<const SCEV *, 4> Operands;
-    for (auto *Operand : MinMax->operands())
+    SmallVector<SCEVUse, 4> Operands;
+    for (SCEVUse Operand : MinMax->operands())
       Operands.push_back(getSignExtendExpr(Operand, Ty));
     if (isa<SCEVSMinExpr>(MinMax))
       return getSMinExpr(Operands);
@@ -2174,7 +2191,7 @@ const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
   SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                    Op, Ty);
   UniqueSCEVs.InsertNode(S, IP);
-  registerUser(S, { Op });
+  registerUser(S, Op);
   return S;
 }
 
@@ -2229,7 +2246,7 @@ const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
 
   // Force the cast to be folded into the operands of an addrec.
   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
-    SmallVector<const SCEV *, 4> Ops;
+    SmallVector<SCEVUse, 4> Ops;
     for (const SCEV *Op : AR->operands())
       Ops.push_back(getAnyExtendExpr(Op, Ty));
     return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
@@ -2266,12 +2283,12 @@ const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
 /// may be exposed. This helps getAddRecExpr short-circuit extra work in
 /// the common case where no interesting opportunities are present, and
 /// is also used as a check to avoid infinite recursion.
-static bool
-CollectAddOperandsWithScales(SmallDenseMap<const SCEV *, APInt, 16> &M,
-                             SmallVectorImpl<const SCEV *> &NewOps,
-                             APInt &AccumulatedConstant,
-                             ArrayRef<const SCEV *> Ops, const APInt &Scale,
-                             ScalarEvolution &SE) {
+static bool CollectAddOperandsWithScales(SmallDenseMap<SCEVUse, APInt, 16> &M,
+                                         SmallVectorImpl<SCEVUse> &NewOps,
+                                         APInt &AccumulatedConstant,
+                                         ArrayRef<SCEVUse> Ops,
+                                         const APInt &Scale,
+                                         ScalarEvolution &SE) {
   bool Interesting = false;
 
   // Iterate over the add operands. They are sorted, with constants first.
@@ -2294,13 +2311,12 @@ CollectAddOperandsWithScales(SmallDenseMap<const SCEV *, APInt, 16> &M,
       if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
         // A multiplication of a constant with another add; recurse.
         const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
-        Interesting |=
-          CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
-                                       Add->operands(), NewScale, SE);
+        Interesting |= CollectAddOperandsWithScales(
+            M, NewOps, AccumulatedConstant, Add->operands(), NewScale, SE);
       } else {
         // A multiplication of a constant with some other value. Update
         // the map.
-        SmallVector<const SCEV *, 4> MulOps(drop_begin(Mul->operands()));
+        SmallVector<SCEVUse, 4> MulOps(drop_begin(Mul->operands()));
         const SCEV *Key = SE.getMulExpr(MulOps);
         auto Pair = M.insert({Key, NewScale});
         if (Pair.second) {
@@ -2314,8 +2330,7 @@ CollectAddOperandsWithScales(SmallDenseMap<const SCEV *, APInt, 16> &M,
       }
     } else {
       // An ordinary operand. Update the map.
-      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
-          M.insert({Ops[i], Scale});
+      auto Pair = M.insert({Ops[i], Scale});
       if (Pair.second) {
         NewOps.push_back(Pair.first->first);
       } else {
@@ -2333,8 +2348,8 @@ CollectAddOperandsWithScales(SmallDenseMap<const SCEV *, APInt, 16> &M,
 bool ScalarEvolution::willNotOverflow(Instruction::BinaryOps BinOp, bool Signed,
                                       const SCEV *LHS, const SCEV *RHS,
                                       const Instruction *CtxI) {
-  const SCEV *(ScalarEvolution::*Operation)(const SCEV *, const SCEV *,
-                                            SCEV::NoWrapFlags, unsigned);
+  const SCEV *(ScalarEvolution::*Operation)(SCEVUse, SCEVUse, SCEV::NoWrapFlags,
+                                            unsigned);
   switch (BinOp) {
   default:
     llvm_unreachable("Unsupported binary op");
@@ -2456,7 +2471,7 @@ ScalarEvolution::getStrengthenedNoWrapFlagsFromBinOp(
 // can't-overflow flags for the operation if possible.
 static SCEV::NoWrapFlags StrengthenNoWrapFlags(ScalarEvolution *SE,
                                                SCEVTypes Type,
-                                               ArrayRef<const SCEV *> Ops,
+                                               ArrayRef<SCEVUse> Ops,
                                                SCEV::NoWrapFlags Flags) {
   using namespace std::placeholders;
 
@@ -2472,8 +2487,8 @@ static SCEV::NoWrapFlags StrengthenNoWrapFlags(ScalarEvolution *SE,
       ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
 
   // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
-  auto IsKnownNonNegative = [&](const SCEV *S) {
-    return SE->isKnownNonNegative(S);
+  auto IsKnownNonNegative = [&](SCEVUse U) {
+    return SE->isKnownNonNegative(U);
   };
 
   if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative))
@@ -2542,7 +2557,7 @@ bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) {
 }
 
 /// Get a canonical add expression, or something simpler if possible.
-const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
+const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<SCEVUse> &Ops,
                                         SCEV::NoWrapFlags OrigFlags,
                                         unsigned Depth) {
   assert(!(OrigFlags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
@@ -2570,7 +2585,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
   unsigned Idx = isa<SCEVConstant>(Ops[0]) ? 1 : 0;
 
   // Delay expensive flag strengthening until necessary.
-  auto ComputeFlags = [this, OrigFlags](ArrayRef<const SCEV *> Ops) {
+  auto ComputeFlags = [this, OrigFlags](ArrayRef<SCEVUse> Ops) {
     return StrengthenNoWrapFlags(this, scAddExpr, Ops, OrigFlags);
   };
 
@@ -2598,7 +2613,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
       while (i+Count != e && Ops[i+Count] == Ops[i])
         ++Count;
       // Merge the values into a multiply.
-      const SCEV *Scale = getConstant(Ty, Count);
+      SCEVUse Scale = getConstant(Ty, Count);
       const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
       if (Ops.size() == Count)
         return Mul;
@@ -2622,14 +2637,14 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
     if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
       return T->getOperand()->getType();
     if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
-      const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
+      SCEVUse LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
       if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp))
         return T->getOperand()->getType();
     }
     return nullptr;
   };
   if (auto *SrcType = FindTruncSrcType()) {
-    SmallVector<const SCEV *, 8> LargeOps;
+    SmallVector<SCEVUse, 8> LargeOps;
     bool Ok = true;
     // Check all the operands to see if they can be represented in the
     // source type of the truncate.
@@ -2643,7 +2658,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
       } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Op)) {
         LargeOps.push_back(getAnyExtendExpr(C, SrcType));
       } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Op)) {
-        SmallVector<const SCEV *, 8> LargeMulOps;
+        SmallVector<SCEVUse, 8> LargeMulOps;
         for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
           if (const SCEVTruncateExpr *T =
                 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
@@ -2707,7 +2722,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
       }
 
       if (PreservedFlags != SCEV::FlagAnyWrap) {
-        SmallVector<const SCEV *, 4> NewOps(AddExpr->operands());
+        SmallVector<SCEVUse, 4> NewOps(AddExpr->operands());
         NewOps[0] = getConstant(ConstAdd);
         return getAddExpr(NewOps, PreservedFlags);
       }
@@ -2774,8 +2789,8 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
   // operands multiplied by constant values.
   if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
     uint64_t BitWidth = getTypeSizeInBits(Ty);
-    SmallDenseMap<const SCEV *, APInt, 16> M;
-    SmallVector<const SCEV *, 8> NewOps;
+    SmallDenseMap<SCEVUse, APInt, 16> M;
+    SmallVector<SCEVUse, 8> NewOps;
     APInt AccumulatedConstant(BitWidth, 0);
     if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                      Ops, APInt(BitWidth, 1), *this)) {
@@ -2788,7 +2803,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
       // Some interesting folding opportunity is present, so its worthwhile to
       // re-generate the operands list. Group the operands by constant scale,
       // to avoid multiplying by the same constant scale multiple times.
-      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
+      std::map<APInt, SmallVector<SCEVUse, 4>, APIntCompare> MulOpLists;
       for (const SCEV *NewOp : NewOps)
         MulOpLists[M.find(NewOp)->second].push_back(NewOp);
       // Re-generate the operands list.
@@ -2829,13 +2844,12 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
           if (Mul->getNumOperands() != 2) {
             // If the multiply has more than two operands, we must get the
             // Y*Z term.
-            SmallVector<const SCEV *, 4> MulOps(
-                Mul->operands().take_front(MulOp));
+            SmallVector<SCEVUse, 4> MulOps(Mul->operands().take_front(MulOp));
             append_range(MulOps, Mul->operands().drop_front(MulOp + 1));
             InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
           }
-          SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul};
-          const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
+          const SCEV *AddOne =
+              getAddExpr(getOne(Ty), InnerMul, SCEV::FlagAnyWrap, Depth + 1);
           const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
                                             SCEV::FlagAnyWrap, Depth + 1);
           if (Ops.size() == 2) return OuterMul;
@@ -2863,21 +2877,19 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
             // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
             const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
             if (Mul->getNumOperands() != 2) {
-              SmallVector<const SCEV *, 4> MulOps(
-                  Mul->operands().take_front(MulOp));
+              SmallVector<SCEVUse, 4> MulOps(Mul->operands().take_front(MulOp));
               append_range(MulOps, Mul->operands().drop_front(MulOp+1));
               InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
             }
             const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
             if (OtherMul->getNumOperands() != 2) {
-              SmallVector<const SCEV *, 4> MulOps(
+              SmallVector<SCEVUse, 4> MulOps(
                   OtherMul->operands().take_front(OMulOp));
               append_range(MulOps, OtherMul->operands().drop_front(OMulOp+1));
               InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
             }
-            SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2};
             const SCEV *InnerMulSum =
-                getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
+                getAddExpr(InnerMul1, InnerMul2, SCEV::FlagAnyWrap, Depth + 1);
             const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum,
                                               SCEV::FlagAnyWrap, Depth + 1);
             if (Ops.size() == 2) return OuterMul;
@@ -2900,7 +2912,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
   for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
     // Scan all of the other operands to this add and add them to the vector if
     // they are loop invariant w.r.t. the recurrence.
-    SmallVector<const SCEV *, 8> LIOps;
+    SmallVector<SCEVUse, 8> LIOps;
     const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
     const Loop *AddRecLoop = AddRec->getLoop();
     for (unsigned i = 0, e = Ops.size(); i != e; ++i)
@@ -2922,7 +2934,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
       //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
       LIOps.push_back(AddRec->getStart());
 
-      SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
+      SmallVector<SCEVUse, 4> AddRecOps(AddRec->operands());
 
       // It is not in general safe to propagate flags valid on an add within
       // the addrec scope to one outside it.  We must prove that the inner
@@ -2975,7 +2987,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
         "AddRecExprs are not sorted in reverse dominance order?");
       if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
         // Other + {A,+,B}<L> + {C,+,D}<L>  -->  Other + {A+C,+,B+D}<L>
-        SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
+        SmallVector<SCEVUse, 4> AddRecOps(AddRec->operands());
         for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
              ++OtherIdx) {
           const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
@@ -2986,9 +2998,9 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                 append_range(AddRecOps, OtherAddRec->operands().drop_front(i));
                 break;
               }
-              SmallVector<const SCEV *, 2> TwoOps = {
-                  AddRecOps[i], OtherAddRec->getOperand(i)};
-              AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
+              AddRecOps[i] =
+                  getAddExpr(AddRecOps[i], OtherAddRec->getOperand(i),
+                             SCEV::FlagAnyWrap, Depth + 1);
             }
             Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
           }
@@ -3008,9 +3020,8 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
   return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
 }
 
-const SCEV *
-ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
-                                    SCEV::NoWrapFlags Flags) {
+const SCEV *ScalarEvolution::getOrCreateAddExpr(ArrayRef<SCEVUse> Ops,
+                                                SCEV::NoWrapFlags Flags) {
   FoldingSetNodeID ID;
   ID.AddInteger(scAddExpr);
   for (const SCEV *Op : Ops)
@@ -3019,7 +3030,7 @@ ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
   SCEVAddExpr *S =
       static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
   if (!S) {
-    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+    SCEVUse *O = SCEVAllocator.Allocate<SCEVUse>(Ops.size());
     llvm::uninitialized_copy(Ops, O);
     S = new (SCEVAllocator)
         SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
@@ -3030,9 +3041,9 @@ ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
   return S;
 }
 
-const SCEV *
-ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
-                                       const Loop *L, SCEV::NoWrapFlags Flags) {
+const SCEV *ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<SCEVUse> Ops,
+                                                   const Loop *L,
+                                                   SCEV::NoWrapFlags Flags) {
   FoldingSetNodeID ID;
   ID.AddInteger(scAddRecExpr);
   for (const SCEV *Op : Ops)
@@ -3042,7 +3053,7 @@ ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
   SCEVAddRecExpr *S =
       static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
   if (!S) {
-    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+    SCEVUse *O = SCEVAllocator.Allocate<SCEVUse>(Ops.size());
     llvm::uninitialized_copy(Ops, O);
     S = new (SCEVAllocator)
         SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L);
@@ -3054,9 +3065,8 @@ ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
   return S;
 }
 
-const SCEV *
-ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
-                                    SCEV::NoWrapFlags Flags) {
+const SCEV *ScalarEvolution::getOrCreateMulExpr(ArrayRef<SCEVUse> Ops,
+                                                SCEV::NoWrapFlags Flags) {
   FoldingSetNodeID ID;
   ID.AddInteger(scMulExpr);
   for (const SCEV *Op : Ops)
@@ -3065,7 +3075,7 @@ ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
   SCEVMulExpr *S =
     static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
   if (!S) {
-    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+    SCEVUse *O = SCEVAllocator.Allocate<SCEVUse>(Ops.size());
     llvm::uninitialized_copy(Ops, O);
     S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
                                         O, Ops.size());
@@ -3131,7 +3141,7 @@ static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
 }
 
 /// Get a canonical multiply expression, or something simpler if possible.
-const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
+const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<SCEVUse> &Ops,
                                         SCEV::NoWrapFlags OrigFlags,
                                         unsigned Depth) {
   assert(OrigFlags == maskFlags(OrigFlags, SCEV::FlagNUW | SCEV::FlagNSW) &&
@@ -3155,7 +3165,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
     return Folded;
 
   // Delay expensive flag strengthening until necessary.
-  auto ComputeFlags = [this, OrigFlags](ArrayRef<const SCEV *> Ops) {
+  auto ComputeFlags = [this, OrigFlags](const ArrayRef<SCEVUse> Ops) {
     return StrengthenNoWrapFlags(this, scMulExpr, Ops, OrigFlags);
   };
 
@@ -3192,11 +3202,11 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
         // If we have a mul by -1 of an add, try distributing the -1 among the
         // add operands.
         if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
-          SmallVector<const SCEV *, 4> NewOps;
+          SmallVector<SCEVUse, 4> NewOps;
           bool AnyFolded = false;
           for (const SCEV *AddOp : Add->operands()) {
-            const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
-                                         Depth + 1);
+            const SCEV *Mul = getMulExpr(Ops[0], SCEVUse(AddOp),
+                                         SCEV::FlagAnyWrap, Depth + 1);
             if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
             NewOps.push_back(Mul);
           }
@@ -3204,10 +3214,10 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
             return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
         } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
           // Negation preserves a recurrence's no self-wrap property.
-          SmallVector<const SCEV *, 4> Operands;
+          SmallVector<SCEVUse, 4> Operands;
           for (const SCEV *AddRecOp : AddRec->operands())
-            Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
-                                          Depth + 1));
+            Operands.push_back(getMulExpr(Ops[0], SCEVUse(AddRecOp),
+                                          SCEV::FlagAnyWrap, Depth + 1));
           // Let M be the minimum representable signed value. AddRec with nsw
           // multiplied by -1 can have signed overflow if and only if it takes a
           // value of M: M * (-1) would stay M and (M + 1) * (-1) would be the
@@ -3302,7 +3312,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
   for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
     // Scan all of the other operands to this mul and add them to the vector
     // if they are loop invariant w.r.t. the recurrence.
-    SmallVector<const SCEV *, 8> LIOps;
+    SmallVector<SCEVUse, 8> LIOps;
     const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
     for (unsigned i = 0, e = Ops.size(); i != e; ++i)
       if (isAvailableAtLoopEntry(Ops[i], AddRec->getLoop())) {
@@ -3314,7 +3324,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
     // If we found some loop invariants, fold them into the recurrence.
     if (!LIOps.empty()) {
       //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
-      SmallVector<const SCEV *, 4> NewOps;
+      SmallVector<SCEVUse, 4> NewOps;
       NewOps.reserve(AddRec->getNumOperands());
       const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
 
@@ -3384,10 +3394,10 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
       bool Overflow = false;
       Type *Ty = AddRec->getType();
       bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
-      SmallVector<const SCEV*, 7> AddRecOps;
+      SmallVector<SCEVUse, 7> AddRecOps;
       for (int x = 0, xe = AddRec->getNumOperands() +
              OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
-        SmallVector <const SCEV *, 7> SumOps;
+        SmallVector<SCEVUse, 7> SumOps;
         for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
           uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
           for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
@@ -3435,8 +3445,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
 }
 
 /// Represents an unsigned remainder expression based on unsigned division.
-const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
-                                         const SCEV *RHS) {
+const SCEV *ScalarEvolution::getURemExpr(SCEVUse LHS, SCEVUse RHS) {
   assert(getEffectiveSCEVType(LHS->getType()) ==
          getEffectiveSCEVType(RHS->getType()) &&
          "SCEVURemExpr operand types don't match!");
@@ -3464,8 +3473,7 @@ const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
 
 /// Get a canonical unsigned division expression, or something simpler if
 /// possible.
-const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
-                                         const SCEV *RHS) {
+const SCEV *ScalarEvolution::getUDivExpr(SCEVUse LHS, SCEVUse RHS) {
   assert(!LHS->getType()->isPointerTy() &&
          "SCEVUDivExpr operand can't be pointer!");
   assert(LHS->getType() == RHS->getType() &&
@@ -3513,7 +3521,7 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
               getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                             getZeroExtendExpr(Step, ExtTy),
                             AR->getLoop(), SCEV::FlagAnyWrap)) {
-            SmallVector<const SCEV *, 4> Operands;
+            SmallVector<SCEVUse, 4> Operands;
             for (const SCEV *Op : AR->operands())
               Operands.push_back(getUDivExpr(Op, RHS));
             return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
@@ -3562,7 +3570,7 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
         }
       // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
       if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
-        SmallVector<const SCEV *, 4> Operands;
+        SmallVector<SCEVUse, 4> Operands;
         for (const SCEV *Op : M->operands())
           Operands.push_back(getZeroExtendExpr(Op, ExtTy));
         if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
@@ -3571,7 +3579,7 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
             const SCEV *Op = M->getOperand(i);
             const SCEV *Div = getUDivExpr(Op, RHSC);
             if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
-              Operands = SmallVector<const SCEV *, 4>(M->operands());
+              Operands = SmallVector<SCEVUse, 4>(M->operands());
               Operands[i] = Div;
               return getMulExpr(Operands);
             }
@@ -3594,7 +3602,7 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
 
       // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
       if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
-        SmallVector<const SCEV *, 4> Operands;
+        SmallVector<SCEVUse, 4> Operands;
         for (const SCEV *Op : A->operands())
           Operands.push_back(getZeroExtendExpr(Op, ExtTy));
         if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
@@ -3639,7 +3647,7 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
   SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
                                              LHS, RHS);
   UniqueSCEVs.InsertNode(S, IP);
-  registerUser(S, {LHS, RHS});
+  registerUser(S, ArrayRef<SCEVUse>({LHS, RHS}));
   return S;
 }
 
@@ -3661,8 +3669,7 @@ APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
 /// possible. There is no representation for an exact udiv in SCEV IR, but we
 /// can attempt to remove factors from the LHS and RHS.  We can't do this when
 /// it's not exact because the udiv may be clearing bits.
-const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
-                                              const SCEV *RHS) {
+const SCEV *ScalarEvolution::getUDivExactExpr(SCEVUse LHS, SCEVUse RHS) {
   // TODO: we could try to find factors in all sorts of things, but for now we
   // just deal with u/exact (multiply, constant). See SCEVDivision towards the
   // end of this file for inspiration.
@@ -3676,7 +3683,7 @@ const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
     // first element of the mulexpr.
     if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
       if (LHSCst == RHSCst) {
-        SmallVector<const SCEV *, 2> Operands(drop_begin(Mul->operands()));
+        SmallVector<SCEVUse, 2> Operands(drop_begin(Mul->operands()));
         return getMulExpr(Operands);
       }
 
@@ -3689,7 +3696,7 @@ const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
             cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
         RHSCst =
             cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
-        SmallVector<const SCEV *, 2> Operands;
+        SmallVector<SCEVUse, 2> Operands;
         Operands.push_back(LHSCst);
         append_range(Operands, Mul->operands().drop_front());
         LHS = getMulExpr(Operands);
@@ -3703,7 +3710,7 @@ const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
 
   for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
     if (Mul->getOperand(i) == RHS) {
-      SmallVector<const SCEV *, 2> Operands;
+      SmallVector<SCEVUse, 2> Operands;
       append_range(Operands, Mul->operands().take_front(i));
       append_range(Operands, Mul->operands().drop_front(i + 1));
       return getMulExpr(Operands);
@@ -3715,10 +3722,10 @@ const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
 
 /// Get an add recurrence expression for the specified loop.  Simplify the
 /// expression as much as possible.
-const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
+const SCEV *ScalarEvolution::getAddRecExpr(SCEVUse Start, SCEVUse Step,
                                            const Loop *L,
                                            SCEV::NoWrapFlags Flags) {
-  SmallVector<const SCEV *, 4> Operands;
+  SmallVector<SCEVUse, 4> Operands;
   Operands.push_back(Start);
   if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
     if (StepChrec->getLoop() == L) {
@@ -3732,9 +3739,9 @@ const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
 
 /// Get an add recurrence expression for the specified loop.  Simplify the
 /// expression as much as possible.
-const SCEV *
-ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
-                               const Loop *L, SCEV::NoWrapFlags Flags) {
+const SCEV *ScalarEvolution::getAddRecExpr(SmallVectorImpl<SCEVUse> &Operands,
+                                           const Loop *L,
+                                           SCEV::NoWrapFlags Flags) {
   if (Operands.size() == 1) return Operands[0];
 #ifndef NDEBUG
   Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
@@ -3768,7 +3775,7 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
             ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
             : (!NestedLoop->contains(L) &&
                DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
-      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->operands());
+      SmallVector<SCEVUse, 4> NestedOperands(NestedAR->operands());
       Operands[0] = NestedAR->getStart();
       // AddRecs require their operands be loop-invariant with respect to their
       // loops. Don't perform this transformation if it would break this
@@ -3810,7 +3817,7 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
 }
 
 const SCEV *ScalarEvolution::getGEPExpr(GEPOperator *GEP,
-                                        ArrayRef<const SCEV *> IndexExprs) {
+                                        ArrayRef<SCEVUse> IndexExprs) {
   const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
   // getSCEV(Base)->getType() has the same address space as Base->getType()
   // because SCEV::getType() preserves the address space.
@@ -3829,8 +3836,8 @@ const SCEV *ScalarEvolution::getGEPExpr(GEPOperator *GEP,
   return getGEPExpr(BaseExpr, IndexExprs, GEP->getSourceElementType(), NW);
 }
 
-const SCEV *ScalarEvolution::getGEPExpr(const SCEV *BaseExpr,
-                                        ArrayRef<const SCEV *> IndexExprs,
+const SCEV *ScalarEvolution::getGEPExpr(SCEVUse BaseExpr,
+                                        ArrayRef<SCEVUse> IndexExprs,
                                         Type *SrcElementTy, GEPNoWrapFlags NW) {
   SCEV::NoWrapFlags OffsetWrap = SCEV::FlagAnyWrap;
   if (NW.hasNoUnsignedSignedWrap())
@@ -3841,8 +3848,8 @@ const SCEV *ScalarEvolution::getGEPExpr(const SCEV *BaseExpr,
   Type *CurTy = BaseExpr->getType();
   Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType());
   bool FirstIter = true;
-  SmallVector<const SCEV *, 4> Offsets;
-  for (const SCEV *IndexExpr : IndexExprs) {
+  SmallVector<SCEVUse, 4> Offsets;
+  for (SCEVUse IndexExpr : IndexExprs) {
     // Compute the (potentially symbolic) offset in bytes for this index.
     if (StructType *STy = dyn_cast<StructType>(CurTy)) {
       // For a struct, add the member offset.
@@ -3902,13 +3909,23 @@ SCEV *ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType,
   return UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
 }
 
+SCEV *ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType,
+                                               ArrayRef<SCEVUse> Ops) {
+  FoldingSetNodeID ID;
+  ID.AddInteger(SCEVType);
+  for (const SCEV *Op : Ops)
+    ID.AddPointer(Op);
+  void *IP = nullptr;
+  return UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
+}
+
 const SCEV *ScalarEvolution::getAbsExpr(const SCEV *Op, bool IsNSW) {
   SCEV::NoWrapFlags Flags = IsNSW ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
   return getSMaxExpr(Op, getNegativeSCEV(Op, Flags));
 }
 
 const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind,
-                                           SmallVectorImpl<const SCEV *> &Ops) {
+                                           SmallVectorImpl<SCEVUse> &Ops) {
   assert(SCEVMinMaxExpr::isMinMaxType(Kind) && "Not a SCEVMinMaxExpr!");
   assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
   if (Ops.size() == 1) return Ops[0];
@@ -4024,7 +4041,7 @@ const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind,
   const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
   if (ExistingSCEV)
     return ExistingSCEV;
-  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+  SCEVUse *O = SCEVAllocator.Allocate<SCEVUse>(Ops.size());
   llvm::uninitialized_copy(Ops, O);
   SCEV *S = new (SCEVAllocator)
       SCEVMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size());
@@ -4062,7 +4079,7 @@ class SCEVSequentialMinMaxDeduplicatingVisitor final
       return S;
 
     auto *NAry = cast<SCEVNAryExpr>(S);
-    SmallVector<const SCEV *> NewOps;
+    SmallVector<SCEVUse> NewOps;
     bool Changed = visit(Kind, NAry->operands(), NewOps);
 
     if (!Changed)
@@ -4090,10 +4107,10 @@ class SCEVSequentialMinMaxDeduplicatingVisitor final
             SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(
                 RootKind)) {}
 
-  bool /*Changed*/ visit(SCEVTypes Kind, ArrayRef<const SCEV *> OrigOps,
-                         SmallVectorImpl<const SCEV *> &NewOps) {
+  bool /*Changed*/ visit(SCEVTypes Kind, ArrayRef<SCEVUse> OrigOps,
+                         SmallVectorImpl<SCEVUse> &NewOps) {
     bool Changed = false;
-    SmallVector<const SCEV *> Ops;
+    SmallVector<SCEVUse> Ops;
     Ops.reserve(OrigOps.size());
 
     for (const SCEV *Op : OrigOps) {
@@ -4313,7 +4330,7 @@ bool ScalarEvolution::canReuseInstruction(
 
 const SCEV *
 ScalarEvolution::getSequentialMinMaxExpr(SCEVTypes Kind,
-                                         SmallVectorImpl<const SCEV *> &Ops) {
+                                         SmallVectorImpl<SCEVUse> &Ops) {
   assert(SCEVSequentialMinMaxExpr::isSequentialMinMaxType(Kind) &&
          "Not a SCEVSequentialMinMaxExpr!");
   assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
@@ -4388,7 +4405,7 @@ ScalarEvolution::getSequentialMinMaxExpr(SCEVTypes Kind,
     if (::impliesPoison(Ops[i], Ops[i - 1]) ||
         isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, Ops[i - 1],
                                         SaturationPoint)) {
-      SmallVector<const SCEV *> SeqOps = {Ops[i - 1], Ops[i]};
+      SmallVector<SCEVUse, 2> SeqOps = {Ops[i - 1], Ops[i]};
       Ops[i - 1] = getMinMaxExpr(
           SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(Kind),
           SeqOps);
@@ -4414,7 +4431,7 @@ ScalarEvolution::getSequentialMinMaxExpr(SCEVTypes Kind,
   if (ExistingSCEV)
     return ExistingSCEV;
 
-  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+  SCEVUse *O = SCEVAllocator.Allocate<SCEVUse>(Ops.size());
   llvm::uninitialized_copy(Ops, O);
   SCEV *S = new (SCEVAllocator)
       SCEVSequentialMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size());
@@ -4424,41 +4441,40 @@ ScalarEvolution::getSequentialMinMaxExpr(SCEVTypes Kind,
   return S;
 }
 
-const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) {
-  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
-  return getSMaxExpr(Ops);
+const SCEV *ScalarEvolution::getSMaxExpr(SCEVUse LHS, SCEVUse RHS) {
+  SmallVector<SCEVUse, 2> Ops = {LHS, RHS};
+  return getMinMaxExpr(scSMaxExpr, Ops);
 }
 
-const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
+const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<SCEVUse> &Ops) {
   return getMinMaxExpr(scSMaxExpr, Ops);
 }
 
-const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) {
-  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
-  return getUMaxExpr(Ops);
+const SCEV *ScalarEvolution::getUMaxExpr(SCEVUse LHS, SCEVUse RHS) {
+  SmallVector<SCEVUse, 2> Ops = {LHS, RHS};
+  return getMinMaxExpr(scUMaxExpr, Ops);
 }
 
-const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
+const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<SCEVUse> &Ops) {
   return getMinMaxExpr(scUMaxExpr, Ops);
 }
 
-const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
-                                         const SCEV *RHS) {
-  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
-  return getSMinExpr(Ops);
+const SCEV *ScalarEvolution::getSMinExpr(SCEVUse LHS, SCEVUse RHS) {
+  SmallVector<SCEVUse, 2> Ops = {LHS, RHS};
+  return getMinMaxExpr(scSMinExpr, Ops);
 }
 
-const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
+const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<SCEVUse> &Ops) {
   return getMinMaxExpr(scSMinExpr, Ops);
 }
 
-const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, const SCEV *RHS,
+const SCEV *ScalarEvolution::getUMinExpr(SCEVUse LHS, SCEVUse RHS,
                                          bool Sequential) {
-  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
+  SmallVector<SCEVUse, 2> Ops = {LHS, RHS};
   return getUMinExpr(Ops, Sequential);
 }
 
-const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops,
+const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<SCEVUse> &Ops,
                                          bool Sequential) {
   return Sequential ? getSequentialMinMaxExpr(scSequentialUMinExpr, Ops)
                     : getMinMaxExpr(scUMinExpr, Ops);
@@ -4681,7 +4697,7 @@ const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
   // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y)
   if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) {
     auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) {
-      SmallVector<const SCEV *, 2> MatchedOperands;
+      SmallVector<SCEVUse, 2> MatchedOperands;
       for (const SCEV *Operand : MME->operands()) {
         const SCEV *Matched = MatchNotExpr(Operand);
         if (!Matched)
@@ -4705,7 +4721,7 @@ const SCEV *ScalarEvolution::removePointerBase(const SCEV *P) {
 
   if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(P)) {
     // The base of an AddRec is the first operand.
-    SmallVector<const SCEV *> Ops{AddRec->operands()};
+    SmallVector<SCEVUse> Ops{AddRec->operands()};
     Ops[0] = removePointerBase(Ops[0]);
     // Don't try to transfer nowrap flags for now. We could in some cases
     // (for example, if pointer operand of the AddRec is a SCEVUnknown).
@@ -4713,9 +4729,9 @@ const SCEV *ScalarEvolution::removePointerBase(const SCEV *P) {
   }
   if (auto *Add = dyn_cast<SCEVAddExpr>(P)) {
     // The base of an Add is the pointer operand.
-    SmallVector<const SCEV *> Ops{Add->operands()};
-    const SCEV **PtrOp = nullptr;
-    for (const SCEV *&AddOp : Ops) {
+    SmallVector<SCEVUse> Ops{Add->operands()};
+    SCEVUse *PtrOp = nullptr;
+    for (SCEVUse &AddOp : Ops) {
       if (AddOp->getType()->isPointerTy()) {
         assert(!PtrOp && "Cannot have multiple pointer ops");
         PtrOp = &AddOp;
@@ -4730,7 +4746,7 @@ const SCEV *ScalarEvolution::removePointerBase(const SCEV *P) {
   return getZero(P->getType());
 }
 
-const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
+const SCEV *ScalarEvolution::getMinusSCEV(SCEVUse LHS, SCEVUse RHS,
                                           SCEV::NoWrapFlags Flags,
                                           unsigned Depth) {
   // Fast path: X - X --> 0.
@@ -4868,12 +4884,12 @@ const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
                                                         const SCEV *RHS,
                                                         bool Sequential) {
-  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
+  SmallVector<SCEVUse, 2> Ops = {LHS, RHS};
   return getUMinFromMismatchedTypes(Ops, Sequential);
 }
 
 const SCEV *
-ScalarEvolution::getUMinFromMismatchedTypes(SmallVectorImpl<const SCEV *> &Ops,
+ScalarEvolution::getUMinFromMismatchedTypes(SmallVectorImpl<SCEVUse> &Ops,
                                             bool Sequential) {
   assert(!Ops.empty() && "At least one operand must be!");
   // Trivial case.
@@ -4882,7 +4898,7 @@ ScalarEvolution::getUMinFromMismatchedTypes(SmallVectorImpl<const SCEV *> &Ops,
 
   // Find the max type first.
   Type *MaxType = nullptr;
-  for (const auto *S : Ops)
+  for (SCEVUse S : Ops)
     if (MaxType)
       MaxType = getWiderType(MaxType, S->getType());
     else
@@ -4890,8 +4906,8 @@ ScalarEvolution::getUMinFromMismatchedTypes(SmallVectorImpl<const SCEV *> &Ops,
   assert(MaxType && "Failed to find maximum type!");
 
   // Extend all ops to max type.
-  SmallVector<const SCEV *, 2> PromotedOps;
-  for (const auto *S : Ops)
+  SmallVector<SCEVUse, 2> PromotedOps;
+  for (SCEVUse S : Ops)
     PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType));
 
   // Generate umin.
@@ -5580,7 +5596,7 @@ ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI
     return std::nullopt;
 
   // Create an add with everything but the specified operand.
-  SmallVector<const SCEV *, 8> Ops;
+  SmallVector<SCEVUse, 8> Ops;
   for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
     if (i != FoundIndex)
       Ops.push_back(Add->getOperand(i));
@@ -5911,7 +5927,7 @@ const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
 
     if (FoundIndex != Add->getNumOperands()) {
       // Create an add with everything but the specified operand.
-      SmallVector<const SCEV *, 8> Ops;
+      SmallVector<SCEVUse, 8> Ops;
       for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
         if (i != FoundIndex)
           Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
@@ -5958,7 +5974,7 @@ const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
         // Okay, for the entire analysis of this edge we assumed the PHI
         // to be symbolic.  We now need to go back and purge all of the
         // entries for the scalars that use the symbolic expression.
-        forgetMemoizedResults(SymbolicName);
+        forgetMemoizedResults({SymbolicName});
         insertValueToMap(PN, PHISCEV);
 
         if (auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
@@ -5998,7 +6014,7 @@ const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
         // Okay, for the entire analysis of this edge we assumed the PHI
         // to be symbolic.  We now need to go back and purge all of the
         // entries for the scalars that use the symbolic expression.
-        forgetMemoizedResults(SymbolicName);
+        forgetMemoizedResults({SymbolicName});
         insertValueToMap(PN, Shifted);
         return Shifted;
       }
@@ -6367,7 +6383,7 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
   assert(GEP->getSourceElementType()->isSized() &&
          "GEP source element type must be sized");
 
-  SmallVector<const SCEV *, 4> IndexExprs;
+  SmallVector<SCEVUse, 4> IndexExprs;
   for (Value *Index : GEP->indices())
     IndexExprs.push_back(getSCEV(Index));
   return getGEPExpr(GEP, IndexExprs);
@@ -6652,7 +6668,7 @@ ScalarEvolution::getRangeRefIter(const SCEV *S,
   DenseMap<const SCEV *, ConstantRange> &Cache =
       SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
                                                        : SignedRanges;
-  SmallVector<const SCEV *> WorkList;
+  SmallVector<SCEVUse> WorkList;
   SmallPtrSet<const SCEV *, 8> Seen;
 
   // Add Expr to the worklist, if Expr is either an N-ary expression or a
@@ -7378,13 +7394,12 @@ ScalarEvolution::getNonTrivialDefiningScopeBound(const SCEV *S) {
   return nullptr;
 }
 
-const Instruction *
-ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops,
-                                       bool &Precise) {
+const Instruction *ScalarEvolution::getDefiningScopeBound(ArrayRef<SCEVUse> Ops,
+                                                          bool &Precise) {
   Precise = true;
   // Do a bounded search of the def relation of the requested SCEVs.
   SmallPtrSet<const SCEV *, 16> Visited;
-  SmallVector<const SCEV *> Worklist;
+  SmallVector<SCEVUse> Worklist;
   auto pushOp = [&](const SCEV *S) {
     if (!Visited.insert(S).second)
       return;
@@ -7396,17 +7411,17 @@ ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops,
     Worklist.push_back(S);
   };
 
-  for (const auto *S : Ops)
+  for (SCEVUse S : Ops)
     pushOp(S);
 
   const Instruction *Bound = nullptr;
   while (!Worklist.empty()) {
-    auto *S = Worklist.pop_back_val();
+    SCEVUse S = Worklist.pop_back_val();
     if (auto *DefI = getNonTrivialDefiningScopeBound(S)) {
       if (!Bound || DT.dominates(Bound, DefI))
         Bound = DefI;
     } else {
-      for (const auto *Op : S->operands())
+      for (SCEVUse Op : S->operands())
         pushOp(Op);
     }
   }
@@ -7414,7 +7429,7 @@ ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops,
 }
 
 const Instruction *
-ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops) {
+ScalarEvolution::getDefiningScopeBound(ArrayRef<SCEVUse> Ops) {
   bool Discard;
   return getDefiningScopeBound(Ops, Discard);
 }
@@ -7469,7 +7484,7 @@ bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) {
   // executed every time we enter that scope.  When the bounding scope is a
   // loop (the common case), this is equivalent to proving I executes on every
   // iteration of that loop.
-  SmallVector<const SCEV *> SCEVOps;
+  SmallVector<SCEVUse> SCEVOps;
   for (const Use &Op : I->operands()) {
     // I could be an extractvalue from a call to an overflow intrinsic.
     // TODO: We can do better here in some cases.
@@ -7832,7 +7847,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
       // because it leads to N-1 getAddExpr calls for N ultimate operands.
       // Instead, gather up all the operands and make a single getAddExpr call.
       // LLVM IR canonical form means we need only traverse the left operands.
-      SmallVector<const SCEV *, 4> AddOps;
+      SmallVector<SCEVUse, 4> AddOps;
       do {
         if (BO->Op) {
           if (auto *OpSCEV = getExistingSCEV(BO->Op)) {
@@ -7878,7 +7893,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
     }
 
     case Instruction::Mul: {
-      SmallVector<const SCEV *, 4> MulOps;
+      SmallVector<SCEVUse, 4> MulOps;
       do {
         if (BO->Op) {
           if (auto *OpSCEV = getExistingSCEV(BO->Op)) {
@@ -7955,7 +7970,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
               unsigned MulZeros = OpC->getAPInt().countr_zero();
               unsigned GCD = std::min(MulZeros, TZ);
               APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD);
-              SmallVector<const SCEV*, 4> MulOps;
+              SmallVector<SCEVUse, 4> MulOps;
               MulOps.push_back(getConstant(OpC->getAPInt().ashr(GCD)));
               append_range(MulOps, LHSMul->operands().drop_front());
               auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags());
@@ -8534,7 +8549,7 @@ ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
   // only done to produce more precise results.
   if (Result.hasAnyInfo()) {
     // Invalidate any expression using an addrec in this loop.
-    SmallVector<const SCEV *, 8> ToForget;
+    SmallVector<SCEVUse, 8> ToForget;
     auto LoopUsersIt = LoopUsers.find(L);
     if (LoopUsersIt != LoopUsers.end())
       append_range(ToForget, LoopUsersIt->second);
@@ -8581,7 +8596,7 @@ void ScalarEvolution::forgetAllLoops() {
 void ScalarEvolution::visitAndClearUsers(
     SmallVectorImpl<Instruction *> &Worklist,
     SmallPtrSetImpl<Instruction *> &Visited,
-    SmallVectorImpl<const SCEV *> &ToForget) {
+    SmallVectorImpl<SCEVUse> &ToForget) {
   while (!Worklist.empty()) {
     Instruction *I = Worklist.pop_back_val();
     if (!isSCEVable(I->getType()) && !isa<WithOverflowInst>(I))
@@ -8604,7 +8619,7 @@ void ScalarEvolution::forgetLoop(const Loop *L) {
   SmallVector<const Loop *, 16> LoopWorklist(1, L);
   SmallVector<Instruction *, 32> Worklist;
   SmallPtrSet<Instruction *, 16> Visited;
-  SmallVector<const SCEV *, 16> ToForget;
+  SmallVector<SCEVUse, 16> ToForget;
 
   // Iterate over all the loops and sub-loops to drop SCEV information.
   while (!LoopWorklist.empty()) {
@@ -8651,7 +8666,7 @@ void ScalarEvolution::forgetValue(Value *V) {
   // Drop information about expressions based on loop-header PHIs.
   SmallVector<Instruction *, 16> Worklist;
   SmallPtrSet<Instruction *, 8> Visited;
-  SmallVector<const SCEV *, 8> ToForget;
+  SmallVector<SCEVUse, 8> ToForget;
   Worklist.push_back(I);
   Visited.insert(I);
   visitAndClearUsers(Worklist, Visited, ToForget);
@@ -8670,7 +8685,7 @@ void ScalarEvolution::forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V) {
   if (const SCEV *S = getExistingSCEV(V)) {
     struct InvalidationRootCollector {
       Loop *L;
-      SmallVector<const SCEV *, 8> Roots;
+      SmallVector<SCEVUse, 8> Roots;
 
       InvalidationRootCollector(Loop *L) : L(L) {}
 
@@ -8719,7 +8734,7 @@ void ScalarEvolution::forgetBlockAndLoopDispositions(Value *V) {
   // S's users may change if S's disposition changes (i.e. a user may change to
   // loop-invariant, if S changes to loop invariant), so also invalidate
   // dispositions of S's users recursively.
-  SmallVector<const SCEV *, 8> Worklist = {S};
+  SmallVector<SCEVUse, 8> Worklist = {S};
   SmallPtrSet<const SCEV *, 8> Seen = {S};
   while (!Worklist.empty()) {
     const SCEV *Curr = Worklist.pop_back_val();
@@ -8755,7 +8770,7 @@ const SCEV *ScalarEvolution::BackedgeTakenInfo::getExact(
 
   // All exiting blocks we have gathered dominate loop's latch, so exact trip
   // count is simply a minimum out of all these calculated exit counts.
-  SmallVector<const SCEV *, 2> Ops;
+  SmallVector<SCEVUse, 2> Ops;
   for (const auto &ENT : ExitNotTaken) {
     const SCEV *BECount = ENT.ExactNotTaken;
     assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!");
@@ -8823,7 +8838,7 @@ const SCEV *ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(
     // merge the max and exact information to approximate a version of
     // getConstantMaxBackedgeTakenCount which isn't restricted to just
     // constants.
-    SmallVector<const SCEV *, 4> ExitCounts;
+    SmallVector<SCEVUse, 4> ExitCounts;
 
     for (const auto &ENT : ExitNotTaken) {
       const SCEV *ExitCount = ENT.SymbolicMaxNotTaken;
@@ -9351,7 +9366,7 @@ ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
                                /*OrNegative=*/true)) {
       auto Flags = AR->getNoWrapFlags();
       Flags = setFlags(Flags, SCEV::FlagNW);
-      SmallVector<const SCEV *> Operands{AR->operands()};
+      SmallVector<SCEVUse> Operands{AR->operands()};
       Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
       setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), Flags);
     }
@@ -9369,7 +9384,7 @@ ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
           isKnownPositive(AR->getStepRecurrence(*this))) {
         auto Flags = AR->getNoWrapFlags();
         Flags = setFlags(Flags, WrapType);
-        SmallVector<const SCEV*> Operands{AR->operands()};
+        SmallVector<SCEVUse> Operands{AR->operands()};
         Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
         setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), Flags);
       }
@@ -10050,9 +10065,8 @@ static Constant *BuildConstantFromSCEV(const SCEV *V) {
   llvm_unreachable("Unknown SCEV kind!");
 }
 
-const SCEV *
-ScalarEvolution::getWithOperands(const SCEV *S,
-                                 SmallVectorImpl<const SCEV *> &NewOps) {
+const SCEV *ScalarEvolution::getWithOperands(const SCEV *S,
+                                             SmallVectorImpl<SCEVUse> &NewOps) {
   switch (S->getSCEVType()) {
   case scTruncate:
   case scZeroExtend:
@@ -10106,7 +10120,7 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
 
       // Okay, at least one of these operands is loop variant but might be
       // foldable.  Build a new instance of the folded commutative expression.
-      SmallVector<const SCEV *, 8> NewOps;
+      SmallVector<SCEVUse, 8> NewOps;
       NewOps.reserve(AddRec->getNumOperands());
       append_range(NewOps, AddRec->operands().take_front(i));
       NewOps.push_back(OpAtScope);
@@ -10152,21 +10166,21 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
   case scUMinExpr:
   case scSMinExpr:
   case scSequentialUMinExpr: {
-    ArrayRef<const SCEV *> Ops = V->operands();
+    ArrayRef<SCEVUse> Ops = V->operands();
     // Avoid performing the look-up in the common case where the specified
     // expression has no loop-variant portions.
     for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
-      const SCEV *OpAtScope = getSCEVAtScope(Ops[i], L);
-      if (OpAtScope != Ops[i]) {
+      const SCEV *OpAtScope = getSCEVAtScope(Ops[i].getPointer(), L);
+      if (OpAtScope != Ops[i].getPointer()) {
         // Okay, at least one of these operands is loop variant but might be
         // foldable.  Build a new instance of the folded commutative expression.
-        SmallVector<const SCEV *, 8> NewOps;
+        SmallVector<SCEVUse, 8> NewOps;
         NewOps.reserve(Ops.size());
         append_range(NewOps, Ops.take_front(i));
         NewOps.push_back(OpAtScope);
 
         for (++i; i != e; ++i) {
-          OpAtScope = getSCEVAtScope(Ops[i], L);
+          OpAtScope = getSCEVAtScope(Ops[i].getPointer(), L);
           NewOps.push_back(OpAtScope);
         }
 
@@ -11460,7 +11474,7 @@ ScalarEvolution::getLoopInvariantExitCondDuringFirstIterations(
     // work, try the following trick: if the a predicate is invariant for X, it
     // is also invariant for umin(X, ...). So try to find something that works
     // among subexpressions of MaxIter expressed as umin.
-    for (auto *Op : UMin->operands())
+    for (SCEVUse Op : UMin->operands())
       if (auto LIP = getLoopInvariantExitCondDuringFirstIterationsImpl(
               Pred, LHS, RHS, L, CtxI, Op))
         return LIP;
@@ -12775,7 +12789,7 @@ bool ScalarEvolution::isImpliedViaOperations(CmpPredicate Pred, const SCEV *LHS,
   if (P != ICmpInst::ICMP_SGT)
     return false;
 
-  auto GetOpFromSExt = [&](const SCEV *S) {
+  auto GetOpFromSExt = [&](const SCEV *S) -> const SCEV * {
     if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
       return Ext->getOperand();
     // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
@@ -12809,8 +12823,8 @@ bool ScalarEvolution::isImpliedViaOperations(CmpPredicate Pred, const SCEV *LHS,
     if (!LHSAddExpr->hasNoSignedWrap())
       return false;
 
-    auto *LL = LHSAddExpr->getOperand(0);
-    auto *LR = LHSAddExpr->getOperand(1);
+    SCEVUse LL = LHSAddExpr->getOperand(0);
+    SCEVUse LR = LHSAddExpr->getOperand(1);
     auto *MinusOne = getMinusOne(RHS->getType());
 
     // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
@@ -13667,7 +13681,7 @@ const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
   // If the start is a non-zero constant, shift the range to simplify things.
   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
     if (!SC->getValue()->isZero()) {
-      SmallVector<const SCEV *, 4> Operands(operands());
+      SmallVector<SCEVUse, 4> Operands(operands());
       Operands[0] = SE.getZero(SC->getType());
       const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
                                              getNoWrapFlags(FlagNW));
@@ -13739,7 +13753,7 @@ SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const {
   // simplification: it is legal to return ({rec1} + {rec2}). For example, it
   // may happen if we reach arithmetic depth limit while simplifying. So we
   // construct the returned value explicitly.
-  SmallVector<const SCEV *, 3> Ops;
+  SmallVector<SCEVUse, 3> Ops;
   // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and
   // (this + Step) is {A+B,+,B+C,+...,+,N}.
   for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
@@ -14230,7 +14244,7 @@ ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
 
     // This recurrence is variant w.r.t. L if any of its operands
     // are variant.
-    for (const auto *Op : AR->operands())
+    for (SCEVUse Op : AR->operands())
       if (!isLoopInvariant(Op, L))
         return LoopVariant;
 
@@ -14251,7 +14265,7 @@ ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
   case scSMinExpr:
   case scSequentialUMinExpr: {
     bool HasVarying = false;
-    for (const auto *Op : S->operands()) {
+    for (SCEVUse Op : S->operands()) {
       LoopDisposition D = getLoopDisposition(Op, L);
       if (D == LoopVariant)
         return LoopVariant;
@@ -14389,9 +14403,9 @@ void ScalarEvolution::forgetBackedgeTakenCounts(const Loop *L,
   }
 }
 
-void ScalarEvolution::forgetMemoizedResults(ArrayRef<const SCEV *> SCEVs) {
+void ScalarEvolution::forgetMemoizedResults(ArrayRef<SCEVUse> SCEVs) {
   SmallPtrSet<const SCEV *, 8> ToForget(llvm::from_range, SCEVs);
-  SmallVector<const SCEV *, 8> Worklist(ToForget.begin(), ToForget.end());
+  SmallVector<SCEVUse, 8> Worklist(ToForget.begin(), ToForget.end());
 
   while (!Worklist.empty()) {
     const SCEV *Curr = Worklist.pop_back_val();
@@ -14677,7 +14691,7 @@ void ScalarEvolution::verify() const {
 
   // Verify integrity of SCEV users.
   for (const auto &S : UniqueSCEVs) {
-    for (const auto *Op : S.operands()) {
+    for (SCEVUse Op : S.operands()) {
       // We do not store dependencies of constants.
       if (isa<SCEVConstant>(Op))
         continue;
@@ -15316,6 +15330,15 @@ void ScalarEvolution::registerUser(const SCEV *User,
       SCEVUsers[Op].insert(User);
 }
 
+void ScalarEvolution::registerUser(const SCEV *User, ArrayRef<SCEVUse> Ops) {
+  for (const SCEV *Op : Ops)
+    // We do not expect that forgetting cached data for SCEVConstants will ever
+    // open any prospects for sharpening or introduce any correctness issues,
+    // so we don't bother storing their dependencies.
+    if (!isa<SCEVConstant>(Op))
+      SCEVUsers[Op].insert(User);
+}
+
 const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
   const SCEV *Expr = SE.getSCEV(V);
   return getPredicatedSCEV(Expr);
@@ -15549,7 +15572,7 @@ void ScalarEvolution::LoopGuards::collectFromPHI(
   }
   if (P.first) {
     const SCEV *LHS = SE.getSCEV(const_cast<PHINode *>(&Phi));
-    SmallVector<const SCEV *, 2> Ops({P.first, LHS});
+    SmallVector<SCEVUse, 2> Ops({P.first, LHS});
     const SCEV *RHS = SE.getMinMaxExpr(P.second, Ops);
     Guards.RewriteMap.insert({LHS, RHS});
   }
@@ -15654,7 +15677,7 @@ static const SCEV *applyDivisibilityOnMinMaxExpr(const SCEV *MinMaxExpr,
   auto *DivisibleExpr =
       IsMin ? getPreviousSCEVDivisibleByDivisor(MinMaxLHS, Divisor, SE)
             : getNextSCEVDivisibleByDivisor(MinMaxLHS, Divisor, SE);
-  SmallVector<const SCEV *> Ops = {
+  SmallVector<SCEVUse> Ops = {
       applyDivisibilityOnMinMaxExpr(MinMaxRHS, Divisor, SE), DivisibleExpr};
   return SE.getMinMaxExpr(SCTy, Ops);
 }
@@ -15666,7 +15689,7 @@ void ScalarEvolution::LoopGuards::collectFromBlock(
 
   assert(SE.DT.isReachableFromEntry(Block) && SE.DT.isReachableFromEntry(Pred));
 
-  SmallVector<const SCEV *> ExprsToRewrite;
+  SmallVector<SCEVUse> ExprsToRewrite;
   auto CollectCondition = [&](ICmpInst::Predicate Predicate, const SCEV *LHS,
                               const SCEV *RHS,
                               DenseMap<const SCEV *, const SCEV *> &RewriteMap,
@@ -15781,7 +15804,7 @@ void ScalarEvolution::LoopGuards::collectFromBlock(
       break;
     }
 
-    SmallVector<const SCEV *, 16> Worklist(1, LHS);
+    SmallVector<SCEVUse, 16> Worklist(1, LHS);
     SmallPtrSet<const SCEV *, 16> Visited;
 
     auto EnqueueOperands = [&Worklist](const SCEVNAryExpr *S) {
@@ -16118,9 +16141,9 @@ const SCEV *ScalarEvolution::LoopGuards::rewrite(const SCEV *Expr) const {
         if (const SCEV *S = Map.lookup(Add))
           return SE.getAddExpr(Expr->getOperand(0), S);
       }
-      SmallVector<const SCEV *, 2> Operands;
+      SmallVector<SCEVUse, 2> Operands;
       bool Changed = false;
-      for (const auto *Op : Expr->operands()) {
+      for (SCEVUse Op : Expr->operands()) {
         Operands.push_back(
             SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visit(Op));
         Changed |= Op != Operands.back();
@@ -16134,9 +16157,9 @@ const SCEV *ScalarEvolution::LoopGuards::rewrite(const SCEV *Expr) const {
     }
 
     const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
-      SmallVector<const SCEV *, 2> Operands;
+      SmallVector<SCEVUse, 2> Operands;
       bool Changed = false;
-      for (const auto *Op : Expr->operands()) {
+      for (SCEVUse Op : Expr->operands()) {
         Operands.push_back(
             SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visit(Op));
         Changed |= Op != Operands.back();

diff  --git a/llvm/lib/Analysis/ScalarEvolutionDivision.cpp b/llvm/lib/Analysis/ScalarEvolutionDivision.cpp
index 52d82785f6b9c..710473191f8c6 100644
--- a/llvm/lib/Analysis/ScalarEvolutionDivision.cpp
+++ b/llvm/lib/Analysis/ScalarEvolutionDivision.cpp
@@ -149,7 +149,7 @@ void SCEVDivision::visitAddRecExpr(const SCEVAddRecExpr *Numerator) {
 }
 
 void SCEVDivision::visitAddExpr(const SCEVAddExpr *Numerator) {
-  SmallVector<const SCEV *, 2> Qs, Rs;
+  SmallVector<SCEVUse, 2> Qs, Rs;
   Type *Ty = Denominator->getType();
 
   for (const SCEV *Op : Numerator->operands()) {
@@ -175,7 +175,7 @@ void SCEVDivision::visitAddExpr(const SCEVAddExpr *Numerator) {
 }
 
 void SCEVDivision::visitMulExpr(const SCEVMulExpr *Numerator) {
-  SmallVector<const SCEV *, 2> Qs;
+  SmallVector<SCEVUse, 2> Qs;
   Type *Ty = Denominator->getType();
 
   bool FoundDenominatorTerm = false;

diff  --git a/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp b/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp
index cfc5b84554548..bfc2d6aafdbfc 100644
--- a/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp
+++ b/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp
@@ -46,10 +46,10 @@ struct NormalizeDenormalizeRewriter
 
 const SCEV *
 NormalizeDenormalizeRewriter::visitAddRecExpr(const SCEVAddRecExpr *AR) {
-  SmallVector<const SCEV *, 8> Operands;
+  SmallVector<SCEVUse, 8> Operands;
 
   transform(AR->operands(), std::back_inserter(Operands),
-            [&](const SCEV *Op) { return visit(Op); });
+            [&](SCEVUse Op) { return visit(Op.getPointer()); });
 
   if (!Pred(AR))
     return SE.getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagAnyWrap);

diff  --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
index bcf4d6286b4d3..d0f3fa86ef007 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -2785,7 +2785,7 @@ void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
   bool Runtime = true;
   if (ST->hasLOB()) {
     if (SE.hasLoopInvariantBackedgeTakenCount(L)) {
-      const auto *BETC = SE.getBackedgeTakenCount(L);
+      const SCEV *BETC = SE.getBackedgeTakenCount(L);
       auto *Outer = L->getOutermostLoop();
       if ((L != Outer && Outer != L->getParentLoop()) ||
           (L != Outer && BETC && !SE.isLoopInvariant(BETC, Outer))) {

diff  --git a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
index 305fe05c4f180..745880f1ca0cb 100644
--- a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -1395,9 +1395,9 @@ createReplacement(ICmpInst *ICmp, const Loop *L, BasicBlock *ExitingBB,
     // wrap". getLoopInvariantExitCondDuringFirstIterations knows how to deal
     // with umin in a smart way, but umin(a, b) - 1 will likely not simplify.
     // So we manually construct umin(a - 1, b - 1).
-    SmallVector<const SCEV *, 4> Elements;
+    SmallVector<SCEVUse, 4> Elements;
     if (auto *UMin = dyn_cast<SCEVUMinExpr>(MaxIter)) {
-      for (const SCEV *Op : UMin->operands())
+      for (SCEVUse Op : UMin->operands())
         Elements.push_back(SE->getMinusSCEV(Op, SE->getOne(Op->getType())));
       MaxIter = SE->getUMinFromMismatchedTypes(Elements);
     } else

diff  --git a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
index deff79b403883..308c267969113 100644
--- a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
@@ -423,7 +423,7 @@ bool InductiveRangeCheck::reassociateSubLHS(
   auto getExprScaledIfOverflow = [&](Instruction::BinaryOps BinOp,
                                      const SCEV *LHS,
                                      const SCEV *RHS) -> const SCEV * {
-    const SCEV *(ScalarEvolution::*Operation)(const SCEV *, const SCEV *,
+    const SCEV *(ScalarEvolution::*Operation)(SCEVUse, SCEVUse,
                                               SCEV::NoWrapFlags, unsigned);
     switch (BinOp) {
     default:

diff  --git a/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp b/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp
index 6f0d8dcb05e04..fe20c24318e12 100644
--- a/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp
@@ -391,9 +391,10 @@ bool LoopDataPrefetch::runOnLoop(Loop *L) {
 
     BasicBlock *BB = P.InsertPt->getParent();
     SCEVExpander SCEVE(*SE, "prefaddr");
-    const SCEV *NextLSCEV = SE->getAddExpr(P.LSCEVAddRec, SE->getMulExpr(
-      SE->getConstant(P.LSCEVAddRec->getType(), ItersAhead),
-      P.LSCEVAddRec->getStepRecurrence(*SE)));
+    const SCEV *NextLSCEV = SE->getAddExpr(
+        P.LSCEVAddRec,
+        SE->getMulExpr(SE->getConstant(P.LSCEVAddRec->getType(), ItersAhead),
+                       P.LSCEVAddRec->getStepRecurrence(*SE)));
     if (!SCEVE.isSafeToExpand(NextLSCEV))
       continue;
 

diff  --git a/llvm/lib/Transforms/Scalar/LoopFuse.cpp b/llvm/lib/Transforms/Scalar/LoopFuse.cpp
index b28e548e59a47..abe03cf1270ab 100644
--- a/llvm/lib/Transforms/Scalar/LoopFuse.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopFuse.cpp
@@ -1132,7 +1132,7 @@ struct LoopFuser {
 
     const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
       const Loop *ExprL = Expr->getLoop();
-      SmallVector<const SCEV *, 2> Operands;
+      SmallVector<SCEVUse, 2> Operands;
       if (ExprL == &OldL) {
         append_range(Operands, Expr->operands());
         return SE.getAddRecExpr(Operands, &NewL, Expr->getNoWrapFlags());
@@ -1147,7 +1147,7 @@ struct LoopFuser {
         return visit(Expr->getStart());
       }
 
-      for (const SCEV *Op : Expr->operands())
+      for (SCEVUse Op : Expr->operands())
         Operands.push_back(visit(Op));
       return SE.getAddRecExpr(Operands, ExprL, Expr->getNoWrapFlags());
     }

diff  --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index 074cc73d53080..d6200b1909226 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -889,7 +889,7 @@ bool LoopIdiomRecognize::processLoopMemSet(MemSetInst *MSI,
     return false;
   }
 
-  const SCEV *MemsetSizeSCEV = SE->getSCEV(MSI->getLength());
+  SCEVUse MemsetSizeSCEV = SE->getSCEV(MSI->getLength());
 
   bool IsNegStride = false;
   const bool IsConstantSize = isa<ConstantInt>(MSI->getLength());
@@ -928,9 +928,9 @@ bool LoopIdiomRecognize::processLoopMemSet(MemSetInst *MSI,
 
     // Compare positive direction PointerStrideSCEV with MemsetSizeSCEV
     IsNegStride = PointerStrideSCEV->isNonConstantNegative();
-    const SCEV *PositiveStrideSCEV =
-        IsNegStride ? SE->getNegativeSCEV(PointerStrideSCEV)
-                    : PointerStrideSCEV;
+    SCEVUse PositiveStrideSCEV =
+        IsNegStride ? SCEVUse(SE->getNegativeSCEV(PointerStrideSCEV))
+                    : SCEVUse(PointerStrideSCEV);
     LLVM_DEBUG(dbgs() << "  MemsetSizeSCEV: " << *MemsetSizeSCEV << "\n"
                       << "  PositiveStrideSCEV: " << *PositiveStrideSCEV
                       << "\n");

diff  --git a/llvm/lib/Transforms/Scalar/LoopPredication.cpp b/llvm/lib/Transforms/Scalar/LoopPredication.cpp
index ea261491b7f94..b14900d6dd635 100644
--- a/llvm/lib/Transforms/Scalar/LoopPredication.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopPredication.cpp
@@ -1006,9 +1006,9 @@ static const SCEV *getMinAnalyzeableBackedgeTakenCount(ScalarEvolution &SE,
   SmallVector<BasicBlock *, 16> ExitingBlocks;
   L->getExitingBlocks(ExitingBlocks);
 
-  SmallVector<const SCEV *, 4> ExitCounts;
+  SmallVector<SCEVUse, 4> ExitCounts;
   for (BasicBlock *ExitingBB : ExitingBlocks) {
-    const SCEV *ExitCount = SE.getExitCount(L, ExitingBB);
+    SCEVUse ExitCount = SE.getExitCount(L, ExitingBB);
     if (isa<SCEVCouldNotCompute>(ExitCount))
       continue;
     assert(DT.dominates(ExitingBB, L->getLoopLatch()) &&

diff  --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index d359c2019565a..337500aec1dcf 100644
--- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -541,9 +541,8 @@ struct Formula {
 
 /// Recursion helper for initialMatch.
 static void DoInitialMatch(const SCEV *S, Loop *L,
-                           SmallVectorImpl<const SCEV *> &Good,
-                           SmallVectorImpl<const SCEV *> &Bad,
-                           ScalarEvolution &SE) {
+                           SmallVectorImpl<SCEVUse> &Good,
+                           SmallVectorImpl<SCEVUse> &Bad, ScalarEvolution &SE) {
   // Collect expressions which properly dominate the loop header.
   if (SE.properlyDominates(S, L->getHeader())) {
     Good.push_back(S);
@@ -574,11 +573,11 @@ static void DoInitialMatch(const SCEV *S, Loop *L,
   // Handle a multiplication by -1 (negation) if it didn't fold.
   if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S))
     if (Mul->getOperand(0)->isAllOnesValue()) {
-      SmallVector<const SCEV *, 4> Ops(drop_begin(Mul->operands()));
+      SmallVector<SCEVUse, 4> Ops(drop_begin(Mul->operands()));
       const SCEV *NewMul = SE.getMulExpr(Ops);
 
-      SmallVector<const SCEV *, 4> MyGood;
-      SmallVector<const SCEV *, 4> MyBad;
+      SmallVector<SCEVUse, 4> MyGood;
+      SmallVector<SCEVUse, 4> MyBad;
       DoInitialMatch(NewMul, L, MyGood, MyBad, SE);
       const SCEV *NegOne = SE.getSCEV(ConstantInt::getAllOnesValue(
         SE.getEffectiveSCEVType(NewMul->getType())));
@@ -597,8 +596,8 @@ static void DoInitialMatch(const SCEV *S, Loop *L,
 /// Incorporate loop-variant parts of S into this Formula, attempting to keep
 /// all loop-invariant and loop-computable values in a single base register.
 void Formula::initialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE) {
-  SmallVector<const SCEV *, 4> Good;
-  SmallVector<const SCEV *, 4> Bad;
+  SmallVector<SCEVUse, 4> Good;
+  SmallVector<SCEVUse, 4> Bad;
   DoInitialMatch(S, L, Good, Bad, SE);
   if (!Good.empty()) {
     const SCEV *Sum = SE.getAddExpr(Good);
@@ -877,7 +876,7 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
   // Distribute the sdiv over add operands, if the add doesn't overflow.
   if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(LHS)) {
     if (IgnoreSignificantBits || isAddSExtable(Add, SE)) {
-      SmallVector<const SCEV *, 8> Ops;
+      SmallVector<SCEVUse, 8> Ops;
       for (const SCEV *S : Add->operands()) {
         const SCEV *Op = getExactSDiv(S, RHS, SE, IgnoreSignificantBits);
         if (!Op) return nullptr;
@@ -906,7 +905,7 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
         }
       }
 
-      SmallVector<const SCEV *, 4> Ops;
+      SmallVector<SCEVUse, 4> Ops;
       bool Found = false;
       for (const SCEV *S : Mul->operands()) {
         if (!Found)
@@ -928,7 +927,7 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
 
 /// If S involves the addition of a constant integer value, return that integer
 /// value, and mutate S to point to a new SCEV with that value excluded.
-static Immediate ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) {
+static Immediate ExtractImmediate(SCEVUse &S, ScalarEvolution &SE) {
   const APInt *C;
   if (match(S, m_scev_APInt(C))) {
     if (C->getSignificantBits() <= 64) {
@@ -936,13 +935,13 @@ static Immediate ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) {
       return Immediate::getFixed(C->getSExtValue());
     }
   } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
-    SmallVector<const SCEV *, 8> NewOps(Add->operands());
+    SmallVector<SCEVUse, 8> NewOps(Add->operands());
     Immediate Result = ExtractImmediate(NewOps.front(), SE);
     if (Result.isNonZero())
       S = SE.getAddExpr(NewOps);
     return Result;
   } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
-    SmallVector<const SCEV *, 8> NewOps(AR->operands());
+    SmallVector<SCEVUse, 8> NewOps(AR->operands());
     Immediate Result = ExtractImmediate(NewOps.front(), SE);
     if (Result.isNonZero())
       S = SE.getAddRecExpr(NewOps, AR->getLoop(),
@@ -959,20 +958,20 @@ static Immediate ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) {
 
 /// If S involves the addition of a GlobalValue address, return that symbol, and
 /// mutate S to point to a new SCEV with that value excluded.
-static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) {
+static GlobalValue *ExtractSymbol(SCEVUse &S, ScalarEvolution &SE) {
   if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
     if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) {
       S = SE.getConstant(GV->getType(), 0);
       return GV;
     }
   } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
-    SmallVector<const SCEV *, 8> NewOps(Add->operands());
+    SmallVector<SCEVUse, 8> NewOps(Add->operands());
     GlobalValue *Result = ExtractSymbol(NewOps.back(), SE);
     if (Result)
       S = SE.getAddExpr(NewOps);
     return Result;
   } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
-    SmallVector<const SCEV *, 8> NewOps(AR->operands());
+    SmallVector<SCEVUse, 8> NewOps(AR->operands());
     GlobalValue *Result = ExtractSymbol(NewOps.front(), SE);
     if (Result)
       S = SE.getAddRecExpr(NewOps, AR->getLoop(),
@@ -2042,11 +2041,13 @@ static bool isAlwaysFoldable(const TargetTransformInfo &TTI,
 
   // Conservatively, create an address with an immediate and a
   // base and a scale.
-  Immediate BaseOffset = ExtractImmediate(S, SE);
-  GlobalValue *BaseGV = ExtractSymbol(S, SE);
+  SCEVUse SCopy = S;
+  Immediate BaseOffset = ExtractImmediate(SCopy, SE);
+  GlobalValue *BaseGV = ExtractSymbol(SCopy, SE);
 
   // If there's anything else involved, it's not foldable.
-  if (!S->isZero()) return false;
+  if (!SCopy->isZero())
+    return false;
 
   // Fast-path: zero is always foldable.
   if (BaseOffset.isZero() && !BaseGV)
@@ -2816,7 +2817,9 @@ std::pair<size_t, Immediate> LSRInstance::getUse(const SCEV *&Expr,
                                                  LSRUse::KindType Kind,
                                                  MemAccessTy AccessTy) {
   const SCEV *Copy = Expr;
-  Immediate Offset = ExtractImmediate(Expr, SE);
+  SCEVUse ExprUse = Expr;
+  Immediate Offset = ExtractImmediate(ExprUse, SE);
+  Expr = ExprUse;
 
   // Basic uses can't accept any offset, for example.
   if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ nullptr,
@@ -3958,7 +3961,7 @@ void LSRInstance::GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx,
       continue;
 
     // Collect all operands except *J.
-    SmallVector<const SCEV *, 8> InnerAddOps(std::as_const(AddOps).begin(), J);
+    SmallVector<SCEVUse, 8> InnerAddOps(std::as_const(AddOps).begin(), J);
     InnerAddOps.append(std::next(J), std::as_const(AddOps).end());
 
     // Don't leave just a constant behind in a register if the constant could
@@ -4049,7 +4052,7 @@ void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
   // Flatten the representation, i.e., reg1 + 1*reg2 => reg1 + reg2, before
   // processing the formula.
   Base.unscale();
-  SmallVector<const SCEV *, 4> Ops;
+  SmallVector<SCEVUse, 4> Ops;
   Formula NewBase = Base;
   NewBase.BaseRegs.clear();
   Type *CombinedIntegerType = nullptr;
@@ -4086,7 +4089,7 @@ void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
 
   // If we collected at least two registers, generate a formula combining them.
   if (Ops.size() > 1) {
-    SmallVector<const SCEV *, 4> OpsCopy(Ops); // Don't let SE modify Ops.
+    SmallVector<SCEVUse, 4> OpsCopy(Ops); // Don't let SE modify Ops.
     GenerateFormula(SE.getAddExpr(OpsCopy));
   }
 
@@ -4105,7 +4108,7 @@ void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
 void LSRInstance::GenerateSymbolicOffsetsImpl(LSRUse &LU, unsigned LUIdx,
                                               const Formula &Base, size_t Idx,
                                               bool IsScaledReg) {
-  const SCEV *G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];
+  SCEVUse G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];
   GlobalValue *GV = ExtractSymbol(G, SE);
   if (G->isZero() || !GV)
     return;
@@ -4165,7 +4168,7 @@ void LSRInstance::GenerateConstantOffsetsImpl(
     }
   };
 
-  const SCEV *G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];
+  SCEVUse G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];
 
   // With constant offsets and constant steps, we can generate pre-inc
   // accesses by having the offset equal the step. So, for access #0 with a
@@ -4522,7 +4525,7 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
   DenseMap<const SCEV *, SmallBitVector> UsedByIndicesMap;
   SmallVector<const SCEV *, 8> Sequence;
   for (const SCEV *Use : RegUses) {
-    const SCEV *Reg = Use; // Make a copy for ExtractImmediate to modify.
+    SCEVUse Reg = Use; // Make a copy for ExtractImmediate to modify.
     Immediate Imm = ExtractImmediate(Reg, SE);
     auto Pair = Map.try_emplace(Reg);
     if (Pair.second)
@@ -5716,7 +5719,7 @@ Value *LSRInstance::Expand(const LSRUse &LU, const LSRFixup &LF,
   Type *IntTy = SE.getEffectiveSCEVType(Ty);
 
   // Build up a list of operands to add together to form the full base.
-  SmallVector<const SCEV *, 8> Ops;
+  SmallVector<SCEVUse, 8> Ops;
 
   // Expand the BaseRegs portion.
   for (const SCEV *Reg : F.BaseRegs) {

diff  --git a/llvm/lib/Transforms/Scalar/NaryReassociate.cpp b/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
index ec145f2f48bea..b0a33710c25bc 100644
--- a/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
+++ b/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
@@ -225,7 +225,7 @@ bool NaryReassociatePass::doOneIteration(Function &F) {
   for (const auto Node : depth_first(DT)) {
     BasicBlock *BB = Node->getBlock();
     for (Instruction &OrigI : *BB) {
-      const SCEV *OrigSCEV = nullptr;
+      SCEVUse OrigSCEV = nullptr;
       if (Instruction *NewI = tryReassociate(&OrigI, OrigSCEV)) {
         Changed = true;
         OrigI.replaceAllUsesWith(NewI);
@@ -234,7 +234,7 @@ bool NaryReassociatePass::doOneIteration(Function &F) {
         DeadInsts.push_back(WeakTrackingVH(&OrigI));
         // Add the rewritten instruction to SeenExprs; the original
         // instruction is deleted.
-        const SCEV *NewSCEV = SE->getSCEV(NewI);
+        SCEVUse NewSCEV = SE->getSCEV(NewI);
         SeenExprs[NewSCEV].push_back(WeakTrackingVH(NewI));
 
         // Ideally, NewSCEV should equal OldSCEV because tryReassociate(I)
@@ -273,7 +273,7 @@ bool NaryReassociatePass::doOneIteration(Function &F) {
 template <typename PredT>
 Instruction *
 NaryReassociatePass::matchAndReassociateMinOrMax(Instruction *I,
-                                                 const SCEV *&OrigSCEV) {
+                                                 SCEVUse &OrigSCEV) {
   Value *LHS = nullptr;
   Value *RHS = nullptr;
 
@@ -292,8 +292,8 @@ NaryReassociatePass::matchAndReassociateMinOrMax(Instruction *I,
   return nullptr;
 }
 
-Instruction *NaryReassociatePass::tryReassociate(Instruction * I,
-                                                 const SCEV *&OrigSCEV) {
+Instruction *NaryReassociatePass::tryReassociate(Instruction *I,
+                                                 SCEVUse &OrigSCEV) {
 
   if (!SE->isSCEVable(I->getType()))
     return nullptr;
@@ -397,7 +397,7 @@ NaryReassociatePass::tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
                                               Value *RHS, Type *IndexedType) {
   // Look for GEP's closest dominator that has the same SCEV as GEP except that
   // the I-th index is replaced with LHS.
-  SmallVector<const SCEV *, 4> IndexExprs;
+  SmallVector<SCEVUse, 4> IndexExprs;
   for (Use &Index : GEP->indices())
     IndexExprs.push_back(SE->getSCEV(Index));
   // Replace the I-th index with LHS.
@@ -414,8 +414,7 @@ NaryReassociatePass::tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
     // @reassociate_gep_assume for an example of this canonicalization.
     IndexExprs[I] = SE->getZeroExtendExpr(IndexExprs[I], GEPArgType);
   }
-  const SCEV *CandidateExpr = SE->getGEPExpr(cast<GEPOperator>(GEP),
-                                             IndexExprs);
+  SCEVUse CandidateExpr = SE->getGEPExpr(cast<GEPOperator>(GEP), IndexExprs);
 
   Value *Candidate = findClosestMatchingDominator(CandidateExpr, GEP);
   if (Candidate == nullptr)
@@ -481,8 +480,8 @@ Instruction *NaryReassociatePass::tryReassociateBinaryOp(Value *LHS, Value *RHS,
   if (LHS->hasOneUse() && matchTernaryOp(I, LHS, A, B)) {
     // I = (A op B) op RHS
     //   = (A op RHS) op B or (B op RHS) op A
-    const SCEV *AExpr = SE->getSCEV(A), *BExpr = SE->getSCEV(B);
-    const SCEV *RHSExpr = SE->getSCEV(RHS);
+    SCEVUse AExpr = SE->getSCEV(A), BExpr = SE->getSCEV(B);
+    SCEVUse RHSExpr = SE->getSCEV(RHS);
     if (BExpr != RHSExpr) {
       if (auto *NewI =
               tryReassociatedBinaryOp(getBinarySCEV(I, AExpr, RHSExpr), B, I))
@@ -497,7 +496,7 @@ Instruction *NaryReassociatePass::tryReassociateBinaryOp(Value *LHS, Value *RHS,
   return nullptr;
 }
 
-Instruction *NaryReassociatePass::tryReassociatedBinaryOp(const SCEV *LHSExpr,
+Instruction *NaryReassociatePass::tryReassociatedBinaryOp(SCEVUse LHSExpr,
                                                           Value *RHS,
                                                           BinaryOperator *I) {
   // Look for the closest dominator LHS of I that computes LHSExpr, and replace
@@ -535,9 +534,8 @@ bool NaryReassociatePass::matchTernaryOp(BinaryOperator *I, Value *V,
   return false;
 }
 
-const SCEV *NaryReassociatePass::getBinarySCEV(BinaryOperator *I,
-                                               const SCEV *LHS,
-                                               const SCEV *RHS) {
+SCEVUse NaryReassociatePass::getBinarySCEV(BinaryOperator *I, SCEVUse LHS,
+                                           SCEVUse RHS) {
   switch (I->getOpcode()) {
   case Instruction::Add:
     return SE->getAddExpr(LHS, RHS);
@@ -550,7 +548,7 @@ const SCEV *NaryReassociatePass::getBinarySCEV(BinaryOperator *I,
 }
 
 Instruction *
-NaryReassociatePass::findClosestMatchingDominator(const SCEV *CandidateExpr,
+NaryReassociatePass::findClosestMatchingDominator(SCEVUse CandidateExpr,
                                                   Instruction *Dominatee) {
   auto Pos = SeenExprs.find(CandidateExpr);
   if (Pos == SeenExprs.end())
@@ -622,12 +620,11 @@ Value *NaryReassociatePass::tryReassociateMinOrMax(Instruction *I,
       }))
     return nullptr;
 
-  auto tryCombination = [&](Value *A, const SCEV *AExpr, Value *B,
-                            const SCEV *BExpr, Value *C,
-                            const SCEV *CExpr) -> Value * {
-    SmallVector<const SCEV *, 2> Ops1{BExpr, AExpr};
+  auto tryCombination = [&](Value *A, SCEVUse AExpr, Value *B, SCEVUse BExpr,
+                            Value *C, SCEVUse CExpr) -> Value * {
+    SmallVector<SCEVUse, 2> Ops1{BExpr, AExpr};
     const SCEVTypes SCEVType = convertToSCEVype(m_MaxMin);
-    const SCEV *R1Expr = SE->getMinMaxExpr(SCEVType, Ops1);
+    SCEVUse R1Expr = SE->getMinMaxExpr(SCEVType, Ops1);
 
     Instruction *R1MinMax = findClosestMatchingDominator(R1Expr, I);
 
@@ -636,9 +633,8 @@ Value *NaryReassociatePass::tryReassociateMinOrMax(Instruction *I,
 
     LLVM_DEBUG(dbgs() << "NARY: Found common sub-expr: " << *R1MinMax << "\n");
 
-    SmallVector<const SCEV *, 2> Ops2{SE->getUnknown(C),
-                                      SE->getUnknown(R1MinMax)};
-    const SCEV *R2Expr = SE->getMinMaxExpr(SCEVType, Ops2);
+    SmallVector<SCEVUse, 2> Ops2{SE->getUnknown(C), SE->getUnknown(R1MinMax)};
+    SCEVUse R2Expr = SE->getMinMaxExpr(SCEVType, Ops2);
 
     SCEVExpander Expander(*SE, "nary-reassociate");
     Value *NewMinMax = Expander.expandCodeFor(R2Expr, I->getType(), I);
@@ -649,9 +645,9 @@ Value *NaryReassociatePass::tryReassociateMinOrMax(Instruction *I,
     return NewMinMax;
   };
 
-  const SCEV *AExpr = SE->getSCEV(A);
-  const SCEV *BExpr = SE->getSCEV(B);
-  const SCEV *RHSExpr = SE->getSCEV(RHS);
+  SCEVUse AExpr = SE->getSCEV(A);
+  SCEVUse BExpr = SE->getSCEV(B);
+  SCEVUse RHSExpr = SE->getSCEV(RHS);
 
   if (BExpr != RHSExpr) {
     // Try (A op RHS) op B

diff  --git a/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
index c371a9d2f6416..ed43fb4b63f87 100644
--- a/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
@@ -1086,7 +1086,7 @@ void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForGEP(
   if (GEP->getType()->isVectorTy())
     return;
 
-  SmallVector<const SCEV *, 4> IndexExprs;
+  SmallVector<SCEVUse, 4> IndexExprs;
   for (Use &Idx : GEP->indices())
     IndexExprs.push_back(SE->getSCEV(Idx));
 
@@ -1095,12 +1095,12 @@ void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForGEP(
     if (GTI.isStruct())
       continue;
 
-    const SCEV *OrigIndexExpr = IndexExprs[I - 1];
-    IndexExprs[I - 1] = SE->getZero(OrigIndexExpr->getType());
+    SCEVUse OrigIndexExpr = IndexExprs[I - 1];
+    IndexExprs[I - 1] = SE->getZero(OrigIndexExpr.getPointer()->getType());
 
     // The base of this candidate is GEP's base plus the offsets of all
     // indices except this current one.
-    const SCEV *BaseExpr = SE->getGEPExpr(cast<GEPOperator>(GEP), IndexExprs);
+    SCEVUse BaseExpr = SE->getGEPExpr(cast<GEPOperator>(GEP), IndexExprs);
     Value *ArrayIdx = GEP->getOperand(I);
     uint64_t ElementSize = GTI.getSequentialElementStride(*DL);
     IntegerType *PtrIdxTy = cast<IntegerType>(DL->getIndexType(GEP->getType()));

diff  --git a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
index 5dd388e95accb..ac60837584763 100644
--- a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
@@ -563,7 +563,7 @@ Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
     if (isa<PointerType>(Sum->getType())) {
       // The running sum expression is a pointer. Try to form a getelementptr
       // at this level with that as the base.
-      SmallVector<const SCEV *, 4> NewOps;
+      SmallVector<SCEVUse, 4> NewOps;
       for (; I != E && I->first == CurLoop; ++I) {
         // If the operand is SCEVUnknown and not instructions, peek through
         // it, to enable more of it to be folded into the GEP.
@@ -1336,7 +1336,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
   if (CanonicalIV &&
       SE.getTypeSizeInBits(CanonicalIV->getType()) > SE.getTypeSizeInBits(Ty) &&
       !S->getType()->isPointerTy()) {
-    SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
+    SmallVector<SCEVUse, 4> NewOps(S->getNumOperands());
     for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
       NewOps[i] = SE.getAnyExtendExpr(S->getOperand(i), CanonicalIV->getType());
     Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
@@ -1360,7 +1360,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
                             S->getNoWrapFlags(SCEV::FlagNUW));
     }
 
-    SmallVector<const SCEV *, 4> NewOps(S->operands());
+    SmallVector<SCEVUse, 4> NewOps(S->operands());
     NewOps[0] = SE.getConstant(Ty, 0);
     const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
                                         S->getNoWrapFlags(SCEV::FlagNW));

diff  --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
index bd9cf3077a052..ea18cddd3df73 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
@@ -537,7 +537,8 @@ class SCEVAddRecForUniformityRewriter
     const SCEV *NewStep =
         SE.getMulExpr(Step, SE.getConstant(Ty, StepMultiplier));
     const SCEV *ScaledOffset = SE.getMulExpr(Step, SE.getConstant(Ty, Offset));
-    const SCEV *NewStart = SE.getAddExpr(Expr->getStart(), ScaledOffset);
+    const SCEV *NewStart =
+        SE.getAddExpr(Expr->getStart(), SCEVUse(ScaledOffset));
     return SE.getAddRecExpr(NewStart, NewStep, TheLoop, SCEV::FlagAnyWrap);
   }
 

diff  --git a/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp b/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp
index 821a4f7911bb8..6614e7cc3a7e9 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp
@@ -149,11 +149,10 @@ const SCEV *vputils::getSCEVExprForVPValue(const VPValue *V,
   }
 
   // Helper to create SCEVs for binary and unary operations.
-  auto CreateSCEV =
-      [&](ArrayRef<VPValue *> Ops,
-          function_ref<const SCEV *(ArrayRef<const SCEV *>)> CreateFn)
+  auto CreateSCEV = [&](ArrayRef<VPValue *> Ops,
+                        function_ref<const SCEV *(ArrayRef<SCEVUse>)> CreateFn)
       -> const SCEV * {
-    SmallVector<const SCEV *, 2> SCEVOps;
+    SmallVector<SCEVUse, 2> SCEVOps;
     for (VPValue *Op : Ops) {
       const SCEV *S = getSCEVExprForVPValue(Op, PSE, L);
       if (isa<SCEVCouldNotCompute>(S))
@@ -165,46 +164,46 @@ const SCEV *vputils::getSCEVExprForVPValue(const VPValue *V,
 
   VPValue *LHSVal, *RHSVal;
   if (match(V, m_Add(m_VPValue(LHSVal), m_VPValue(RHSVal))))
-    return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<const SCEV *> Ops) {
+    return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<SCEVUse> Ops) {
       return SE.getAddExpr(Ops[0], Ops[1], SCEV::FlagAnyWrap, 0);
     });
   if (match(V, m_Sub(m_VPValue(LHSVal), m_VPValue(RHSVal))))
-    return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<const SCEV *> Ops) {
+    return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<SCEVUse> Ops) {
       return SE.getMinusSCEV(Ops[0], Ops[1], SCEV::FlagAnyWrap, 0);
     });
   if (match(V, m_Not(m_VPValue(LHSVal)))) {
     // not X = xor X, -1 = -1 - X
-    return CreateSCEV({LHSVal}, [&](ArrayRef<const SCEV *> Ops) {
+    return CreateSCEV({LHSVal}, [&](ArrayRef<SCEVUse> Ops) {
       return SE.getMinusSCEV(SE.getMinusOne(Ops[0]->getType()), Ops[0]);
     });
   }
   if (match(V, m_Mul(m_VPValue(LHSVal), m_VPValue(RHSVal))))
-    return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<const SCEV *> Ops) {
+    return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<SCEVUse> Ops) {
       return SE.getMulExpr(Ops[0], Ops[1], SCEV::FlagAnyWrap, 0);
     });
   if (match(V,
             m_Binary<Instruction::UDiv>(m_VPValue(LHSVal), m_VPValue(RHSVal))))
-    return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<const SCEV *> Ops) {
+    return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<SCEVUse> Ops) {
       return SE.getUDivExpr(Ops[0], Ops[1]);
     });
   // Handle AND with constant mask: x & (2^n - 1) can be represented as x % 2^n.
   const APInt *Mask;
   if (match(V, m_c_BinaryAnd(m_VPValue(LHSVal), m_APInt(Mask))) &&
       (*Mask + 1).isPowerOf2())
-    return CreateSCEV({LHSVal}, [&](ArrayRef<const SCEV *> Ops) {
+    return CreateSCEV({LHSVal}, [&](ArrayRef<SCEVUse> Ops) {
       return SE.getURemExpr(Ops[0], SE.getConstant(*Mask + 1));
     });
   if (match(V, m_Trunc(m_VPValue(LHSVal)))) {
     const VPlan *Plan = V->getDefiningRecipe()->getParent()->getPlan();
     Type *DestTy = VPTypeAnalysis(*Plan).inferScalarType(V);
-    return CreateSCEV({LHSVal}, [&](ArrayRef<const SCEV *> Ops) {
+    return CreateSCEV({LHSVal}, [&](ArrayRef<SCEVUse> Ops) {
       return SE.getTruncateExpr(Ops[0], DestTy);
     });
   }
   if (match(V, m_ZExt(m_VPValue(LHSVal)))) {
     const VPlan *Plan = V->getDefiningRecipe()->getParent()->getPlan();
     Type *DestTy = VPTypeAnalysis(*Plan).inferScalarType(V);
-    return CreateSCEV({LHSVal}, [&](ArrayRef<const SCEV *> Ops) {
+    return CreateSCEV({LHSVal}, [&](ArrayRef<SCEVUse> Ops) {
       return SE.getZeroExtendExpr(Ops[0], DestTy);
     });
   }
@@ -225,35 +224,35 @@ const SCEV *vputils::getSCEVExprForVPValue(const VPValue *V,
                                SE.getSignExtendExpr(V2, DestTy), SCEV::FlagNSW);
     }
 
-    return CreateSCEV({LHSVal}, [&](ArrayRef<const SCEV *> Ops) {
+    return CreateSCEV({LHSVal}, [&](ArrayRef<SCEVUse> Ops) {
       return SE.getSignExtendExpr(Ops[0], DestTy);
     });
   }
   if (match(V,
             m_Intrinsic<Intrinsic::umax>(m_VPValue(LHSVal), m_VPValue(RHSVal))))
-    return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<const SCEV *> Ops) {
+    return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<SCEVUse> Ops) {
       return SE.getUMaxExpr(Ops[0], Ops[1]);
     });
   if (match(V,
             m_Intrinsic<Intrinsic::smax>(m_VPValue(LHSVal), m_VPValue(RHSVal))))
-    return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<const SCEV *> Ops) {
+    return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<SCEVUse> Ops) {
       return SE.getSMaxExpr(Ops[0], Ops[1]);
     });
   if (match(V,
             m_Intrinsic<Intrinsic::umin>(m_VPValue(LHSVal), m_VPValue(RHSVal))))
-    return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<const SCEV *> Ops) {
+    return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<SCEVUse> Ops) {
       return SE.getUMinExpr(Ops[0], Ops[1]);
     });
   if (match(V,
             m_Intrinsic<Intrinsic::smin>(m_VPValue(LHSVal), m_VPValue(RHSVal))))
-    return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<const SCEV *> Ops) {
+    return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<SCEVUse> Ops) {
       return SE.getSMinExpr(Ops[0], Ops[1]);
     });
 
   ArrayRef<VPValue *> Ops;
   Type *SourceElementType;
   if (match(V, m_GetElementPtr(SourceElementType, Ops))) {
-    const SCEV *GEPExpr = CreateSCEV(Ops, [&](ArrayRef<const SCEV *> Ops) {
+    const SCEV *GEPExpr = CreateSCEV(Ops, [&](ArrayRef<SCEVUse> Ops) {
       return SE.getGEPExpr(Ops.front(), Ops.drop_front(), SourceElementType);
     });
     return PSE.getPredicatedSCEV(GEPExpr);

diff  --git a/llvm/unittests/Analysis/ScalarEvolutionTest.cpp b/llvm/unittests/Analysis/ScalarEvolutionTest.cpp
index 0974c693069de..6c16f1b90e489 100644
--- a/llvm/unittests/Analysis/ScalarEvolutionTest.cpp
+++ b/llvm/unittests/Analysis/ScalarEvolutionTest.cpp
@@ -251,12 +251,12 @@ TEST_F(ScalarEvolutionsTest, CommutativeExprOperandOrder) {
     EXPECT_EQ(SE.getMulExpr(B, C), SE.getMulExpr(C, B));
     EXPECT_EQ(SE.getMulExpr(A, C), SE.getMulExpr(C, A));
 
-    SmallVector<const SCEV *, 3> Ops0 = {A, B, C};
-    SmallVector<const SCEV *, 3> Ops1 = {A, C, B};
-    SmallVector<const SCEV *, 3> Ops2 = {B, A, C};
-    SmallVector<const SCEV *, 3> Ops3 = {B, C, A};
-    SmallVector<const SCEV *, 3> Ops4 = {C, B, A};
-    SmallVector<const SCEV *, 3> Ops5 = {C, A, B};
+    SmallVector<SCEVUse, 3> Ops0 = {A, B, C};
+    SmallVector<SCEVUse, 3> Ops1 = {A, C, B};
+    SmallVector<SCEVUse, 3> Ops2 = {B, A, C};
+    SmallVector<SCEVUse, 3> Ops3 = {B, C, A};
+    SmallVector<SCEVUse, 3> Ops4 = {C, B, A};
+    SmallVector<SCEVUse, 3> Ops5 = {C, A, B};
 
     const SCEV *Mul0 = SE.getMulExpr(Ops0);
     const SCEV *Mul1 = SE.getMulExpr(Ops1);
@@ -542,13 +542,14 @@ TEST_F(ScalarEvolutionsTest, SCEVNormalization) {
     auto *L1 = *std::next(LI.begin());
     auto *L0 = *std::next(LI.begin(), 2);
 
-    auto GetAddRec = [&SE](const Loop *L, std::initializer_list<const SCEV *> Ops) {
-      SmallVector<const SCEV *, 4> OpsCopy(Ops);
+    auto GetAddRec = [&SE](const Loop *L,
+                           std::initializer_list<const SCEV *> Ops) {
+      SmallVector<SCEVUse, 4> OpsCopy(Ops.begin(), Ops.end());
       return SE.getAddRecExpr(OpsCopy, L, SCEV::FlagAnyWrap);
     };
 
     auto GetAdd = [&SE](std::initializer_list<const SCEV *> Ops) {
-      SmallVector<const SCEV *, 4> OpsCopy(Ops);
+      SmallVector<SCEVUse, 4> OpsCopy(Ops.begin(), Ops.end());
       return SE.getAddExpr(OpsCopy, SCEV::FlagAnyWrap);
     };
 
@@ -1729,7 +1730,7 @@ TEST_F(ScalarEvolutionsTest, ComplexityComparatorIsStrictWeakOrdering2) {
   const SCEV *M0 = SE.getNegativeSCEV(P0);
   const SCEV *M2 = SE.getNegativeSCEV(P2);
 
-  SmallVector<const SCEV *, 6> Ops = {M2, P0, M0, P1, P2};
+  SmallVector<SCEVUse, 6> Ops = {M2, P0, M0, P1, P2};
   // When _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_DEBUG, this will
   // crash if the comparator has the specific caching bug.
   SE.getAddExpr(Ops);

diff  --git a/llvm/unittests/Transforms/Utils/ScalarEvolutionExpanderTest.cpp b/llvm/unittests/Transforms/Utils/ScalarEvolutionExpanderTest.cpp
index 144a28a358f6e..e97d00dda7f8c 100644
--- a/llvm/unittests/Transforms/Utils/ScalarEvolutionExpanderTest.cpp
+++ b/llvm/unittests/Transforms/Utils/ScalarEvolutionExpanderTest.cpp
@@ -873,8 +873,8 @@ TEST_F(ScalarEvolutionExpanderTest, SCEVExpandNonAffineAddRec) {
 
   // Expand {5,+,1,+,1}
   auto GetAR3 = [&](ScalarEvolution &SE, Loop *L) -> const SCEVAddRecExpr * {
-    SmallVector<const SCEV *, 3> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
-                                        SE.getOne(ARType), SE.getOne(ARType)};
+    SmallVector<SCEVUse, 3> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
+                                   SE.getOne(ARType), SE.getOne(ARType)};
     return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, L, SCEV::FlagAnyWrap));
   };
   TestNoCanonicalIV(GetAR3);
@@ -883,9 +883,9 @@ TEST_F(ScalarEvolutionExpanderTest, SCEVExpandNonAffineAddRec) {
 
   // Expand {5,+,1,+,1,+,1}
   auto GetAR4 = [&](ScalarEvolution &SE, Loop *L) -> const SCEVAddRecExpr * {
-    SmallVector<const SCEV *, 4> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
-                                        SE.getOne(ARType), SE.getOne(ARType),
-                                        SE.getOne(ARType)};
+    SmallVector<SCEVUse, 4> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
+                                   SE.getOne(ARType), SE.getOne(ARType),
+                                   SE.getOne(ARType)};
     return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, L, SCEV::FlagAnyWrap));
   };
   TestNoCanonicalIV(GetAR4);
@@ -894,9 +894,9 @@ TEST_F(ScalarEvolutionExpanderTest, SCEVExpandNonAffineAddRec) {
 
   // Expand {5,+,1,+,1,+,1,+,1}
   auto GetAR5 = [&](ScalarEvolution &SE, Loop *L) -> const SCEVAddRecExpr * {
-    SmallVector<const SCEV *, 5> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
-                                        SE.getOne(ARType), SE.getOne(ARType),
-                                        SE.getOne(ARType), SE.getOne(ARType)};
+    SmallVector<SCEVUse, 5> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
+                                   SE.getOne(ARType), SE.getOne(ARType),
+                                   SE.getOne(ARType), SE.getOne(ARType)};
     return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, L, SCEV::FlagAnyWrap));
   };
   TestNoCanonicalIV(GetAR5);

diff  --git a/polly/include/polly/CodeGen/BlockGenerators.h b/polly/include/polly/CodeGen/BlockGenerators.h
index 401e80eb0fece..338256d9e1dc3 100644
--- a/polly/include/polly/CodeGen/BlockGenerators.h
+++ b/polly/include/polly/CodeGen/BlockGenerators.h
@@ -35,7 +35,6 @@ using llvm::Instruction;
 using llvm::LoadInst;
 using llvm::Loop;
 using llvm::LoopInfo;
-using llvm::LoopToScevMapT;
 using llvm::MapVector;
 using llvm::PHINode;
 using llvm::ScalarEvolution;

diff  --git a/polly/include/polly/Support/ScopHelper.h b/polly/include/polly/Support/ScopHelper.h
index 38b731a9f7d8d..fa36acf458823 100644
--- a/polly/include/polly/Support/ScopHelper.h
+++ b/polly/include/polly/Support/ScopHelper.h
@@ -14,6 +14,7 @@
 #define POLLY_SUPPORT_IRHELPER_H
 
 #include "llvm/ADT/SetVector.h"
+#include "llvm/Analysis/ScalarEvolution.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/ValueHandle.h"
@@ -37,7 +38,7 @@ class Scop;
 class ScopStmt;
 
 /// Same as llvm/Analysis/ScalarEvolutionExpressions.h
-using LoopToScevMapT = llvm::DenseMap<const llvm::Loop *, const llvm::SCEV *>;
+using LoopToScevMapT = llvm::DenseMap<const llvm::Loop *, llvm::SCEVUse>;
 
 /// Enumeration of assumptions Polly can take.
 enum AssumptionKind {

diff  --git a/polly/lib/Analysis/ScopDetection.cpp b/polly/lib/Analysis/ScopDetection.cpp
index b9fd4b8be7027..99269aafb3d14 100644
--- a/polly/lib/Analysis/ScopDetection.cpp
+++ b/polly/lib/Analysis/ScopDetection.cpp
@@ -886,7 +886,7 @@ ScopDetection::getDelinearizationTerms(DetectionContext &Context,
         if (auto *AF2 = dyn_cast<SCEVAddRecExpr>(Op))
           collectParametricTerms(SE, AF2, Terms);
         if (auto *AF2 = dyn_cast<SCEVMulExpr>(Op)) {
-          SmallVector<const SCEV *, 0> Operands;
+          SmallVector<SCEVUse, 0> Operands;
 
           for (const SCEV *MulOp : AF2->operands()) {
             if (auto *Const = dyn_cast<SCEVConstant>(MulOp))

diff  --git a/polly/lib/Support/SCEVValidator.cpp b/polly/lib/Support/SCEVValidator.cpp
index 5c3b10cc08ded..291853cde8308 100644
--- a/polly/lib/Support/SCEVValidator.cpp
+++ b/polly/lib/Support/SCEVValidator.cpp
@@ -700,7 +700,7 @@ polly::extractConstantFactor(const SCEV *S, ScalarEvolution &SE) {
   }
 
   if (auto *Add = dyn_cast<SCEVAddExpr>(S)) {
-    SmallVector<const SCEV *, 4> LeftOvers;
+    SmallVector<SCEVUse, 4> LeftOvers;
     auto Op0Pair = extractConstantFactor(Add->getOperand(0), SE);
     auto *Factor = Op0Pair.first;
     if (SE.isKnownNegative(Factor)) {
@@ -729,7 +729,7 @@ polly::extractConstantFactor(const SCEV *S, ScalarEvolution &SE) {
   if (!Mul)
     return std::make_pair(ConstPart, S);
 
-  SmallVector<const SCEV *, 4> LeftOvers;
+  SmallVector<SCEVUse, 4> LeftOvers;
   for (const SCEV *Op : Mul->operands())
     if (isa<SCEVConstant>(Op))
       ConstPart = cast<SCEVConstant>(SE.getMulExpr(ConstPart, Op));

diff  --git a/polly/lib/Support/ScopHelper.cpp b/polly/lib/Support/ScopHelper.cpp
index fe9b8bb1ffea1..fb80edcf1d307 100644
--- a/polly/lib/Support/ScopHelper.cpp
+++ b/polly/lib/Support/ScopHelper.cpp
@@ -243,7 +243,7 @@ struct ScopExpander final : SCEVVisitor<ScopExpander, const SCEV *> {
 
   explicit ScopExpander(const Region &R, ScalarEvolution &SE, Function *GenFn,
                         ScalarEvolution &GenSE, const char *Name,
-                        ValueMapT *VMap, LoopToScevMapT *LoopMap,
+                        ValueMapT *VMap, polly::LoopToScevMapT *LoopMap,
                         BasicBlock *RTCBB)
       : Expander(GenSE, Name, /*PreserveLCSSA=*/false), Name(Name), R(R),
         VMap(VMap), LoopMap(LoopMap), RTCBB(RTCBB), GenSE(GenSE), GenFn(GenFn) {
@@ -272,7 +272,7 @@ struct ScopExpander final : SCEVVisitor<ScopExpander, const SCEV *> {
   const char *Name;
   const Region &R;
   ValueMapT *VMap;
-  LoopToScevMapT *LoopMap;
+  polly::LoopToScevMapT *LoopMap;
   BasicBlock *RTCBB;
   DenseMap<const SCEV *, const SCEV *> SCEVCache;
 
@@ -389,50 +389,50 @@ struct ScopExpander final : SCEVVisitor<ScopExpander, const SCEV *> {
     return GenSE.getUDivExpr(visit(E->getLHS()), RHSScev);
   }
   const SCEV *visitAddExpr(const SCEVAddExpr *E) {
-    SmallVector<const SCEV *, 4> NewOps;
+    SmallVector<SCEVUse, 4> NewOps;
     for (const SCEV *Op : E->operands())
       NewOps.push_back(visit(Op));
     return GenSE.getAddExpr(NewOps);
   }
   const SCEV *visitMulExpr(const SCEVMulExpr *E) {
-    SmallVector<const SCEV *, 4> NewOps;
+    SmallVector<SCEVUse, 4> NewOps;
     for (const SCEV *Op : E->operands())
       NewOps.push_back(visit(Op));
     return GenSE.getMulExpr(NewOps);
   }
   const SCEV *visitUMaxExpr(const SCEVUMaxExpr *E) {
-    SmallVector<const SCEV *, 4> NewOps;
-    for (const SCEV *Op : E->operands())
+    SmallVector<SCEVUse, 4> NewOps;
+    for (SCEVUse Op : E->operands())
       NewOps.push_back(visit(Op));
     return GenSE.getUMaxExpr(NewOps);
   }
   const SCEV *visitSMaxExpr(const SCEVSMaxExpr *E) {
-    SmallVector<const SCEV *, 4> NewOps;
-    for (const SCEV *Op : E->operands())
+    SmallVector<SCEVUse, 4> NewOps;
+    for (SCEVUse Op : E->operands())
       NewOps.push_back(visit(Op));
     return GenSE.getSMaxExpr(NewOps);
   }
   const SCEV *visitUMinExpr(const SCEVUMinExpr *E) {
-    SmallVector<const SCEV *, 4> NewOps;
-    for (const SCEV *Op : E->operands())
+    SmallVector<SCEVUse, 4> NewOps;
+    for (SCEVUse Op : E->operands())
       NewOps.push_back(visit(Op));
     return GenSE.getUMinExpr(NewOps);
   }
   const SCEV *visitSMinExpr(const SCEVSMinExpr *E) {
-    SmallVector<const SCEV *, 4> NewOps;
-    for (const SCEV *Op : E->operands())
+    SmallVector<SCEVUse, 4> NewOps;
+    for (SCEVUse Op : E->operands())
       NewOps.push_back(visit(Op));
     return GenSE.getSMinExpr(NewOps);
   }
   const SCEV *visitSequentialUMinExpr(const SCEVSequentialUMinExpr *E) {
-    SmallVector<const SCEV *, 4> NewOps;
-    for (const SCEV *Op : E->operands())
+    SmallVector<SCEVUse, 4> NewOps;
+    for (SCEVUse Op : E->operands())
       NewOps.push_back(visit(Op));
     return GenSE.getUMinExpr(NewOps, /*Sequential=*/true);
   }
   const SCEV *visitAddRecExpr(const SCEVAddRecExpr *E) {
-    SmallVector<const SCEV *, 4> NewOps;
-    for (const SCEV *Op : E->operands())
+    SmallVector<SCEVUse, 4> NewOps;
+    for (SCEVUse Op : E->operands())
       NewOps.push_back(visit(Op));
 
     const Loop *L = E->getLoop();


        


More information about the llvm-commits mailing list