[llvm] 6555b5d - [SCEV] Store getValue() result in variable (NFC)
Nikita Popov via llvm-commits
llvm-commits at lists.llvm.org
Fri Jun 23 03:31:43 PDT 2023
Author: Nikita Popov
Date: 2023-06-23T12:31:36+02:00
New Revision: 6555b5dc99715aa0f27b1e75a8f08e3cc8d76406
URL: https://github.com/llvm/llvm-project/commit/6555b5dc99715aa0f27b1e75a8f08e3cc8d76406
DIFF: https://github.com/llvm/llvm-project/commit/6555b5dc99715aa0f27b1e75a8f08e3cc8d76406.diff
LOG: [SCEV] Store getValue() result in variable (NFC)
Added:
Modified:
llvm/lib/Analysis/ScalarEvolution.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 7c09eb9ae128d..827e4f676fc1d 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -6764,9 +6764,10 @@ const ConstantRange &ScalarEvolution::getRangeRef(
}
case scUnknown: {
const SCEVUnknown *U = cast<SCEVUnknown>(S);
+ Value *V = U->getValue();
// Check if the IR explicitly contains !range metadata.
- std::optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
+ std::optional<ConstantRange> MDRange = GetRangeFromMetadata(V);
if (MDRange)
ConservativeResult =
ConservativeResult.intersectWith(*MDRange, RangeType);
@@ -6779,13 +6780,13 @@ const ConstantRange &ScalarEvolution::getRangeRef(
// See if ValueTracking can give us a useful range.
const DataLayout &DL = getDataLayout();
- KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
+ KnownBits Known = computeKnownBits(V, DL, 0, &AC, nullptr, &DT);
if (Known.getBitWidth() != BitWidth)
Known = Known.zextOrTrunc(BitWidth);
// ValueTracking may be able to compute a tighter result for the number of
// sign bits than for the value of those sign bits.
- unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
+ unsigned NS = ComputeNumSignBits(V, DL, 0, &AC, nullptr, &DT);
if (U->getType()->isPointerTy()) {
// If the pointer size is larger than the index size type, this can cause
// NS to be larger than BitWidth. So compensate for this.
@@ -6820,9 +6821,8 @@ const ConstantRange &ScalarEvolution::getRangeRef(
Opts.RoundToAlign = false;
Opts.NullIsUnknownSize = true;
uint64_t ObjSize;
- auto *GV = dyn_cast<GlobalVariable>(U->getValue());
- if (GV && getObjectSize(U->getValue(), ObjSize, DL, &TLI, Opts) &&
- ObjSize > 1) {
+ auto *GV = dyn_cast<GlobalVariable>(V);
+ if (GV && getObjectSize(V, ObjSize, DL, &TLI, Opts) && ObjSize > 1) {
// The highest address the object can start is ObjSize bytes before the
// end (unsigned max value). If this value is not a multiple of the
// alignment, the last possible start value is the next lowest multiple
@@ -6839,7 +6839,7 @@ const ConstantRange &ScalarEvolution::getRangeRef(
}
// A range of Phi is a subset of union of all ranges of its input.
- if (PHINode *Phi = dyn_cast<PHINode>(U->getValue())) {
+ if (PHINode *Phi = dyn_cast<PHINode>(V)) {
// Make sure that we do not run over cycled Phis.
if (PendingPhiRanges.insert(Phi).second) {
ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false);
@@ -6860,7 +6860,7 @@ const ConstantRange &ScalarEvolution::getRangeRef(
}
// vscale can't be equal to zero
- if (const auto *II = dyn_cast<IntrinsicInst>(U->getValue()))
+ if (const auto *II = dyn_cast<IntrinsicInst>(V))
if (II->getIntrinsicID() == Intrinsic::vscale) {
ConstantRange Disallowed = APInt::getZero(BitWidth);
ConservativeResult = ConservativeResult.difference(Disallowed);
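
For readers skimming the diff: the change is purely mechanical, hoisting the repeated U->getValue() call into a local V that the later uses share. A minimal standalone sketch of that pattern (plain C++, not LLVM code; Unknown, useRepeated, and useCached are made-up names for illustration) might look like:

    #include <iostream>
    #include <string>

    // Stand-in for a node that wraps a value behind an accessor.
    struct Unknown {
      std::string Val;
      const std::string &getValue() const { return Val; }
    };

    static void useRepeated(const Unknown &U) {
      // Before: every use spells out U.getValue().
      std::cout << U.getValue() << ' ' << U.getValue().size() << '\n';
    }

    static void useCached(const Unknown &U) {
      // After: store the result once in a local, then reuse it (NFC).
      const std::string &V = U.getValue();
      std::cout << V << ' ' << V.size() << '\n';
    }

    int main() {
      Unknown U{"example"};
      useRepeated(U);
      useCached(U);
    }

Both functions print the same output; the refactor only shortens the call sites.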