[llvm] 9b959b5 - [LVI] Require context instruction in external API (NFCI)
Nikita Popov via llvm-commits
llvm-commits at lists.llvm.org
Sun Sep 27 09:07:34 PDT 2020
Author: Nikita Popov
Date: 2020-09-27T18:07:24+02:00
New Revision: 9b959b59dfaf87ba978480594d2cfcf15fe66218
URL: https://github.com/llvm/llvm-project/commit/9b959b59dfaf87ba978480594d2cfcf15fe66218
DIFF: https://github.com/llvm/llvm-project/commit/9b959b59dfaf87ba978480594d2cfcf15fe66218.diff
LOG: [LVI] Require context instruction in external API (NFCI)
Require CxtI in getConstant() and getConstantRange() APIs.
Accordingly drop the BB parameter, as it is implied by
CxtI->getParent().
This makes it impossible to forget passing the context instruction,
and makes the API contract clearer. The doc comments are updated
accordingly: the value holds at the context instruction, not at the
end of the block.
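
As an illustration of the new contract (a sketch based on the
signatures below, not code taken from this patch): queries are now
anchored at a context instruction, and the block is derived from it.

  #include "llvm/Analysis/LazyValueInfo.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  // Hypothetical helper, for illustration only.
  static void queryAt(LazyValueInfo &LVI, Value *V, Instruction *CxtI) {
    // Constant known to hold for V at CxtI (null if unknown).
    if (Constant *C = LVI.getConstant(V, CxtI))
      (void)C;

    // Range known to hold for V at CxtI; integer-typed values only.
    if (V->getType()->isIntegerTy()) {
      ConstantRange CR = LVI.getConstantRange(V, CxtI, /*UndefAllowed=*/true);
      (void)CR;
    }
  }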
Added:
Modified:
llvm/include/llvm/Analysis/LazyValueInfo.h
llvm/lib/Analysis/LazyValueInfo.cpp
llvm/lib/Transforms/IPO/AttributorAttributes.cpp
llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
llvm/lib/Transforms/Scalar/JumpThreading.cpp
llvm/lib/Transforms/Utils/LowerSwitch.cpp
Removed:
################################################################################
diff --git a/llvm/include/llvm/Analysis/LazyValueInfo.h b/llvm/include/llvm/Analysis/LazyValueInfo.h
index 1bc88235273e..cc8f26897bb9 100644
--- a/llvm/include/llvm/Analysis/LazyValueInfo.h
+++ b/llvm/include/llvm/Analysis/LazyValueInfo.h
@@ -76,15 +76,14 @@ class LazyValueInfo {
Tristate getPredicateAt(unsigned Pred, Value *V, Constant *C,
Instruction *CxtI);
- /// Determine whether the specified value is known to be a
- /// constant at the end of the specified block. Return null if not.
- Constant *getConstant(Value *V, BasicBlock *BB, Instruction *CxtI = nullptr);
+ /// Determine whether the specified value is known to be a constant at the
+ /// specified instruction. Return null if not.
+ Constant *getConstant(Value *V, Instruction *CxtI);
/// Return the ConstantRange constraint that is known to hold for the
- /// specified value at the end of the specified block. This may only be called
+ /// specified value at the specified instruction. This may only be called
/// on integer-typed Values.
- ConstantRange getConstantRange(Value *V, BasicBlock *BB,
- Instruction *CxtI = nullptr,
+ ConstantRange getConstantRange(Value *V, Instruction *CxtI,
bool UndefAllowed = true);
/// Determine whether the specified value is known to be a
diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
index cc364fc93337..475b7189a0b2 100644
--- a/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -1586,12 +1586,12 @@ static bool isKnownNonConstant(Value *V) {
return false;
}
-Constant *LazyValueInfo::getConstant(Value *V, BasicBlock *BB,
- Instruction *CxtI) {
+Constant *LazyValueInfo::getConstant(Value *V, Instruction *CxtI) {
// Bail out early if V is known not to be a Constant.
if (isKnownNonConstant(V))
return nullptr;
+ BasicBlock *BB = CxtI->getParent();
ValueLatticeElement Result =
getImpl(PImpl, AC, BB->getModule()).getValueInBlock(V, BB, CxtI);
@@ -1605,11 +1605,11 @@ Constant *LazyValueInfo::getConstant(Value *V, BasicBlock *BB,
return nullptr;
}
-ConstantRange LazyValueInfo::getConstantRange(Value *V, BasicBlock *BB,
- Instruction *CxtI,
+ConstantRange LazyValueInfo::getConstantRange(Value *V, Instruction *CxtI,
bool UndefAllowed) {
assert(V->getType()->isIntegerTy());
unsigned Width = V->getType()->getIntegerBitWidth();
+ BasicBlock *BB = CxtI->getParent();
ValueLatticeElement Result =
getImpl(PImpl, AC, BB->getModule()).getValueInBlock(V, BB, CxtI);
if (Result.isUnknown())
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index 7bec97059703..11b91ddd1a91 100644
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -6902,7 +6902,6 @@ struct AAValueConstantRangeImpl : AAValueConstantRange {
if (!LVI || !CtxI)
return getWorstState(getBitWidth());
return LVI->getConstantRange(&getAssociatedValue(),
- const_cast<BasicBlock *>(CtxI->getParent()),
const_cast<Instruction *>(CtxI));
}
diff --git a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
index 8b130984ba99..15505d1d41ca 100644
--- a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
+++ b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -129,7 +129,7 @@ static bool processSelect(SelectInst *S, LazyValueInfo *LVI) {
if (S->getType()->isVectorTy()) return false;
if (isa<Constant>(S->getCondition())) return false;
- Constant *C = LVI->getConstant(S->getCondition(), S->getParent(), S);
+ Constant *C = LVI->getConstant(S->getCondition(), S);
if (!C) return false;
ConstantInt *CI = dyn_cast<ConstantInt>(C);
@@ -286,7 +286,7 @@ static bool processMemAccess(Instruction *I, LazyValueInfo *LVI) {
if (isa<Constant>(Pointer)) return false;
- Constant *C = LVI->getConstant(Pointer, I->getParent(), I);
+ Constant *C = LVI->getConstant(Pointer, I);
if (!C) return false;
++NumMemAccess;
@@ -432,10 +432,8 @@ static bool processSwitch(SwitchInst *I, LazyValueInfo *LVI,
// See if we can prove that the given binary op intrinsic will not overflow.
static bool willNotOverflow(BinaryOpIntrinsic *BO, LazyValueInfo *LVI) {
- ConstantRange LRange = LVI->getConstantRange(
- BO->getLHS(), BO->getParent(), BO);
- ConstantRange RRange = LVI->getConstantRange(
- BO->getRHS(), BO->getParent(), BO);
+ ConstantRange LRange = LVI->getConstantRange(BO->getLHS(), BO);
+ ConstantRange RRange = LVI->getConstantRange(BO->getRHS(), BO);
ConstantRange NWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
BO->getBinaryOp(), RRange, BO->getNoWrapKind());
return NWRegion.contains(LRange);
@@ -567,7 +565,7 @@ static bool processCallSite(CallBase &CB, LazyValueInfo *LVI) {
if (V->getType()->isVectorTy()) continue;
if (isa<Constant>(V)) continue;
- Constant *C = LVI->getConstant(V, CB.getParent(), &CB);
+ Constant *C = LVI->getConstant(V, &CB);
if (!C) continue;
U.set(C);
Progress = true;
@@ -643,8 +641,7 @@ static bool narrowSDivOrSRem(BinaryOperator *Instr, LazyValueInfo *LVI) {
std::array<Optional<ConstantRange>, 2> CRs;
unsigned MinSignedBits = 0;
for (auto I : zip(Instr->operands(), CRs)) {
- std::get<1>(I) =
- LVI->getConstantRange(std::get<0>(I), Instr->getParent(), Instr);
+ std::get<1>(I) = LVI->getConstantRange(std::get<0>(I), Instr);
MinSignedBits = std::max(std::get<1>(I)->getMinSignedBits(), MinSignedBits);
}
@@ -696,8 +693,7 @@ static bool processUDivOrURem(BinaryOperator *Instr, LazyValueInfo *LVI) {
// of both of the operands?
unsigned MaxActiveBits = 0;
for (Value *Operand : Instr->operands()) {
- ConstantRange CR =
- LVI->getConstantRange(Operand, Instr->getParent(), Instr);
+ ConstantRange CR = LVI->getConstantRange(Operand, Instr);
MaxActiveBits = std::max(CR.getActiveBits(), MaxActiveBits);
}
// Don't shrink below 8 bits wide.
@@ -902,14 +898,12 @@ static bool processBinOp(BinaryOperator *BinOp, LazyValueInfo *LVI) {
if (NSW && NUW)
return false;
- BasicBlock *BB = BinOp->getParent();
-
Instruction::BinaryOps Opcode = BinOp->getOpcode();
Value *LHS = BinOp->getOperand(0);
Value *RHS = BinOp->getOperand(1);
- ConstantRange LRange = LVI->getConstantRange(LHS, BB, BinOp);
- ConstantRange RRange = LVI->getConstantRange(RHS, BB, BinOp);
+ ConstantRange LRange = LVI->getConstantRange(LHS, BinOp);
+ ConstantRange RRange = LVI->getConstantRange(RHS, BinOp);
bool Changed = false;
bool NewNUW = false, NewNSW = false;
@@ -937,7 +931,6 @@ static bool processAnd(BinaryOperator *BinOp, LazyValueInfo *LVI) {
// Pattern match (and lhs, C) where C includes a superset of bits which might
// be set in lhs. This is a common truncation idiom created by instcombine.
- BasicBlock *BB = BinOp->getParent();
Value *LHS = BinOp->getOperand(0);
ConstantInt *RHS = dyn_cast<ConstantInt>(BinOp->getOperand(1));
if (!RHS || !RHS->getValue().isMask())
@@ -946,7 +939,7 @@ static bool processAnd(BinaryOperator *BinOp, LazyValueInfo *LVI) {
// We can only replace the AND with LHS based on range info if the range does
// not include undef.
ConstantRange LRange =
- LVI->getConstantRange(LHS, BB, BinOp, /*UndefAllowed=*/false);
+ LVI->getConstantRange(LHS, BinOp, /*UndefAllowed=*/false);
if (!LRange.getUnsignedMax().ule(RHS->getValue()))
return false;
@@ -958,7 +951,7 @@ static bool processAnd(BinaryOperator *BinOp, LazyValueInfo *LVI) {
static Constant *getConstantAt(Value *V, Instruction *At, LazyValueInfo *LVI) {
- if (Constant *C = LVI->getConstant(V, At->getParent(), At))
+ if (Constant *C = LVI->getConstant(V, At))
return C;
// TODO: The following really should be sunk inside LVI's core algorithm, or
diff --git a/llvm/lib/Transforms/Scalar/JumpThreading.cpp b/llvm/lib/Transforms/Scalar/JumpThreading.cpp
index 8b1ad336c8a5..646b28a49470 100644
--- a/llvm/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/llvm/lib/Transforms/Scalar/JumpThreading.cpp
@@ -960,7 +960,8 @@ bool JumpThreadingPass::ComputeValueKnownInPredecessorsImpl(
}
// If all else fails, see if LVI can figure out a constant value for us.
- Constant *CI = LVI->getConstant(V, BB, CxtI);
+ assert(CxtI->getParent() == BB && "CxtI should be in BB");
+ Constant *CI = LVI->getConstant(V, CxtI);
if (Constant *KC = getKnownConstant(CI, Preference)) {
for (BasicBlock *Pred : predecessors(BB))
Result.emplace_back(KC, Pred);
diff --git a/llvm/lib/Transforms/Utils/LowerSwitch.cpp b/llvm/lib/Transforms/Utils/LowerSwitch.cpp
index 10a4420b1753..a297d6df82f3 100644
--- a/llvm/lib/Transforms/Utils/LowerSwitch.cpp
+++ b/llvm/lib/Transforms/Utils/LowerSwitch.cpp
@@ -400,7 +400,7 @@ void ProcessSwitchInst(SwitchInst *SI,
// TODO Shouldn't this create a signed range?
ConstantRange KnownBitsRange =
ConstantRange::fromKnownBits(Known, /*IsSigned=*/false);
- const ConstantRange LVIRange = LVI->getConstantRange(Val, OrigBlock, SI);
+ const ConstantRange LVIRange = LVI->getConstantRange(Val, SI);
ConstantRange ValRange = KnownBitsRange.intersectWith(LVIRange);
// We delegate removal of unreachable non-default cases to other passes. In
// the unlikely event that some of them survived, we just conservatively
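Note for out-of-tree callers (not part of this patch): CxtI is now
mandatory and the block is always taken from CxtI->getParent(), so the
mechanical update is e.g.

  // before
  ConstantRange CR = LVI->getConstantRange(V, I->getParent(), I);
  // after
  ConstantRange CR = LVI->getConstantRange(V, I);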