[llvm] 460d628 - [GVN] Clean up unused argument, unify style, modernize syntax (NFC)
Antonio Frighetto via llvm-commits
llvm-commits at lists.llvm.org
Fri Mar 28 08:00:31 PDT 2025
Author: Antonio Frighetto
Date: 2025-03-28T15:59:28+01:00
New Revision: 460d628d9006f63af3d296a90c32a60a347dbc2c
URL: https://github.com/llvm/llvm-project/commit/460d628d9006f63af3d296a90c32a60a347dbc2c
DIFF: https://github.com/llvm/llvm-project/commit/460d628d9006f63af3d296a90c32a60a347dbc2c.diff
LOG: [GVN] Clean up unused argument, unify style, modernize syntax (NFC)
Finalize code style overhaul in GVN, following up to
2a0946bc0dffca89d16cd9d5208ec9416ed8100e and 9deed7d2ef3a147c4e8410910967fde601359039.
Added:
Modified:
llvm/include/llvm/Transforms/Scalar/GVN.h
llvm/lib/Transforms/Scalar/GVN.cpp
Removed:
################################################################################
diff --git a/llvm/include/llvm/Transforms/Scalar/GVN.h b/llvm/include/llvm/Transforms/Scalar/GVN.h
index c8be390799836..ffdf57cd3d8f8 100644
--- a/llvm/include/llvm/Transforms/Scalar/GVN.h
+++ b/llvm/include/llvm/Transforms/Scalar/GVN.h
@@ -157,14 +157,14 @@ class GVNPass : public PassInfoMixin<GVNPass> {
/// as an efficient mechanism to determine the expression-wise equivalence of
/// two values.
class ValueTable {
- DenseMap<Value *, uint32_t> valueNumbering;
- DenseMap<Expression, uint32_t> expressionNumbering;
+ DenseMap<Value *, uint32_t> ValueNumbering;
+ DenseMap<Expression, uint32_t> ExpressionNumbering;
// Expressions is the vector of Expression. ExprIdx is the mapping from
// value number to the index of Expression in Expressions. We use it
// instead of a DenseMap because filling such mapping is faster than
// filling a DenseMap and the compile time is a little better.
- uint32_t nextExprNumber = 0;
+ uint32_t NextExprNumber = 0;
std::vector<Expression> Expressions;
std::vector<uint32_t> ExprIdx;
@@ -181,7 +181,7 @@ class GVNPass : public PassInfoMixin<GVNPass> {
MemoryDependenceResults *MD = nullptr;
DominatorTree *DT = nullptr;
- uint32_t nextValueNumber = 1;
+ uint32_t NextValueNumber = 1;
Expression createExpr(Instruction *I);
Expression createCmpExpr(unsigned Opcode, CmpInst::Predicate Predicate,
@@ -190,11 +190,11 @@ class GVNPass : public PassInfoMixin<GVNPass> {
Expression createGEPExpr(GetElementPtrInst *GEP);
uint32_t lookupOrAddCall(CallInst *C);
uint32_t phiTranslateImpl(const BasicBlock *BB, const BasicBlock *PhiBlock,
- uint32_t Num, GVNPass &Gvn);
+ uint32_t Num, GVNPass &GVN);
bool areCallValsEqual(uint32_t Num, uint32_t NewNum, const BasicBlock *Pred,
- const BasicBlock *PhiBlock, GVNPass &Gvn);
- std::pair<uint32_t, bool> assignExpNewValueNum(Expression &exp);
- bool areAllValsInBB(uint32_t num, const BasicBlock *BB, GVNPass &Gvn);
+ const BasicBlock *PhiBlock, GVNPass &GVN);
+ std::pair<uint32_t, bool> assignExpNewValueNum(Expression &Exp);
+ bool areAllValsInBB(uint32_t Num, const BasicBlock *BB, GVNPass &GVN);
public:
ValueTable();
@@ -208,17 +208,17 @@ class GVNPass : public PassInfoMixin<GVNPass> {
uint32_t lookupOrAddCmp(unsigned Opcode, CmpInst::Predicate Pred,
Value *LHS, Value *RHS);
uint32_t phiTranslate(const BasicBlock *BB, const BasicBlock *PhiBlock,
- uint32_t Num, GVNPass &Gvn);
+ uint32_t Num, GVNPass &GVN);
void eraseTranslateCacheEntry(uint32_t Num, const BasicBlock &CurrBlock);
bool exists(Value *V) const;
- void add(Value *V, uint32_t num);
+ void add(Value *V, uint32_t Num);
void clear();
- void erase(Value *v);
+ void erase(Value *V);
void setAliasAnalysis(AAResults *A) { AA = A; }
AAResults *getAliasAnalysis() const { return AA; }
void setMemDep(MemoryDependenceResults *M) { MD = M; }
void setDomTree(DominatorTree *D) { DT = D; }
- uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
+ uint32_t getNextUnusedValueNumber() { return NextValueNumber; }
void verifyRemoved(const Value *) const;
};
@@ -327,9 +327,9 @@ class GVNPass : public PassInfoMixin<GVNPass> {
OptimizationRemarkEmitter *ORE, MemorySSA *MSSA = nullptr);
// List of critical edges to be split between iterations.
- SmallVector<std::pair<Instruction *, unsigned>, 4> toSplit;
+ SmallVector<std::pair<Instruction *, unsigned>, 4> ToSplit;
- // Helper functions of redundant load elimination
+ // Helper functions of redundant load elimination.
bool processLoad(LoadInst *L);
bool processNonLocalLoad(LoadInst *L);
bool processAssumeIntrinsic(AssumeInst *II);
@@ -367,16 +367,16 @@ class GVNPass : public PassInfoMixin<GVNPass> {
MapVector<BasicBlock *, Value *> &AvailableLoads,
MapVector<BasicBlock *, LoadInst *> *CriticalEdgePredAndLoad);
- // Other helper routines
+ // Other helper routines.
bool processInstruction(Instruction *I);
bool processBlock(BasicBlock *BB);
- void dump(DenseMap<uint32_t, Value *> &d) const;
+ void dump(DenseMap<uint32_t, Value *> &Map) const;
bool iterateOnFunction(Function &F);
bool performPRE(Function &F);
bool performScalarPRE(Instruction *I);
bool performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
BasicBlock *Curr, unsigned int ValNo);
- Value *findLeader(const BasicBlock *BB, uint32_t num);
+ Value *findLeader(const BasicBlock *BB, uint32_t Num);
void cleanupGlobalSets();
void removeInstruction(Instruction *I);
void verifyRemoved(const Instruction *I) const;
diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp
index 93b629d531ad8..6233e8e2ee681 100644
--- a/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -138,36 +138,36 @@ static cl::opt<uint32_t> MaxNumInsnsPerBlock(
"(default = 100)"));
struct llvm::GVNPass::Expression {
- uint32_t opcode;
- bool commutative = false;
+ uint32_t Opcode;
+ bool Commutative = false;
// The type is not necessarily the result type of the expression, it may be
// any additional type needed to disambiguate the expression.
- Type *type = nullptr;
- SmallVector<uint32_t, 4> varargs;
+ Type *Ty = nullptr;
+ SmallVector<uint32_t, 4> VarArgs;
- AttributeList attrs;
+ AttributeList Attrs;
- Expression(uint32_t o = ~2U) : opcode(o) {}
+ Expression(uint32_t Op = ~2U) : Opcode(Op) {}
- bool operator==(const Expression &other) const {
- if (opcode != other.opcode)
+ bool operator==(const Expression &Other) const {
+ if (Opcode != Other.Opcode)
return false;
- if (opcode == ~0U || opcode == ~1U)
+ if (Opcode == ~0U || Opcode == ~1U)
return true;
- if (type != other.type)
+ if (Ty != Other.Ty)
return false;
- if (varargs != other.varargs)
+ if (VarArgs != Other.VarArgs)
return false;
- if ((!attrs.isEmpty() || !other.attrs.isEmpty()) &&
- !attrs.intersectWith(type->getContext(), other.attrs).has_value())
+ if ((!Attrs.isEmpty() || !Other.Attrs.isEmpty()) &&
+ !Attrs.intersectWith(Ty->getContext(), Other.Attrs).has_value())
return false;
return true;
}
friend hash_code hash_value(const Expression &Value) {
return hash_combine(
- Value.opcode, Value.type,
- hash_combine_range(Value.varargs.begin(), Value.varargs.end()));
+ Value.Opcode, Value.Ty,
+ hash_combine_range(Value.VarArgs.begin(), Value.VarArgs.end()));
}
};
@@ -177,10 +177,10 @@ template <> struct DenseMapInfo<GVNPass::Expression> {
static inline GVNPass::Expression getEmptyKey() { return ~0U; }
static inline GVNPass::Expression getTombstoneKey() { return ~1U; }
- static unsigned getHashValue(const GVNPass::Expression &e) {
+ static unsigned getHashValue(const GVNPass::Expression &E) {
using llvm::hash_value;
- return static_cast<unsigned>(hash_value(e));
+ return static_cast<unsigned>(hash_value(E));
}
static bool isEqual(const GVNPass::Expression &LHS,
@@ -286,8 +286,7 @@ struct llvm::gvn::AvailableValue {
/// Emit code at the specified insertion point to adjust the value defined
/// here to the specified type. This handles various coercion cases.
- Value *MaterializeAdjustedValue(LoadInst *Load, Instruction *InsertPt,
- GVNPass &gvn) const;
+ Value *MaterializeAdjustedValue(LoadInst *Load, Instruction *InsertPt) const;
};
/// Represents an AvailableValue which can be rematerialized at the end of
@@ -296,7 +295,7 @@ struct llvm::gvn::AvailableValueInBlock {
/// BB - The basic block in question.
BasicBlock *BB = nullptr;
- /// AV - The actual available value
+ /// AV - The actual available value.
AvailableValue AV;
static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV) {
@@ -322,8 +321,8 @@ struct llvm::gvn::AvailableValueInBlock {
/// Emit code at the end of this block to adjust the value defined here to
/// the specified type. This handles various coercion cases.
- Value *MaterializeAdjustedValue(LoadInst *Load, GVNPass &gvn) const {
- return AV.MaterializeAdjustedValue(Load, BB->getTerminator(), gvn);
+ Value *MaterializeAdjustedValue(LoadInst *Load) const {
+ return AV.MaterializeAdjustedValue(Load, BB->getTerminator());
}
};
@@ -333,18 +332,18 @@ struct llvm::gvn::AvailableValueInBlock {
GVNPass::Expression GVNPass::ValueTable::createExpr(Instruction *I) {
Expression E;
- E.type = I->getType();
- E.opcode = I->getOpcode();
+ E.Ty = I->getType();
+ E.Opcode = I->getOpcode();
if (const GCRelocateInst *GCR = dyn_cast<GCRelocateInst>(I)) {
// gc.relocate is 'special' call: its second and third operands are
// not real values, but indices into statepoint's argument list.
// Use the refered to values for purposes of identity.
- E.varargs.push_back(lookupOrAdd(GCR->getOperand(0)));
- E.varargs.push_back(lookupOrAdd(GCR->getBasePtr()));
- E.varargs.push_back(lookupOrAdd(GCR->getDerivedPtr()));
+ E.VarArgs.push_back(lookupOrAdd(GCR->getOperand(0)));
+ E.VarArgs.push_back(lookupOrAdd(GCR->getBasePtr()));
+ E.VarArgs.push_back(lookupOrAdd(GCR->getDerivedPtr()));
} else {
for (Use &Op : I->operands())
- E.varargs.push_back(lookupOrAdd(Op));
+ E.VarArgs.push_back(lookupOrAdd(Op));
}
if (I->isCommutative()) {
// Ensure that commutative instructions that only differ by a permutation
@@ -352,27 +351,27 @@ GVNPass::Expression GVNPass::ValueTable::createExpr(Instruction *I) {
// numbers. Since commutative operands are the 1st two operands it is more
// efficient to sort by hand rather than using, say, std::sort.
assert(I->getNumOperands() >= 2 && "Unsupported commutative instruction!");
- if (E.varargs[0] > E.varargs[1])
- std::swap(E.varargs[0], E.varargs[1]);
- E.commutative = true;
+ if (E.VarArgs[0] > E.VarArgs[1])
+ std::swap(E.VarArgs[0], E.VarArgs[1]);
+ E.Commutative = true;
}
if (auto *C = dyn_cast<CmpInst>(I)) {
// Sort the operand value numbers so x<y and y>x get the same value number.
CmpInst::Predicate Predicate = C->getPredicate();
- if (E.varargs[0] > E.varargs[1]) {
- std::swap(E.varargs[0], E.varargs[1]);
+ if (E.VarArgs[0] > E.VarArgs[1]) {
+ std::swap(E.VarArgs[0], E.VarArgs[1]);
Predicate = CmpInst::getSwappedPredicate(Predicate);
}
- E.opcode = (C->getOpcode() << 8) | Predicate;
- E.commutative = true;
+ E.Opcode = (C->getOpcode() << 8) | Predicate;
+ E.Commutative = true;
} else if (auto *IVI = dyn_cast<InsertValueInst>(I)) {
- E.varargs.append(IVI->idx_begin(), IVI->idx_end());
+ E.VarArgs.append(IVI->idx_begin(), IVI->idx_end());
} else if (auto *SVI = dyn_cast<ShuffleVectorInst>(I)) {
ArrayRef<int> ShuffleMask = SVI->getShuffleMask();
- E.varargs.append(ShuffleMask.begin(), ShuffleMask.end());
+ E.VarArgs.append(ShuffleMask.begin(), ShuffleMask.end());
} else if (auto *CB = dyn_cast<CallBase>(I)) {
- E.attrs = CB->getAttributes();
+ E.Attrs = CB->getAttributes();
}
return E;
@@ -383,17 +382,17 @@ GVNPass::Expression GVNPass::ValueTable::createCmpExpr(
assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
"Not a comparison!");
Expression E;
- E.type = CmpInst::makeCmpResultType(LHS->getType());
- E.varargs.push_back(lookupOrAdd(LHS));
- E.varargs.push_back(lookupOrAdd(RHS));
+ E.Ty = CmpInst::makeCmpResultType(LHS->getType());
+ E.VarArgs.push_back(lookupOrAdd(LHS));
+ E.VarArgs.push_back(lookupOrAdd(RHS));
// Sort the operand value numbers so x<y and y>x get the same value number.
- if (E.varargs[0] > E.varargs[1]) {
- std::swap(E.varargs[0], E.varargs[1]);
+ if (E.VarArgs[0] > E.VarArgs[1]) {
+ std::swap(E.VarArgs[0], E.VarArgs[1]);
Predicate = CmpInst::getSwappedPredicate(Predicate);
}
- E.opcode = (Opcode << 8) | Predicate;
- E.commutative = true;
+ E.Opcode = (Opcode << 8) | Predicate;
+ E.Commutative = true;
return E;
}
@@ -401,27 +400,27 @@ GVNPass::Expression
GVNPass::ValueTable::createExtractvalueExpr(ExtractValueInst *EI) {
assert(EI && "Not an ExtractValueInst?");
Expression E;
- E.type = EI->getType();
- E.opcode = 0;
+ E.Ty = EI->getType();
+ E.Opcode = 0;
WithOverflowInst *WO = dyn_cast<WithOverflowInst>(EI->getAggregateOperand());
if (WO != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) {
// EI is an extract from one of our with.overflow intrinsics. Synthesize
// a semantically equivalent expression instead of an extract value
// expression.
- E.opcode = WO->getBinaryOp();
- E.varargs.push_back(lookupOrAdd(WO->getLHS()));
- E.varargs.push_back(lookupOrAdd(WO->getRHS()));
+ E.Opcode = WO->getBinaryOp();
+ E.VarArgs.push_back(lookupOrAdd(WO->getLHS()));
+ E.VarArgs.push_back(lookupOrAdd(WO->getRHS()));
return E;
}
// Not a recognised intrinsic. Fall back to producing an extract value
// expression.
- E.opcode = EI->getOpcode();
+ E.Opcode = EI->getOpcode();
for (Use &Op : EI->operands())
- E.varargs.push_back(lookupOrAdd(Op));
+ E.VarArgs.push_back(lookupOrAdd(Op));
- append_range(E.varargs, EI->indices());
+ append_range(E.VarArgs, EI->indices());
return E;
}
@@ -437,23 +436,23 @@ GVNPass::Expression GVNPass::ValueTable::createGEPExpr(GetElementPtrInst *GEP) {
// Convert into offset representation, to recognize equivalent address
// calculations that use different type encoding.
LLVMContext &Context = GEP->getContext();
- E.opcode = GEP->getOpcode();
- E.type = nullptr;
- E.varargs.push_back(lookupOrAdd(GEP->getPointerOperand()));
- for (const auto &Pair : VariableOffsets) {
- E.varargs.push_back(lookupOrAdd(Pair.first));
- E.varargs.push_back(lookupOrAdd(ConstantInt::get(Context, Pair.second)));
+ E.Opcode = GEP->getOpcode();
+ E.Ty = nullptr;
+ E.VarArgs.push_back(lookupOrAdd(GEP->getPointerOperand()));
+ for (const auto &[V, Scale] : VariableOffsets) {
+ E.VarArgs.push_back(lookupOrAdd(V));
+ E.VarArgs.push_back(lookupOrAdd(ConstantInt::get(Context, Scale)));
}
if (!ConstantOffset.isZero())
- E.varargs.push_back(
+ E.VarArgs.push_back(
lookupOrAdd(ConstantInt::get(Context, ConstantOffset)));
} else {
// If converting to offset representation fails (for scalable vectors),
- // fall back to type-based implementation:
- E.opcode = GEP->getOpcode();
- E.type = GEP->getSourceElementType();
+ // fall back to type-based implementation.
+ E.Opcode = GEP->getOpcode();
+ E.Ty = GEP->getSourceElementType();
for (Use &Op : GEP->operands())
- E.varargs.push_back(lookupOrAdd(Op));
+ E.VarArgs.push_back(lookupOrAdd(Op));
}
return E;
}
@@ -470,10 +469,10 @@ GVNPass::ValueTable &
GVNPass::ValueTable::operator=(const GVNPass::ValueTable &Arg) = default;
/// add - Insert a value into the table with a specified value number.
-void GVNPass::ValueTable::add(Value *V, uint32_t num) {
- valueNumbering.insert(std::make_pair(V, num));
+void GVNPass::ValueTable::add(Value *V, uint32_t Num) {
+ ValueNumbering.insert(std::make_pair(V, Num));
if (PHINode *PN = dyn_cast<PHINode>(V))
- NumberingPhi[num] = PN;
+ NumberingPhi[Num] = PN;
}
uint32_t GVNPass::ValueTable::lookupOrAddCall(CallInst *C) {
@@ -485,38 +484,38 @@ uint32_t GVNPass::ValueTable::lookupOrAddCall(CallInst *C) {
// optimizations. Revert this one when we detect the memory
// accessing kind more precisely.
if (C->getFunction()->isPresplitCoroutine()) {
- valueNumbering[C] = nextValueNumber;
- return nextValueNumber++;
+ ValueNumbering[C] = NextValueNumber;
+ return NextValueNumber++;
}
// Do not combine convergent calls since they implicitly depend on the set of
// threads that is currently executing, and they might be in different basic
// blocks.
if (C->isConvergent()) {
- valueNumbering[C] = nextValueNumber;
- return nextValueNumber++;
+ ValueNumbering[C] = NextValueNumber;
+ return NextValueNumber++;
}
if (AA->doesNotAccessMemory(C)) {
- Expression exp = createExpr(C);
- uint32_t e = assignExpNewValueNum(exp).first;
- valueNumbering[C] = e;
- return e;
+ Expression Exp = createExpr(C);
+ uint32_t E = assignExpNewValueNum(Exp).first;
+ ValueNumbering[C] = E;
+ return E;
}
if (MD && AA->onlyReadsMemory(C)) {
Expression Exp = createExpr(C);
- auto ValNum = assignExpNewValueNum(Exp);
- if (ValNum.second) {
- valueNumbering[C] = ValNum.first;
- return ValNum.first;
+ auto [E, IsValNumNew] = assignExpNewValueNum(Exp);
+ if (IsValNumNew) {
+ ValueNumbering[C] = E;
+ return E;
}
MemDepResult LocalDep = MD->getDependency(C);
if (!LocalDep.isDef() && !LocalDep.isNonLocal()) {
- valueNumbering[C] = nextValueNumber;
- return nextValueNumber++;
+ ValueNumbering[C] = NextValueNumber;
+ return NextValueNumber++;
}
if (LocalDep.isDef()) {
@@ -525,21 +524,21 @@ uint32_t GVNPass::ValueTable::lookupOrAddCall(CallInst *C) {
CallInst *LocalDepCall = dyn_cast<CallInst>(LocalDep.getInst());
if (!LocalDepCall || LocalDepCall->arg_size() != C->arg_size()) {
- valueNumbering[C] = nextValueNumber;
- return nextValueNumber++;
+ ValueNumbering[C] = NextValueNumber;
+ return NextValueNumber++;
}
for (unsigned I = 0, E = C->arg_size(); I < E; ++I) {
uint32_t CVN = lookupOrAdd(C->getArgOperand(I));
uint32_t LocalDepCallVN = lookupOrAdd(LocalDepCall->getArgOperand(I));
if (CVN != LocalDepCallVN) {
- valueNumbering[C] = nextValueNumber;
- return nextValueNumber++;
+ ValueNumbering[C] = NextValueNumber;
+ return NextValueNumber++;
}
}
uint32_t V = lookupOrAdd(LocalDepCall);
- valueNumbering[C] = V;
+ ValueNumbering[C] = V;
return V;
}
@@ -574,48 +573,48 @@ uint32_t GVNPass::ValueTable::lookupOrAddCall(CallInst *C) {
}
if (!CDep) {
- valueNumbering[C] = nextValueNumber;
- return nextValueNumber++;
+ ValueNumbering[C] = NextValueNumber;
+ return NextValueNumber++;
}
if (CDep->arg_size() != C->arg_size()) {
- valueNumbering[C] = nextValueNumber;
- return nextValueNumber++;
+ ValueNumbering[C] = NextValueNumber;
+ return NextValueNumber++;
}
for (unsigned I = 0, E = C->arg_size(); I < E; ++I) {
uint32_t CVN = lookupOrAdd(C->getArgOperand(I));
uint32_t CDepVN = lookupOrAdd(CDep->getArgOperand(I));
if (CVN != CDepVN) {
- valueNumbering[C] = nextValueNumber;
- return nextValueNumber++;
+ ValueNumbering[C] = NextValueNumber;
+ return NextValueNumber++;
}
}
uint32_t V = lookupOrAdd(CDep);
- valueNumbering[C] = V;
+ ValueNumbering[C] = V;
return V;
}
- valueNumbering[C] = nextValueNumber;
- return nextValueNumber++;
+ ValueNumbering[C] = NextValueNumber;
+ return NextValueNumber++;
}
/// Returns true if a value number exists for the specified value.
bool GVNPass::ValueTable::exists(Value *V) const {
- return valueNumbering.contains(V);
+ return ValueNumbering.contains(V);
}
-/// lookup_or_add - Returns the value number for the specified value, assigning
+/// lookupOrAdd - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t GVNPass::ValueTable::lookupOrAdd(Value *V) {
- DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
- if (VI != valueNumbering.end())
+ DenseMap<Value *, uint32_t>::iterator VI = ValueNumbering.find(V);
+ if (VI != ValueNumbering.end())
return VI->second;
auto *I = dyn_cast<Instruction>(V);
if (!I) {
- valueNumbering[V] = nextValueNumber;
- return nextValueNumber++;
+ ValueNumbering[V] = NextValueNumber;
+ return NextValueNumber++;
}
Expression Exp;
@@ -671,28 +670,28 @@ uint32_t GVNPass::ValueTable::lookupOrAdd(Value *V) {
Exp = createExtractvalueExpr(cast<ExtractValueInst>(I));
break;
case Instruction::PHI:
- valueNumbering[V] = nextValueNumber;
- NumberingPhi[nextValueNumber] = cast<PHINode>(V);
- return nextValueNumber++;
+ ValueNumbering[V] = NextValueNumber;
+ NumberingPhi[NextValueNumber] = cast<PHINode>(V);
+ return NextValueNumber++;
default:
- valueNumbering[V] = nextValueNumber;
- return nextValueNumber++;
+ ValueNumbering[V] = NextValueNumber;
+ return NextValueNumber++;
}
uint32_t E = assignExpNewValueNum(Exp).first;
- valueNumbering[V] = E;
+ ValueNumbering[V] = E;
return E;
}
/// Returns the value number of the specified value. Fails if
/// the value has not yet been numbered.
uint32_t GVNPass::ValueTable::lookup(Value *V, bool Verify) const {
- DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
+ DenseMap<Value *, uint32_t>::const_iterator VI = ValueNumbering.find(V);
if (Verify) {
- assert(VI != valueNumbering.end() && "Value not numbered?");
+ assert(VI != ValueNumbering.end() && "Value not numbered?");
return VI->second;
}
- return (VI != valueNumbering.end()) ? VI->second : 0;
+ return (VI != ValueNumbering.end()) ? VI->second : 0;
}
/// Returns the value number of the given comparison,
@@ -708,20 +707,20 @@ uint32_t GVNPass::ValueTable::lookupOrAddCmp(unsigned Opcode,
/// Remove all entries from the ValueTable.
void GVNPass::ValueTable::clear() {
- valueNumbering.clear();
- expressionNumbering.clear();
+ ValueNumbering.clear();
+ ExpressionNumbering.clear();
NumberingPhi.clear();
PhiTranslateTable.clear();
- nextValueNumber = 1;
+ NextValueNumber = 1;
Expressions.clear();
ExprIdx.clear();
- nextExprNumber = 0;
+ NextExprNumber = 0;
}
/// Remove a value from the value numbering.
void GVNPass::ValueTable::erase(Value *V) {
- uint32_t Num = valueNumbering.lookup(V);
- valueNumbering.erase(V);
+ uint32_t Num = ValueNumbering.lookup(V);
+ ValueNumbering.erase(V);
// If V is PHINode, V <--> value number is an one-to-one mapping.
if (isa<PHINode>(V))
NumberingPhi.erase(Num);
@@ -730,7 +729,7 @@ void GVNPass::ValueTable::erase(Value *V) {
/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void GVNPass::ValueTable::verifyRemoved(const Value *V) const {
- assert(!valueNumbering.contains(V) &&
+ assert(!ValueNumbering.contains(V) &&
"Inst still occurs in value numbering map!");
}
@@ -879,11 +878,11 @@ void GVNPass::printPipeline(
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
-LLVM_DUMP_METHOD void GVNPass::dump(DenseMap<uint32_t, Value *> &d) const {
+LLVM_DUMP_METHOD void GVNPass::dump(DenseMap<uint32_t, Value *> &Map) const {
errs() << "{\n";
- for (auto &I : d) {
- errs() << I.first << "\n";
- I.second->dump();
+ for (const auto &[Num, Exp] : Map) {
+ errs() << Num << "\n";
+ Exp->dump();
}
errs() << "}\n";
}
@@ -974,7 +973,7 @@ static bool IsValueFullyAvailableInBlock(
#endif
// If the block isn't marked as fixpoint yet
- // (the Unavailable and Available states are fixpoints)
+ // (the Unavailable and Available states are fixpoints).
auto MarkAsFixpointAndEnqueueSuccessors =
[&](BasicBlock *BB, AvailabilityState FixpointState) {
auto It = FullyAvailableBlocks.find(BB);
@@ -1046,15 +1045,15 @@ static void replaceValuesPerBlockEntry(
static Value *
ConstructSSAForLoadSet(LoadInst *Load,
SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
- GVNPass &gvn) {
+ GVNPass &GVN) {
// Check for the fully redundant, dominating load case. In this case, we can
// just use the dominating value directly.
if (ValuesPerBlock.size() == 1 &&
- gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
+ GVN.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
Load->getParent())) {
assert(!ValuesPerBlock[0].AV.isUndefValue() &&
"Dead BB dominate this block");
- return ValuesPerBlock[0].MaterializeAdjustedValue(Load, gvn);
+ return ValuesPerBlock[0].MaterializeAdjustedValue(Load);
}
// Otherwise, we have to construct SSA form.
@@ -1080,7 +1079,7 @@ ConstructSSAForLoadSet(LoadInst *Load,
(AV.AV.isCoercedLoadValue() && AV.AV.getCoercedLoadValue() == Load)))
continue;
- SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(Load, gvn));
+ SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(Load));
}
// Perform PHI construction.
@@ -1088,8 +1087,7 @@ ConstructSSAForLoadSet(LoadInst *Load,
}
Value *AvailableValue::MaterializeAdjustedValue(LoadInst *Load,
- Instruction *InsertPt,
- GVNPass &gvn) const {
+ Instruction *InsertPt) const {
Value *Res;
Type *LoadTy = Load->getType();
const DataLayout &DL = Load->getDataLayout();
@@ -1187,7 +1185,7 @@ static void reportMayClobberedLoad(LoadInst *Load, MemDepResult DepInfo,
if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U))) {
auto *I = cast<Instruction>(U);
if (I->getFunction() == Load->getFunction() && DT->dominates(I, Load)) {
- // Use the most immediately dominating value
+ // Use the most immediately dominating value.
if (OtherAccess) {
if (DT->dominates(OtherAccess, I))
OtherAccess = I;
@@ -1215,7 +1213,8 @@ static void reportMayClobberedLoad(LoadInst *Load, MemDepResult DepInfo,
// the clobber, but neither lies strictly after the other.
OtherAccess = nullptr;
break;
- } // else: keep current OtherAccess since it lies between U and Load
+ } // else: keep current OtherAccess since it lies between U and
+ // Load.
} else {
OtherAccess = I;
}
@@ -1319,7 +1318,7 @@ GVNPass::AnalyzeLoadAvailability(LoadInst *Load, MemDepResult DepInfo,
}
}
- // Nothing known about this clobber, have to be conservative
+ // Nothing known about this clobber, have to be conservative.
LLVM_DEBUG(
// fast print dep, using operator<< on instruction is too slow.
dbgs() << "GVN: load "; Load->printAsOperand(dbgs());
@@ -1389,7 +1388,7 @@ GVNPass::AnalyzeLoadAvailability(LoadInst *Load, MemDepResult DepInfo,
return AvailableValue::getSelect(Sel, V1, V2);
}
- // Unknown def - must be conservative
+ // Unknown def - must be conservative.
LLVM_DEBUG(
// fast print dep, using operator<< on instruction is too slow.
dbgs() << "GVN: load "; Load->printAsOperand(dbgs());
@@ -1547,11 +1546,11 @@ void GVNPass::eliminatePartiallyRedundantLoad(
// For PredBB in CriticalEdgePredAndLoad we need to replace the uses of old
// load instruction with the new created load instruction.
if (CriticalEdgePredAndLoad) {
- auto I = CriticalEdgePredAndLoad->find(UnavailableBlock);
- if (I != CriticalEdgePredAndLoad->end()) {
+ auto It = CriticalEdgePredAndLoad->find(UnavailableBlock);
+ if (It != CriticalEdgePredAndLoad->end()) {
++NumPRELoadMoved2CEPred;
ICF->insertInstructionTo(NewLoad, UnavailableBlock);
- LoadInst *OldLoad = I->second;
+ LoadInst *OldLoad = It->second;
combineMetadataForCSE(NewLoad, OldLoad, false);
OldLoad->replaceAllUsesWith(NewLoad);
replaceValuesPerBlockEntry(ValuesPerBlock, OldLoad, NewLoad);
@@ -1926,7 +1925,7 @@ static void reportLoadElim(LoadInst *Load, Value *AvailableValue,
/// Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVNPass::processNonLocalLoad(LoadInst *Load) {
- // non-local speculations are not allowed under asan.
+ // Non-local speculations are not allowed under asan.
if (Load->getParent()->getParent()->hasFnAttribute(
Attribute::SanitizeAddress) ||
Load->getParent()->getParent()->hasFnAttribute(
@@ -1962,7 +1961,7 @@ bool GVNPass::processNonLocalLoad(LoadInst *Load) {
Changed |= performScalarPRE(I);
}
- // Step 2: Analyze the availability of the load
+ // Step 2: Analyze the availability of the load.
AvailValInBlkVect ValuesPerBlock;
UnavailBlkVect UnavailableBlocks;
AnalyzeLoadAvailability(Load, Deps, ValuesPerBlock, UnavailableBlocks);
@@ -2016,7 +2015,7 @@ bool GVNPass::processNonLocalLoad(LoadInst *Load) {
}
static bool hasUsersIn(Value *V, BasicBlock *BB) {
- return llvm::any_of(V->users(), [BB](User *U) {
+ return any_of(V->users(), [BB](User *U) {
auto *I = dyn_cast<Instruction>(U);
return I && I->getParent() == BB;
});
@@ -2122,7 +2121,7 @@ bool GVNPass::processAssumeIntrinsic(AssumeInst *IntrinsicI) {
// Heuristically pick the better replacement -- the choice of heuristic
// isn't terribly important here, but the fact we canonicalize on some
// replacement is for exposing other simplifications.
- // TODO: pull this out as a helper function and reuse w/existing
+ // TODO: pull this out as a helper function and reuse w/ existing
// (slightly different) logic.
if (isa<Constant>(CmpLHS) && !isa<Constant>(CmpRHS))
std::swap(CmpLHS, CmpRHS);
@@ -2148,8 +2147,7 @@ bool GVNPass::processAssumeIntrinsic(AssumeInst *IntrinsicI) {
<< *CmpRHS << " in block "
<< IntrinsicI->getParent()->getName() << "\n");
-
- // Setup the replacement map - this handles uses within the same block
+ // Setup the replacement map - this handles uses within the same block.
if (hasUsersIn(CmpLHS, IntrinsicI->getParent()))
ReplaceOperandsWithMap[CmpLHS] = CmpRHS;
@@ -2173,7 +2171,7 @@ bool GVNPass::processLoad(LoadInst *L) {
if (!MD)
return false;
- // This code hasn't been audited for ordered or volatile memory access
+ // This code hasn't been audited for ordered or volatile memory access.
if (!L->isUnordered())
return false;
@@ -2189,9 +2187,9 @@ bool GVNPass::processLoad(LoadInst *L) {
if (Dep.isNonLocal())
return processNonLocalLoad(L);
- // Only handle the local case below
+ // Only handle the local case below.
if (!Dep.isLocal()) {
- // This might be a NonFuncLocal or an Unknown
+ // This might be a NonFuncLocal or an Unknown.
LLVM_DEBUG(
// fast print dep, using operator<< on instruction is too slow.
dbgs() << "GVN: load "; L->printAsOperand(dbgs());
@@ -2203,7 +2201,7 @@ bool GVNPass::processLoad(LoadInst *L) {
if (!AV)
return false;
- Value *AvailableValue = AV->MaterializeAdjustedValue(L, L, *this);
+ Value *AvailableValue = AV->MaterializeAdjustedValue(L, L);
// MaterializeAdjustedValue is responsible for combining metadata.
ICF->removeUsersOf(L);
@@ -2224,35 +2222,35 @@ bool GVNPass::processLoad(LoadInst *L) {
/// second field showing whether it is a value number newly created.
std::pair<uint32_t, bool>
GVNPass::ValueTable::assignExpNewValueNum(Expression &Exp) {
- uint32_t &e = expressionNumbering[Exp];
- bool CreateNewValNum = !e;
+ uint32_t &E = ExpressionNumbering[Exp];
+ bool CreateNewValNum = !E;
if (CreateNewValNum) {
Expressions.push_back(Exp);
- if (ExprIdx.size() < nextValueNumber + 1)
- ExprIdx.resize(nextValueNumber * 2);
- e = nextValueNumber;
- ExprIdx[nextValueNumber++] = nextExprNumber++;
+ if (ExprIdx.size() < NextValueNumber + 1)
+ ExprIdx.resize(NextValueNumber * 2);
+ E = NextValueNumber;
+ ExprIdx[NextValueNumber++] = NextExprNumber++;
}
- return {e, CreateNewValNum};
+ return {E, CreateNewValNum};
}
/// Return whether all the values related with the same \p num are
/// defined in \p BB.
bool GVNPass::ValueTable::areAllValsInBB(uint32_t Num, const BasicBlock *BB,
- GVNPass &Gvn) {
+ GVNPass &GVN) {
return all_of(
- Gvn.LeaderTable.getLeaders(Num),
+ GVN.LeaderTable.getLeaders(Num),
[=](const LeaderMap::LeaderTableEntry &L) { return L.BB == BB; });
}
/// Wrap phiTranslateImpl to provide caching functionality.
uint32_t GVNPass::ValueTable::phiTranslate(const BasicBlock *Pred,
const BasicBlock *PhiBlock,
- uint32_t Num, GVNPass &Gvn) {
+ uint32_t Num, GVNPass &GVN) {
auto FindRes = PhiTranslateTable.find({Num, Pred});
if (FindRes != PhiTranslateTable.end())
return FindRes->second;
- uint32_t NewNum = phiTranslateImpl(Pred, PhiBlock, Num, Gvn);
+ uint32_t NewNum = phiTranslateImpl(Pred, PhiBlock, Num, GVN);
PhiTranslateTable.insert({{Num, Pred}, NewNum});
return NewNum;
}
@@ -2262,9 +2260,9 @@ uint32_t GVNPass::ValueTable::phiTranslate(const BasicBlock *Pred,
bool GVNPass::ValueTable::areCallValsEqual(uint32_t Num, uint32_t NewNum,
const BasicBlock *Pred,
const BasicBlock *PhiBlock,
- GVNPass &Gvn) {
+ GVNPass &GVN) {
CallInst *Call = nullptr;
- auto Leaders = Gvn.LeaderTable.getLeaders(Num);
+ auto Leaders = GVN.LeaderTable.getLeaders(Num);
for (const auto &Entry : Leaders) {
Call = dyn_cast<CallInst>(Entry.Val);
if (Call && Call->getParent() == PhiBlock)
@@ -2277,15 +2275,15 @@ bool GVNPass::ValueTable::areCallValsEqual(uint32_t Num, uint32_t NewNum,
if (!MD || !AA->onlyReadsMemory(Call))
return false;
- MemDepResult local_dep = MD->getDependency(Call);
- if (!local_dep.isNonLocal())
+ MemDepResult LocalDep = MD->getDependency(Call);
+ if (!LocalDep.isNonLocal())
return false;
- const MemoryDependenceResults::NonLocalDepInfo &deps =
+ const MemoryDependenceResults::NonLocalDepInfo &Deps =
MD->getNonLocalCallDependency(Call);
// Check to see if the Call has no function local clobber.
- for (const NonLocalDepEntry &D : deps) {
+ for (const NonLocalDepEntry &D : Deps) {
if (D.getResult().isNonFuncLocal())
return true;
}
@@ -2296,11 +2294,11 @@ bool GVNPass::ValueTable::areCallValsEqual(uint32_t Num, uint32_t NewNum,
/// the phis in BB.
uint32_t GVNPass::ValueTable::phiTranslateImpl(const BasicBlock *Pred,
const BasicBlock *PhiBlock,
- uint32_t Num, GVNPass &Gvn) {
+ uint32_t Num, GVNPass &GVN) {
if (PHINode *PN = NumberingPhi[Num]) {
- for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
- if (PN->getParent() == PhiBlock && PN->getIncomingBlock(i) == Pred)
- if (uint32_t TransVal = lookup(PN->getIncomingValue(i), false))
+ for (unsigned I = 0; I != PN->getNumIncomingValues(); ++I) {
+ if (PN->getParent() == PhiBlock && PN->getIncomingBlock(I) == Pred)
+ if (uint32_t TransVal = lookup(PN->getIncomingValue(I), false))
return TransVal;
}
return Num;
@@ -2309,39 +2307,39 @@ uint32_t GVNPass::ValueTable::phiTranslateImpl(const BasicBlock *Pred,
// If any value related with Num is defined in a BB other than
// PhiBlock, it cannot depend on a phi in PhiBlock without going through
// a backedge. We can do an early exit in that case to save compile time.
- if (!areAllValsInBB(Num, PhiBlock, Gvn))
+ if (!areAllValsInBB(Num, PhiBlock, GVN))
return Num;
if (Num >= ExprIdx.size() || ExprIdx[Num] == 0)
return Num;
Expression Exp = Expressions[ExprIdx[Num]];
- for (unsigned I = 0; I < Exp.varargs.size(); I++) {
+ for (unsigned I = 0; I < Exp.VarArgs.size(); I++) {
// For InsertValue and ExtractValue, some varargs are index numbers
// instead of value numbers. Those index numbers should not be
// translated.
- if ((I > 1 && Exp.opcode == Instruction::InsertValue) ||
- (I > 0 && Exp.opcode == Instruction::ExtractValue) ||
- (I > 1 && Exp.opcode == Instruction::ShuffleVector))
+ if ((I > 1 && Exp.Opcode == Instruction::InsertValue) ||
+ (I > 0 && Exp.Opcode == Instruction::ExtractValue) ||
+ (I > 1 && Exp.Opcode == Instruction::ShuffleVector))
continue;
- Exp.varargs[I] = phiTranslate(Pred, PhiBlock, Exp.varargs[I], Gvn);
+ Exp.VarArgs[I] = phiTranslate(Pred, PhiBlock, Exp.VarArgs[I], GVN);
}
- if (Exp.commutative) {
- assert(Exp.varargs.size() >= 2 && "Unsupported commutative instruction!");
- if (Exp.varargs[0] > Exp.varargs[1]) {
- std::swap(Exp.varargs[0], Exp.varargs[1]);
- uint32_t Opcode = Exp.opcode >> 8;
+ if (Exp.Commutative) {
+ assert(Exp.VarArgs.size() >= 2 && "Unsupported commutative instruction!");
+ if (Exp.VarArgs[0] > Exp.VarArgs[1]) {
+ std::swap(Exp.VarArgs[0], Exp.VarArgs[1]);
+ uint32_t Opcode = Exp.Opcode >> 8;
if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)
- Exp.opcode = (Opcode << 8) |
+ Exp.Opcode = (Opcode << 8) |
CmpInst::getSwappedPredicate(
- static_cast<CmpInst::Predicate>(Exp.opcode & 255));
+ static_cast<CmpInst::Predicate>(Exp.Opcode & 255));
}
}
- if (uint32_t NewNum = expressionNumbering[Exp]) {
- if (Exp.opcode == Instruction::Call && NewNum != Num)
- return areCallValsEqual(Num, NewNum, Pred, PhiBlock, Gvn) ? NewNum : Num;
+ if (uint32_t NewNum = ExpressionNumbering[Exp]) {
+ if (Exp.Opcode == Instruction::Call && NewNum != Num)
+ return areCallValsEqual(Num, NewNum, Pred, PhiBlock, GVN) ? NewNum : Num;
return NewNum;
}
return Num;
@@ -2360,8 +2358,8 @@ void GVNPass::ValueTable::eraseTranslateCacheEntry(
// and then scan the list to find one whose block dominates the block in
// question. This is fast because dominator tree queries consist of only
// a few comparisons of DFS numbers.
-Value *GVNPass::findLeader(const BasicBlock *BB, uint32_t num) {
- auto Leaders = LeaderTable.getLeaders(num);
+Value *GVNPass::findLeader(const BasicBlock *BB, uint32_t Num) {
+ auto Leaders = LeaderTable.getLeaders(Num);
if (Leaders.empty())
return nullptr;
@@ -2406,11 +2404,11 @@ bool GVNPass::replaceOperandsForInBlockEquality(Instruction *Instr) const {
bool Changed = false;
for (unsigned OpNum = 0; OpNum < Instr->getNumOperands(); ++OpNum) {
Value *Operand = Instr->getOperand(OpNum);
- auto it = ReplaceOperandsWithMap.find(Operand);
- if (it != ReplaceOperandsWithMap.end()) {
+ auto It = ReplaceOperandsWithMap.find(Operand);
+ if (It != ReplaceOperandsWithMap.end()) {
LLVM_DEBUG(dbgs() << "GVN replacing: " << *Operand << " with "
- << *it->second << " in instruction " << *Instr << '\n');
- Instr->setOperand(OpNum, it->second);
+ << *It->second << " in instruction " << *Instr << '\n');
+ Instr->setOperand(OpNum, It->second);
Changed = true;
}
}
@@ -2487,15 +2485,15 @@ bool GVNPass::propagateEquality(Value *LHS, Value *RHS,
// never do anything if LHS has only one use.
if (!LHS->hasOneUse()) {
// Create a callback that captures the DL.
- auto canReplacePointersCallBack = [&DL](const Use &U, const Value *To) {
+ auto CanReplacePointersCallBack = [&DL](const Use &U, const Value *To) {
return canReplacePointersInUseIfEqual(U, To, DL);
};
unsigned NumReplacements =
DominatesByEdge
? replaceDominatedUsesWithIf(LHS, RHS, *DT, Root,
- canReplacePointersCallBack)
+ CanReplacePointersCallBack)
: replaceDominatedUsesWithIf(LHS, RHS, *DT, Root.getStart(),
- canReplacePointersCallBack);
+ CanReplacePointersCallBack);
if (NumReplacements > 0) {
Changed = true;
@@ -2518,14 +2516,14 @@ bool GVNPass::propagateEquality(Value *LHS, Value *RHS,
// RHS neither 'true' nor 'false' - bail out.
continue;
// Whether RHS equals 'true'. Otherwise it equals 'false'.
- bool isKnownTrue = CI->isMinusOne();
- bool isKnownFalse = !isKnownTrue;
+ bool IsKnownTrue = CI->isMinusOne();
+ bool IsKnownFalse = !IsKnownTrue;
// If "A && B" is known true then both A and B are known true. If "A || B"
// is known false then both A and B are known false.
Value *A, *B;
- if ((isKnownTrue && match(LHS, m_LogicalAnd(m_Value(A), m_Value(B)))) ||
- (isKnownFalse && match(LHS, m_LogicalOr(m_Value(A), m_Value(B))))) {
+ if ((IsKnownTrue && match(LHS, m_LogicalAnd(m_Value(A), m_Value(B)))) ||
+ (IsKnownFalse && match(LHS, m_LogicalOr(m_Value(A), m_Value(B))))) {
Worklist.push_back(std::make_pair(A, RHS));
Worklist.push_back(std::make_pair(B, RHS));
continue;
@@ -2540,12 +2538,12 @@ bool GVNPass::propagateEquality(Value *LHS, Value *RHS,
// If "A == B" is known true, or "A != B" is known false, then replace
// A with B everywhere in the scope. For floating point operations, we
// have to be careful since equality does not always imply equivalence.
- if (Cmp->isEquivalence(isKnownFalse))
+ if (Cmp->isEquivalence(IsKnownFalse))
Worklist.push_back(std::make_pair(Op0, Op1));
// If "A >= B" is known true, replace "A < B" with false everywhere.
CmpInst::Predicate NotPred = Cmp->getInversePredicate();
- Constant *NotVal = ConstantInt::get(Cmp->getType(), isKnownFalse);
+ Constant *NotVal = ConstantInt::get(Cmp->getType(), IsKnownFalse);
// Since we don't have the instruction "A < B" immediately to hand, work
// out the value number that it would have and use that to find an
// appropriate instruction (if any).
@@ -2583,7 +2581,7 @@ bool GVNPass::propagateEquality(Value *LHS, Value *RHS,
}
/// When calculating availability, handle an instruction
-/// by inserting it into the appropriate sets
+/// by inserting it into the appropriate sets.
bool GVNPass::processInstruction(Instruction *I) {
// Ignore dbg info intrinsics.
if (isa<DbgInfoIntrinsic>(I))
@@ -2668,13 +2666,12 @@ bool GVNPass::processInstruction(Instruction *I) {
for (BasicBlock *Succ : successors(Parent))
++SwitchEdges[Succ];
- for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
- i != e; ++i) {
- BasicBlock *Dst = i->getCaseSuccessor();
+ for (const auto &Case : SI->cases()) {
+ BasicBlock *Dst = Case.getCaseSuccessor();
// If there is only a single edge, propagate the case value into it.
if (SwitchEdges.lookup(Dst) == 1) {
BasicBlockEdge E(Parent, Dst);
- Changed |= propagateEquality(SwitchCond, i->getCaseValue(), E, true);
+ Changed |= propagateEquality(SwitchCond, Case.getCaseValue(), E, true);
}
}
return Changed;
@@ -2752,12 +2749,12 @@ bool GVNPass::runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT,
DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
// Merge unconditional branches, allowing PRE to catch more
// optimization opportunities.
- for (BasicBlock &BB : llvm::make_early_inc_range(F)) {
- bool removedBlock = MergeBlockIntoPredecessor(&BB, &DTU, &LI, MSSAU, MD);
- if (removedBlock)
+ for (BasicBlock &BB : make_early_inc_range(F)) {
+ bool RemovedBlock = MergeBlockIntoPredecessor(&BB, &DTU, &LI, MSSAU, MD);
+ if (RemovedBlock)
++NumGVNBlocks;
- Changed |= removedBlock;
+ Changed |= RemovedBlock;
}
DTU.flush();
@@ -2864,9 +2861,9 @@ bool GVNPass::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
// will be available in the predecessor by the time we need them. Any
// that weren't originally present will have been instantiated earlier
// in this loop.
- bool success = true;
- for (unsigned i = 0, e = Instr->getNumOperands(); i != e; ++i) {
- Value *Op = Instr->getOperand(i);
+ bool Success = true;
+ for (unsigned I = 0, E = Instr->getNumOperands(); I != E; ++I) {
+ Value *Op = Instr->getOperand(I);
if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
continue;
// This could be a newly inserted instruction, in which case, we won't
@@ -2874,15 +2871,15 @@ bool GVNPass::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
// FIXME: Rewrite the infrastructure to let it easier to value number
// and process newly inserted instructions.
if (!VN.exists(Op)) {
- success = false;
+ Success = false;
break;
}
uint32_t TValNo =
VN.phiTranslate(Pred, Curr, VN.lookup(Op), *this);
if (Value *V = findLeader(Pred, TValNo)) {
- Instr->setOperand(i, V);
+ Instr->setOperand(I, V);
} else {
- success = false;
+ Success = false;
break;
}
}
@@ -2890,7 +2887,7 @@ bool GVNPass::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
// Fail out if we encounter an operand that is not available in
// the PRE predecessor. This is typically because of loads which
// are not value numbered precisely.
- if (!success)
+ if (!Success)
return false;
Instr->insertBefore(Pred->getTerminator()->getIterator());
@@ -2954,7 +2951,7 @@ bool GVNPass::performScalarPRE(Instruction *CurInst) {
if (InvalidBlockRPONumbers)
assignBlockRPONumber(*CurrentBlock->getParent());
- SmallVector<std::pair<Value *, BasicBlock *>, 8> predMap;
+ SmallVector<std::pair<Value *, BasicBlock *>, 8> PredMap;
for (BasicBlock *P : predecessors(CurrentBlock)) {
// We're not interested in PRE where blocks with predecessors that are
// not reachable.
@@ -2971,17 +2968,17 @@ bool GVNPass::performScalarPRE(Instruction *CurInst) {
}
uint32_t TValNo = VN.phiTranslate(P, CurrentBlock, ValNo, *this);
- Value *predV = findLeader(P, TValNo);
- if (!predV) {
- predMap.push_back(std::make_pair(static_cast<Value *>(nullptr), P));
+ Value *PredV = findLeader(P, TValNo);
+ if (!PredV) {
+ PredMap.push_back(std::make_pair(static_cast<Value *>(nullptr), P));
PREPred = P;
++NumWithout;
- } else if (predV == CurInst) {
- /* CurInst dominates this predecessor. */
+ } else if (PredV == CurInst) {
+ // CurInst dominates this predecessor.
NumWithout = 2;
break;
} else {
- predMap.push_back(std::make_pair(predV, P));
+ PredMap.push_back(std::make_pair(PredV, P));
++NumWith;
}
}
@@ -3015,10 +3012,10 @@ bool GVNPass::performScalarPRE(Instruction *CurInst) {
// on the function.
unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
- toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
+ ToSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
return false;
}
- // We need to insert somewhere, so let's give it a shot
+ // We need to insert somewhere, so let's give it a shot.
PREInstr = CurInst->clone();
if (!performScalarPREInsertion(PREInstr, PREPred, CurrentBlock, ValNo)) {
// If we failed insertion, make sure we remove the instruction.
@@ -3037,15 +3034,15 @@ bool GVNPass::performScalarPRE(Instruction *CurInst) {
++NumGVNPRE;
// Create a PHI to make the value available in this block.
- PHINode *Phi = PHINode::Create(CurInst->getType(), predMap.size(),
+ PHINode *Phi = PHINode::Create(CurInst->getType(), PredMap.size(),
CurInst->getName() + ".pre-phi");
Phi->insertBefore(CurrentBlock->begin());
- for (unsigned i = 0, e = predMap.size(); i != e; ++i) {
- if (Value *V = predMap[i].first) {
+ for (unsigned I = 0, E = PredMap.size(); I != E; ++I) {
+ if (Value *V = PredMap[I].first) {
// If we use an existing value in this phi, we have to patch the original
// value because the phi will be used to replace a later value.
patchReplacementInstruction(CurInst, V);
- Phi->addIncoming(V, predMap[i].second);
+ Phi->addIncoming(V, PredMap[I].second);
} else
Phi->addIncoming(PREInstr, PREPred);
}
@@ -3115,16 +3112,16 @@ BasicBlock *GVNPass::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) {
/// Split critical edges found during the previous
/// iteration that may enable further optimization.
bool GVNPass::splitCriticalEdges() {
- if (toSplit.empty())
+ if (ToSplit.empty())
return false;
bool Changed = false;
do {
- std::pair<Instruction *, unsigned> Edge = toSplit.pop_back_val();
+ std::pair<Instruction *, unsigned> Edge = ToSplit.pop_back_val();
Changed |= SplitCriticalEdge(Edge.first, Edge.second,
CriticalEdgeSplittingOptions(DT, LI, MSSAU)) !=
nullptr;
- } while (!toSplit.empty());
+ } while (!ToSplit.empty());
if (Changed) {
if (MD)
MD->invalidateCachedPredecessors();
@@ -3133,11 +3130,11 @@ bool GVNPass::splitCriticalEdges() {
return Changed;
}
-/// Executes one iteration of GVN
+/// Executes one iteration of GVN.
bool GVNPass::iterateOnFunction(Function &F) {
cleanupGlobalSets();
- // Top-down walk of the dominator tree
+ // Top-down walk of the dominator tree.
bool Changed = false;
// Needed for value numbering with phi construction to work.
// RPOT walks the graph in its constructor and will not be invalidated during
@@ -3235,7 +3232,7 @@ void GVNPass::addDeadBlock(BasicBlock *BB) {
if (!DeadBlocks.count(P))
continue;
- if (llvm::is_contained(successors(P), B) &&
+ if (is_contained(successors(P), B) &&
isCriticalEdge(P->getTerminator(), B)) {
if (BasicBlock *S = splitCriticalEdges(P, B))
DeadBlocks.insert(P = S);
@@ -3307,7 +3304,7 @@ void GVNPass::assignValNumForDeadCode() {
class llvm::gvn::GVNLegacyPass : public FunctionPass {
public:
- static char ID; // Pass identification, replacement for typeid
+ static char ID; // Pass identification, replacement for typeid.
explicit GVNLegacyPass(bool MemDepAnalysis = GVNEnableMemDep,
bool MemSSAAnalysis = GVNEnableMemorySSA)
More information about the llvm-commits
mailing list