[llvm] r267898 - [GVN] Minor code cleanup. NFC.
Chad Rosier via llvm-commits
llvm-commits at lists.llvm.org
Thu Apr 28 09:00:16 PDT 2016
Author: mcrosier
Date: Thu Apr 28 11:00:15 2016
New Revision: 267898
URL: http://llvm.org/viewvc/llvm-project?rev=267898&view=rev
Log:
[GVN] Minor code cleanup. NFC.
Differential Revision: http://reviews.llvm.org/D18828
Patch by Aditya Kumar!
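
The cleanup is mechanical: ValueTable's helper methods are renamed from lower_case_with_underscores to camelCase (lookup_or_add becomes lookupOrAdd, create_expression becomes createExpr, and so on), in line with the LLVM naming conventions; trailing whitespace is stripped; and a couple of small local simplifications are applied (predMap in performScalarPRE becomes a function-local SmallVector, and isOnlyReachableViaThisEdge drops a temporary). As a minimal illustrative sketch of the rename only (not part of the patch; VN and I are the existing names used in GVN.cpp), call sites simply switch spellings:

    // Before this patch:
    //   unsigned Num = VN.lookup_or_add(I);
    // After this patch:
    unsigned Num = VN.lookupOrAdd(I);
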
Modified:
llvm/trunk/include/llvm/Transforms/Scalar/GVN.h
llvm/trunk/lib/Transforms/Scalar/GVN.cpp
Modified: llvm/trunk/include/llvm/Transforms/Scalar/GVN.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Transforms/Scalar/GVN.h?rev=267898&r1=267897&r2=267898&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Transforms/Scalar/GVN.h (original)
+++ llvm/trunk/include/llvm/Transforms/Scalar/GVN.h Thu Apr 28 11:00:15 2016
@@ -76,12 +76,11 @@ private:
uint32_t nextValueNumber;
- Expression create_expression(Instruction *I);
- Expression create_cmp_expression(unsigned Opcode,
- CmpInst::Predicate Predicate, Value *LHS,
- Value *RHS);
- Expression create_extractvalue_expression(ExtractValueInst *EI);
- uint32_t lookup_or_add_call(CallInst *C);
+ Expression createExpr(Instruction *I);
+ Expression createCmpExpr(unsigned Opcode, CmpInst::Predicate Predicate,
+ Value *LHS, Value *RHS);
+ Expression createExtractvalueExpr(ExtractValueInst *EI);
+ uint32_t lookupOrAddCall(CallInst *C);
public:
ValueTable();
@@ -89,10 +88,10 @@ private:
ValueTable(ValueTable &&Arg);
~ValueTable();
- uint32_t lookup_or_add(Value *V);
+ uint32_t lookupOrAdd(Value *V);
uint32_t lookup(Value *V) const;
- uint32_t lookup_or_add_cmp(unsigned Opcode, CmpInst::Predicate Pred,
- Value *LHS, Value *RHS);
+ uint32_t lookupOrAddCmp(unsigned Opcode, CmpInst::Predicate Pred,
+ Value *LHS, Value *RHS);
bool exists(Value *V) const;
void add(Value *V, uint32_t num);
void clear();
Modified: llvm/trunk/lib/Transforms/Scalar/GVN.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/GVN.cpp?rev=267898&r1=267897&r2=267898&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/GVN.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/GVN.cpp Thu Apr 28 11:00:15 2016
@@ -228,13 +228,13 @@ struct llvm::gvn::AvailableValueInBlock
// ValueTable Internal Functions
//===----------------------------------------------------------------------===//
-GVN::Expression GVN::ValueTable::create_expression(Instruction *I) {
+GVN::Expression GVN::ValueTable::createExpr(Instruction *I) {
Expression e;
e.type = I->getType();
e.opcode = I->getOpcode();
for (Instruction::op_iterator OI = I->op_begin(), OE = I->op_end();
OI != OE; ++OI)
- e.varargs.push_back(lookup_or_add(*OI));
+ e.varargs.push_back(lookupOrAdd(*OI));
if (I->isCommutative()) {
// Ensure that commutative instructions that only differ by a permutation
// of their operands get the same value number by sorting the operand value
@@ -262,14 +262,15 @@ GVN::Expression GVN::ValueTable::create_
return e;
}
-GVN::Expression GVN::ValueTable::create_cmp_expression(
- unsigned Opcode, CmpInst::Predicate Predicate, Value *LHS, Value *RHS) {
+GVN::Expression GVN::ValueTable::createCmpExpr(unsigned Opcode,
+ CmpInst::Predicate Predicate,
+ Value *LHS, Value *RHS) {
assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
"Not a comparison!");
Expression e;
e.type = CmpInst::makeCmpResultType(LHS->getType());
- e.varargs.push_back(lookup_or_add(LHS));
- e.varargs.push_back(lookup_or_add(RHS));
+ e.varargs.push_back(lookupOrAdd(LHS));
+ e.varargs.push_back(lookupOrAdd(RHS));
// Sort the operand value numbers so x<y and y>x get the same value number.
if (e.varargs[0] > e.varargs[1]) {
@@ -280,8 +281,7 @@ GVN::Expression GVN::ValueTable::create_
return e;
}
-GVN::Expression
-GVN::ValueTable::create_extractvalue_expression(ExtractValueInst *EI) {
+GVN::Expression GVN::ValueTable::createExtractvalueExpr(ExtractValueInst *EI) {
assert(EI && "Not an ExtractValueInst?");
Expression e;
e.type = EI->getType();
@@ -313,8 +313,8 @@ GVN::ValueTable::create_extractvalue_exp
// Intrinsic recognized. Grab its args to finish building the expression.
assert(I->getNumArgOperands() == 2 &&
"Expect two args for recognised intrinsics.");
- e.varargs.push_back(lookup_or_add(I->getArgOperand(0)));
- e.varargs.push_back(lookup_or_add(I->getArgOperand(1)));
+ e.varargs.push_back(lookupOrAdd(I->getArgOperand(0)));
+ e.varargs.push_back(lookupOrAdd(I->getArgOperand(1)));
return e;
}
}
@@ -324,7 +324,7 @@ GVN::ValueTable::create_extractvalue_exp
e.opcode = EI->getOpcode();
for (Instruction::op_iterator OI = EI->op_begin(), OE = EI->op_end();
OI != OE; ++OI)
- e.varargs.push_back(lookup_or_add(*OI));
+ e.varargs.push_back(lookupOrAdd(*OI));
for (ExtractValueInst::idx_iterator II = EI->idx_begin(), IE = EI->idx_end();
II != IE; ++II)
@@ -354,15 +354,15 @@ void GVN::ValueTable::add(Value *V, uint
valueNumbering.insert(std::make_pair(V, num));
}
-uint32_t GVN::ValueTable::lookup_or_add_call(CallInst *C) {
+uint32_t GVN::ValueTable::lookupOrAddCall(CallInst *C) {
if (AA->doesNotAccessMemory(C)) {
- Expression exp = create_expression(C);
+ Expression exp = createExpr(C);
uint32_t &e = expressionNumbering[exp];
if (!e) e = nextValueNumber++;
valueNumbering[C] = e;
return e;
} else if (AA->onlyReadsMemory(C)) {
- Expression exp = create_expression(C);
+ Expression exp = createExpr(C);
uint32_t &e = expressionNumbering[exp];
if (!e) {
e = nextValueNumber++;
@@ -391,15 +391,15 @@ uint32_t GVN::ValueTable::lookup_or_add_
}
for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
- uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
- uint32_t cd_vn = lookup_or_add(local_cdep->getArgOperand(i));
+ uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
+ uint32_t cd_vn = lookupOrAdd(local_cdep->getArgOperand(i));
if (c_vn != cd_vn) {
valueNumbering[C] = nextValueNumber;
return nextValueNumber++;
}
}
- uint32_t v = lookup_or_add(local_cdep);
+ uint32_t v = lookupOrAdd(local_cdep);
valueNumbering[C] = v;
return v;
}
@@ -445,15 +445,15 @@ uint32_t GVN::ValueTable::lookup_or_add_
return nextValueNumber++;
}
for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
- uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
- uint32_t cd_vn = lookup_or_add(cdep->getArgOperand(i));
+ uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
+ uint32_t cd_vn = lookupOrAdd(cdep->getArgOperand(i));
if (c_vn != cd_vn) {
valueNumbering[C] = nextValueNumber;
return nextValueNumber++;
}
}
- uint32_t v = lookup_or_add(cdep);
+ uint32_t v = lookupOrAdd(cdep);
valueNumbering[C] = v;
return v;
@@ -468,7 +468,7 @@ bool GVN::ValueTable::exists(Value *V) c
/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
-uint32_t GVN::ValueTable::lookup_or_add(Value *V) {
+uint32_t GVN::ValueTable::lookupOrAdd(Value *V) {
DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
if (VI != valueNumbering.end())
return VI->second;
@@ -482,7 +482,7 @@ uint32_t GVN::ValueTable::lookup_or_add(
Expression exp;
switch (I->getOpcode()) {
case Instruction::Call:
- return lookup_or_add_call(cast<CallInst>(I));
+ return lookupOrAddCall(cast<CallInst>(I));
case Instruction::Add:
case Instruction::FAdd:
case Instruction::Sub:
@@ -521,10 +521,10 @@ uint32_t GVN::ValueTable::lookup_or_add(
case Instruction::ShuffleVector:
case Instruction::InsertValue:
case Instruction::GetElementPtr:
- exp = create_expression(I);
+ exp = createExpr(I);
break;
case Instruction::ExtractValue:
- exp = create_extractvalue_expression(cast<ExtractValueInst>(I));
+ exp = createExtractvalueExpr(cast<ExtractValueInst>(I));
break;
default:
valueNumbering[V] = nextValueNumber;
@@ -549,10 +549,10 @@ uint32_t GVN::ValueTable::lookup(Value *
/// assigning it a new number if it did not have one before. Useful when
/// we deduced the result of a comparison, but don't immediately have an
/// instruction realizing that comparison to hand.
-uint32_t GVN::ValueTable::lookup_or_add_cmp(unsigned Opcode,
- CmpInst::Predicate Predicate,
- Value *LHS, Value *RHS) {
- Expression exp = create_cmp_expression(Opcode, Predicate, LHS, RHS);
+uint32_t GVN::ValueTable::lookupOrAddCmp(unsigned Opcode,
+ CmpInst::Predicate Predicate,
+ Value *LHS, Value *RHS) {
+ Expression exp = createCmpExpr(Opcode, Predicate, LHS, RHS);
uint32_t& e = expressionNumbering[exp];
if (!e) e = nextValueNumber++;
return e;
@@ -1188,7 +1188,7 @@ Value *AvailableValue::MaterializeAdjust
Res = Load;
} else {
Res = GetLoadValueForLoad(Load, Offset, LoadTy, InsertPt, gvn);
-
+
DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset << " "
<< *getCoercedLoadValue() << '\n'
<< *Res << '\n' << "\n\n\n");
@@ -1221,7 +1221,7 @@ bool GVN::AnalyzeLoadAvailability(LoadIn
"expected a local dependence");
const DataLayout &DL = LI->getModule()->getDataLayout();
-
+
if (DepInfo.isClobber()) {
// If the dependence is to a store that writes to a superset of the bits
// read by the load, we can extract the bits we need for the load from the
@@ -1247,7 +1247,7 @@ bool GVN::AnalyzeLoadAvailability(LoadIn
if (DepLI != LI && Address) {
int Offset =
AnalyzeLoadFromClobberingLoad(LI->getType(), Address, DepLI, DL);
-
+
if (Offset != -1) {
Res = AvailableValue::getLoad(DepLI, Offset);
return true;
@@ -1280,7 +1280,7 @@ bool GVN::AnalyzeLoadAvailability(LoadIn
assert(DepInfo.isDef() && "follows from above");
Instruction *DepInst = DepInfo.getInst();
-
+
// Loading the allocation -> undef.
if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI) ||
// Loading immediately after lifetime begin -> undef.
@@ -1288,13 +1288,13 @@ bool GVN::AnalyzeLoadAvailability(LoadIn
Res = AvailableValue::get(UndefValue::get(LI->getType()));
return true;
}
-
+
// Loading from calloc (which zero initializes memory) -> zero
if (isCallocLikeFn(DepInst, TLI)) {
Res = AvailableValue::get(Constant::getNullValue(LI->getType()));
return true;
}
-
+
if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
// Reject loads and stores that are to the same address but are of
// different types if we have to. If the stored value is larger or equal to
@@ -1303,15 +1303,15 @@ bool GVN::AnalyzeLoadAvailability(LoadIn
!CanCoerceMustAliasedValueToLoad(S->getValueOperand(),
LI->getType(), DL))
return false;
-
+
Res = AvailableValue::get(S->getValueOperand());
return true;
}
-
+
if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
// If the types mismatch and we can't handle it, reject reuse of the load.
// If the stored value is larger or equal to the loaded value, we can reuse
- // it.
+ // it.
if (LD->getType() != LI->getType() &&
!CanCoerceMustAliasedValueToLoad(LD, LI->getType(), DL))
return false;
@@ -1330,8 +1330,7 @@ bool GVN::AnalyzeLoadAvailability(LoadIn
return false;
}
-
-void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
+void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
AvailValInBlkVect &ValuesPerBlock,
UnavailBlkVect &UnavailableBlocks) {
@@ -1377,8 +1376,7 @@ void GVN::AnalyzeLoadAvailability(LoadIn
"post condition violation");
}
-
-bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
+bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
UnavailBlkVect &UnavailableBlocks) {
// Okay, we have *some* definitions of the value. This means that the value
// is available in some of our (transitive) predecessors. Lets think about
@@ -1536,7 +1534,7 @@ bool GVN::PerformLoadPRE(LoadInst *LI, A
// parent's availability map. However, in doing so, we risk getting into
// ordering issues. If a block hasn't been processed yet, we would be
// marking a value as AVAIL-IN, which isn't what we intend.
- VN.lookup_or_add(I);
+ VN.lookupOrAdd(I);
}
for (const auto &PredLoad : PredLoads) {
@@ -1787,7 +1785,7 @@ bool GVN::processLoad(LoadInst *L) {
AvailableValue AV;
if (AnalyzeLoadAvailability(L, Dep, L->getPointerOperand(), AV)) {
Value *AvailableValue = AV.MaterializeAdjustedValue(L, L, *this);
-
+
// Replace the load!
patchAndReplaceAllUsesWith(L, AvailableValue);
markInstructionForDeletion(L);
@@ -1841,9 +1839,8 @@ static bool isOnlyReachableViaThisEdge(c
// GVN runs all such loops have preheaders, which means that Dst will have
// been changed to have only one predecessor, namely Src.
const BasicBlock *Pred = E.getEnd()->getSinglePredecessor();
- const BasicBlock *Src = E.getStart();
- assert((!Pred || Pred == Src) && "No edge between these basic blocks!");
- (void)Src;
+ assert((!Pred || Pred == E.getStart()) &&
+ "No edge between these basic blocks!");
return Pred != nullptr;
}
@@ -1878,7 +1875,7 @@ bool GVN::propagateEquality(Value *LHS,
bool Changed = false;
// For speed, compute a conservative fast approximation to
// DT->dominates(Root, Root.getEnd());
- bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT);
+ const bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT);
while (!Worklist.empty()) {
std::pair<Value*, Value*> Item = Worklist.pop_back_val();
@@ -1901,12 +1898,12 @@ bool GVN::propagateEquality(Value *LHS,
// right-hand side, ensure the longest lived term is on the right-hand side,
// so the shortest lived term will be replaced by the longest lived.
// This tends to expose more simplifications.
- uint32_t LVN = VN.lookup_or_add(LHS);
+ uint32_t LVN = VN.lookupOrAdd(LHS);
if ((isa<Argument>(LHS) && isa<Argument>(RHS)) ||
(isa<Instruction>(LHS) && isa<Instruction>(RHS))) {
// Move the 'oldest' value to the right-hand side, using the value number
// as a proxy for age.
- uint32_t RVN = VN.lookup_or_add(RHS);
+ uint32_t RVN = VN.lookupOrAdd(RHS);
if (LVN < RVN) {
std::swap(LHS, RHS);
LVN = RVN;
@@ -1982,7 +1979,7 @@ bool GVN::propagateEquality(Value *LHS,
// Floating point -0.0 and 0.0 compare equal, so we can only
// propagate values if we know that we have a constant and that
// its value is non-zero.
-
+
// FIXME: We should do this optimization if 'no signed zeros' is
// applicable via an instruction-level fast-math-flag or some other
// indicator that relaxed FP semantics are being used.
@@ -1990,7 +1987,7 @@ bool GVN::propagateEquality(Value *LHS,
if (isa<ConstantFP>(Op1) && !cast<ConstantFP>(Op1)->isZero())
Worklist.push_back(std::make_pair(Op0, Op1));
}
-
+
// If "A >= B" is known true, replace "A < B" with false everywhere.
CmpInst::Predicate NotPred = Cmp->getInversePredicate();
Constant *NotVal = ConstantInt::get(Cmp->getType(), isKnownFalse);
@@ -1998,7 +1995,7 @@ bool GVN::propagateEquality(Value *LHS,
// out the value number that it would have and use that to find an
// appropriate instruction (if any).
uint32_t NextNum = VN.getNextUnusedValueNumber();
- uint32_t Num = VN.lookup_or_add_cmp(Cmp->getOpcode(), NotPred, Op0, Op1);
+ uint32_t Num = VN.lookupOrAddCmp(Cmp->getOpcode(), NotPred, Op0, Op1);
// If the number we were assigned was brand new then there is no point in
// looking for an instruction realizing it: there cannot be one!
if (Num < NextNum) {
@@ -2056,7 +2053,7 @@ bool GVN::processInstruction(Instruction
if (processLoad(LI))
return true;
- unsigned Num = VN.lookup_or_add(LI);
+ unsigned Num = VN.lookupOrAdd(LI);
addToLeaderTable(Num, LI, LI->getParent());
return false;
}
@@ -2120,7 +2117,7 @@ bool GVN::processInstruction(Instruction
return false;
uint32_t NextNum = VN.getNextUnusedValueNumber();
- unsigned Num = VN.lookup_or_add(I);
+ unsigned Num = VN.lookupOrAdd(I);
// Allocations are always uniquely numbered, so we can save time and memory
// by fast failing them.
@@ -2211,7 +2208,7 @@ bool GVN::runImpl(Function &F, Assumptio
cleanupGlobalSets();
// Do not cleanup DeadBlocks in cleanupGlobalSets() as it's called for each
- // iteration.
+ // iteration.
DeadBlocks.clear();
return Changed;
@@ -2311,8 +2308,6 @@ bool GVN::performScalarPREInsertion(Inst
}
bool GVN::performScalarPRE(Instruction *CurInst) {
- SmallVector<std::pair<Value*, BasicBlock*>, 8> predMap;
-
if (isa<AllocaInst>(CurInst) || isa<TerminatorInst>(CurInst) ||
isa<PHINode>(CurInst) || CurInst->getType()->isVoidTy() ||
CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
@@ -2343,8 +2338,8 @@ bool GVN::performScalarPRE(Instruction *
unsigned NumWithout = 0;
BasicBlock *PREPred = nullptr;
BasicBlock *CurrentBlock = CurInst->getParent();
- predMap.clear();
+ SmallVector<std::pair<Value *, BasicBlock *>, 8> predMap;
for (BasicBlock *P : predecessors(CurrentBlock)) {
// We're not interested in PRE where the block is its
// own predecessor, or in blocks with predecessors
@@ -2437,7 +2432,7 @@ bool GVN::performScalarPRE(Instruction *
DEBUG(verifyRemoved(CurInst));
CurInst->eraseFromParent();
++NumGVNInstr;
-
+
return true;
}
@@ -2560,7 +2555,7 @@ void GVN::addDeadBlock(BasicBlock *BB) {
SmallVector<BasicBlock *, 8> Dom;
DT->getDescendants(D, Dom);
DeadBlocks.insert(Dom.begin(), Dom.end());
-
+
// Figure out the dominance-frontier(D).
for (BasicBlock *B : Dom) {
for (BasicBlock *S : successors(B)) {
@@ -2618,13 +2613,13 @@ void GVN::addDeadBlock(BasicBlock *BB) {
// If the given branch is recognized as a foldable branch (i.e. conditional
// branch with constant condition), it will perform following analyses and
// transformation.
-// 1) If the dead out-coming edge is a critical-edge, split it. Let
+// 1) If the dead out-coming edge is a critical-edge, split it. Let
// R be the target of the dead out-coming edge.
// 1) Identify the set of dead blocks implied by the branch's dead outcoming
// edge. The result of this step will be {X| X is dominated by R}
// 2) Identify those blocks which haves at least one dead predecessor. The
// result of this step will be dominance-frontier(R).
-// 3) Update the PHIs in DF(R) by replacing the operands corresponding to
+// 3) Update the PHIs in DF(R) by replacing the operands corresponding to
// dead blocks with "UndefVal" in an hope these PHIs will optimized away.
//
// Return true iff *NEW* dead code are found.
@@ -2640,8 +2635,8 @@ bool GVN::processFoldableCondBr(BranchIn
if (!Cond)
return false;
- BasicBlock *DeadRoot = Cond->getZExtValue() ?
- BI->getSuccessor(1) : BI->getSuccessor(0);
+ BasicBlock *DeadRoot =
+ Cond->getZExtValue() ? BI->getSuccessor(1) : BI->getSuccessor(0);
if (DeadBlocks.count(DeadRoot))
return false;
@@ -2659,7 +2654,7 @@ bool GVN::processFoldableCondBr(BranchIn
void GVN::assignValNumForDeadCode() {
for (BasicBlock *BB : DeadBlocks) {
for (Instruction &Inst : *BB) {
- unsigned ValNum = VN.lookup_or_add(&Inst);
+ unsigned ValNum = VN.lookupOrAdd(&Inst);
addToLeaderTable(ValNum, &Inst, BB);
}
}