[llvm] 275f3a2 - [GVN][NFC] Factor out load elimination logic via PRE for reuse
Max Kazantsev via llvm-commits
llvm-commits at lists.llvm.org
Fri Apr 9 02:13:21 PDT 2021
Author: Max Kazantsev
Date: 2021-04-09T16:12:25+07:00
New Revision: 275f3a2540fef429aaf8cebd2aa413bb9e7a6daa
URL: https://github.com/llvm/llvm-project/commit/275f3a2540fef429aaf8cebd2aa413bb9e7a6daa
DIFF: https://github.com/llvm/llvm-project/commit/275f3a2540fef429aaf8cebd2aa413bb9e7a6daa.diff
LOG: [GVN][NFC] Factor out load elimination logic via PRE for reuse
Added:
Modified:
llvm/include/llvm/Transforms/Scalar/GVN.h
llvm/lib/Transforms/Scalar/GVN.cpp
Removed:
################################################################################
diff --git a/llvm/include/llvm/Transforms/Scalar/GVN.h b/llvm/include/llvm/Transforms/Scalar/GVN.h
index 8560ede44b27..13f55ddcf2d6 100644
--- a/llvm/include/llvm/Transforms/Scalar/GVN.h
+++ b/llvm/include/llvm/Transforms/Scalar/GVN.h
@@ -328,6 +328,12 @@ class GVN : public PassInfoMixin<GVN> {
bool PerformLoadPRE(LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
UnavailBlkVect &UnavailableBlocks);
+ /// Eliminates partially redundant \p Load, replacing it with \p
+ /// AvailableLoads (connected by Phis if needed).
+ void eliminatePartiallyRedundantLoad(
+ LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
+ MapVector<BasicBlock *, Value *> &AvailableLoads);
+
// Other helper routines
bool processInstruction(Instruction *I);
bool processBlock(BasicBlock *BB);
diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp
index 50fe373cf690..9fa9676adc72 100644
--- a/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -1131,6 +1131,82 @@ void GVN::AnalyzeLoadAvailability(LoadInst *Load, LoadDepVect &Deps,
"post condition violation");
}
+void GVN::eliminatePartiallyRedundantLoad(
+ LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
+ MapVector<BasicBlock *, Value *> &AvailableLoads) {
+ for (const auto &AvailableLoad : AvailableLoads) {
+ BasicBlock *UnavailableBlock = AvailableLoad.first;
+ Value *LoadPtr = AvailableLoad.second;
+
+ auto *NewLoad =
+ new LoadInst(Load->getType(), LoadPtr, Load->getName() + ".pre",
+ Load->isVolatile(), Load->getAlign(), Load->getOrdering(),
+ Load->getSyncScopeID(), UnavailableBlock->getTerminator());
+ NewLoad->setDebugLoc(Load->getDebugLoc());
+ if (MSSAU) {
+ auto *MSSA = MSSAU->getMemorySSA();
+ // Get the defining access of the original load or use the load if it is a
+ // MemoryDef (e.g. because it is volatile). The inserted loads are
+ // guaranteed to load from the same definition.
+ auto *LoadAcc = MSSA->getMemoryAccess(Load);
+ auto *DefiningAcc =
+ isa<MemoryDef>(LoadAcc) ? LoadAcc : LoadAcc->getDefiningAccess();
+ auto *NewAccess = MSSAU->createMemoryAccessInBB(
+ NewLoad, DefiningAcc, NewLoad->getParent(),
+ MemorySSA::BeforeTerminator);
+ if (auto *NewDef = dyn_cast<MemoryDef>(NewAccess))
+ MSSAU->insertDef(NewDef, /*RenameUses=*/true);
+ else
+ MSSAU->insertUse(cast<MemoryUse>(NewAccess), /*RenameUses=*/true);
+ }
+
+ // Transfer the old load's AA tags to the new load.
+ AAMDNodes Tags;
+ Load->getAAMetadata(Tags);
+ if (Tags)
+ NewLoad->setAAMetadata(Tags);
+
+ if (auto *MD = Load->getMetadata(LLVMContext::MD_invariant_load))
+ NewLoad->setMetadata(LLVMContext::MD_invariant_load, MD);
+ if (auto *InvGroupMD = Load->getMetadata(LLVMContext::MD_invariant_group))
+ NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD);
+ if (auto *RangeMD = Load->getMetadata(LLVMContext::MD_range))
+ NewLoad->setMetadata(LLVMContext::MD_range, RangeMD);
+ if (auto *AccessMD = Load->getMetadata(LLVMContext::MD_access_group))
+ if (LI &&
+ LI->getLoopFor(Load->getParent()) == LI->getLoopFor(UnavailableBlock))
+ NewLoad->setMetadata(LLVMContext::MD_access_group, AccessMD);
+
+ // We do not propagate the old load's debug location, because the new
+ // load now lives in a different BB, and we want to avoid a jumpy line
+ // table.
+ // FIXME: How do we retain source locations without causing poor debugging
+ // behavior?
+
+ // Add the newly created load.
+ ValuesPerBlock.push_back(
+ AvailableValueInBlock::get(UnavailableBlock, NewLoad));
+ MD->invalidateCachedPointerInfo(LoadPtr);
+ LLVM_DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
+ }
+
+ // Perform PHI construction.
+ Value *V = ConstructSSAForLoadSet(Load, ValuesPerBlock, *this);
+ Load->replaceAllUsesWith(V);
+ if (isa<PHINode>(V))
+ V->takeName(Load);
+ if (Instruction *I = dyn_cast<Instruction>(V))
+ I->setDebugLoc(Load->getDebugLoc());
+ if (V->getType()->isPtrOrPtrVectorTy())
+ MD->invalidateCachedPointerInfo(V);
+ markInstructionForDeletion(Load);
+ ORE->emit([&]() {
+ return OptimizationRemark(DEBUG_TYPE, "LoadPRE", Load)
+ << "load eliminated by PRE";
+ });
+ ++NumPRELoad;
+}
+
bool GVN::PerformLoadPRE(LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
UnavailBlkVect &UnavailableBlocks) {
// Okay, we have *some* definitions of the value. This means that the value
@@ -1349,9 +1425,9 @@ bool GVN::PerformLoadPRE(LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
// and using PHI construction to get the value in the other predecessors, do
// it.
LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *Load << '\n');
- LLVM_DEBUG(if (!NewInsts.empty()) dbgs()
- << "INSERTED " << NewInsts.size() << " INSTS: " << *NewInsts.back()
- << '\n');
+ LLVM_DEBUG(if (!NewInsts.empty()) dbgs() << "INSERTED " << NewInsts.size()
+ << " INSTS: " << *NewInsts.back()
+ << '\n');
// Assign value numbers to the new instructions.
for (Instruction *I : NewInsts) {
@@ -1367,77 +1443,7 @@ bool GVN::PerformLoadPRE(LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
VN.lookupOrAdd(I);
}
- for (const auto &PredLoad : PredLoads) {
- BasicBlock *UnavailablePred = PredLoad.first;
- Value *LoadPtr = PredLoad.second;
-
- auto *NewLoad =
- new LoadInst(Load->getType(), LoadPtr, Load->getName() + ".pre",
- Load->isVolatile(), Load->getAlign(), Load->getOrdering(),
- Load->getSyncScopeID(), UnavailablePred->getTerminator());
- NewLoad->setDebugLoc(Load->getDebugLoc());
- if (MSSAU) {
- auto *MSSA = MSSAU->getMemorySSA();
- // Get the defining access of the original load or use the load if it is a
- // MemoryDef (e.g. because it is volatile). The inserted loads are
- // guaranteed to load from the same definition.
- auto *LoadAcc = MSSA->getMemoryAccess(Load);
- auto *DefiningAcc =
- isa<MemoryDef>(LoadAcc) ? LoadAcc : LoadAcc->getDefiningAccess();
- auto *NewAccess = MSSAU->createMemoryAccessInBB(
- NewLoad, DefiningAcc, NewLoad->getParent(),
- MemorySSA::BeforeTerminator);
- if (auto *NewDef = dyn_cast<MemoryDef>(NewAccess))
- MSSAU->insertDef(NewDef, /*RenameUses=*/true);
- else
- MSSAU->insertUse(cast<MemoryUse>(NewAccess), /*RenameUses=*/true);
- }
-
- // Transfer the old load's AA tags to the new load.
- AAMDNodes Tags;
- Load->getAAMetadata(Tags);
- if (Tags)
- NewLoad->setAAMetadata(Tags);
-
- if (auto *MD = Load->getMetadata(LLVMContext::MD_invariant_load))
- NewLoad->setMetadata(LLVMContext::MD_invariant_load, MD);
- if (auto *InvGroupMD = Load->getMetadata(LLVMContext::MD_invariant_group))
- NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD);
- if (auto *RangeMD = Load->getMetadata(LLVMContext::MD_range))
- NewLoad->setMetadata(LLVMContext::MD_range, RangeMD);
- if (auto *AccessMD = Load->getMetadata(LLVMContext::MD_access_group))
- if (LI &&
- LI->getLoopFor(Load->getParent()) == LI->getLoopFor(UnavailablePred))
- NewLoad->setMetadata(LLVMContext::MD_access_group, AccessMD);
-
- // We do not propagate the old load's debug location, because the new
- // load now lives in a different BB, and we want to avoid a jumpy line
- // table.
- // FIXME: How do we retain source locations without causing poor debugging
- // behavior?
-
- // Add the newly created load.
- ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
- NewLoad));
- MD->invalidateCachedPointerInfo(LoadPtr);
- LLVM_DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
- }
-
- // Perform PHI construction.
- Value *V = ConstructSSAForLoadSet(Load, ValuesPerBlock, *this);
- Load->replaceAllUsesWith(V);
- if (isa<PHINode>(V))
- V->takeName(Load);
- if (Instruction *I = dyn_cast<Instruction>(V))
- I->setDebugLoc(Load->getDebugLoc());
- if (V->getType()->isPtrOrPtrVectorTy())
- MD->invalidateCachedPointerInfo(V);
- markInstructionForDeletion(Load);
- ORE->emit([&]() {
- return OptimizationRemark(DEBUG_TYPE, "LoadPRE", Load)
- << "load eliminated by PRE";
- });
- ++NumPRELoad;
+ eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, PredLoads);
return true;
}