[llvm] r350558 - [MemorySSA] Refactor CachingWalker.
Alina Sbirlea via llvm-commits
llvm-commits at lists.llvm.org
Mon Jan 7 11:22:38 PST 2019
Author: asbirlea
Date: Mon Jan 7 11:22:37 2019
New Revision: 350558
URL: http://llvm.org/viewvc/llvm-project?rev=350558&view=rev
Log:
[MemorySSA] Refactor CachingWalker.
Summary:
Refactor caching walker to make creating a walker that skips the
starting access straightforward.
Reviewers: george.burgess.iv
Subscribers: sanjoy, jlebar, Prazek, llvm-commits, jfb
Differential Revision: https://reviews.llvm.org/D55957
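To illustrate what the refactor is aimed at: with the clobber-walking logic
hoisted into ClobberWalkerBase, a walker that skips the starting access can be
written as another thin wrapper next to CachingWalker. The sketch below only
shows that shape and is not part of this patch; the SkipSelfWalker name (and
the matching forward declaration it would need in MemorySSA.h) is hypothetical.

class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
  ClobberWalkerBase *Walker;

public:
  SkipSelfWalker(MemorySSA *M, ClobberWalkerBase *W)
      : MemorySSAWalker(M), Walker(W) {}
  ~SkipSelfWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
    // SkipSelf = true: keep searching for a clobber past the queried access.
    return Walker->getClobberingMemoryAccessBase(MA, true);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc) override {
    // The Loc-based query has no SkipSelf notion in this patch.
    return Walker->getClobberingMemoryAccessBase(MA, Loc);
  }

  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }
};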
Modified:
llvm/trunk/include/llvm/Analysis/MemorySSA.h
llvm/trunk/lib/Analysis/MemorySSA.cpp
Modified: llvm/trunk/include/llvm/Analysis/MemorySSA.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Analysis/MemorySSA.h?rev=350558&r1=350557&r2=350558&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Analysis/MemorySSA.h (original)
+++ llvm/trunk/include/llvm/Analysis/MemorySSA.h Mon Jan 7 11:22:37 2019
@@ -828,6 +828,7 @@ protected:
const MemoryUseOrDef *Template = nullptr);
private:
+ class ClobberWalkerBase;
class CachingWalker;
class OptimizeUses;
@@ -882,6 +883,7 @@ private:
mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;
// Memory SSA building info
+ std::unique_ptr<ClobberWalkerBase> WalkerBase;
std::unique_ptr<CachingWalker> Walker;
unsigned NextID;
};
Modified: llvm/trunk/lib/Analysis/MemorySSA.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/MemorySSA.cpp?rev=350558&r1=350557&r2=350558&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/MemorySSA.cpp (original)
+++ llvm/trunk/lib/Analysis/MemorySSA.cpp Mon Jan 7 11:22:37 2019
@@ -946,28 +946,51 @@ struct RenamePassData {
namespace llvm {
+class MemorySSA::ClobberWalkerBase {
+ ClobberWalker Walker;
+ MemorySSA *MSSA;
+
+public:
+ ClobberWalkerBase(MemorySSA *M, AliasAnalysis *A, DominatorTree *D)
+ : Walker(*M, *A, *D), MSSA(M) {}
+
+ MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
+ const MemoryLocation &);
+  // The second argument (bool) defines whether the clobber search should skip
+  // the original queried access. If true, there will be a follow-up query
+  // searching for a clobber access past "self". Note that the Optimized access
+  // is not updated if a new clobber is found by this SkipSelf search. If this
+  // additional query becomes heavily used, we may decide to cache the result.
+  // Walker instantiations will decide how to set the SkipSelf bool.
+ MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, bool);
+ void verify(const MemorySSA *MSSA) { Walker.verify(MSSA); }
+};
+
/// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
/// longer does caching on its own, but the name has been retained for the
/// moment.
class MemorySSA::CachingWalker final : public MemorySSAWalker {
- ClobberWalker Walker;
-
- MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, UpwardsMemoryQuery &);
+ ClobberWalkerBase *Walker;
public:
- CachingWalker(MemorySSA *, AliasAnalysis *, DominatorTree *);
+ CachingWalker(MemorySSA *M, ClobberWalkerBase *W)
+ : MemorySSAWalker(M), Walker(W) {}
~CachingWalker() override = default;
using MemorySSAWalker::getClobberingMemoryAccess;
- MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
- MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
- const MemoryLocation &) override;
- void invalidateInfo(MemoryAccess *) override;
+ MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override;
+ MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
+ const MemoryLocation &Loc) override;
+
+ void invalidateInfo(MemoryAccess *MA) override {
+ if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
+ MUD->resetOptimized();
+ }
void verify(const MemorySSA *MSSA) override {
MemorySSAWalker::verify(MSSA);
- Walker.verify(MSSA);
+ Walker->verify(MSSA);
}
};
@@ -1437,7 +1460,10 @@ MemorySSA::CachingWalker *MemorySSA::get
if (Walker)
return Walker.get();
- Walker = llvm::make_unique<CachingWalker>(this, AA, DT);
+ if (!WalkerBase)
+ WalkerBase = llvm::make_unique<ClobberWalkerBase>(this, AA, DT);
+
+ Walker = llvm::make_unique<CachingWalker>(this, WalkerBase.get());
return Walker.get();
}
@@ -2142,25 +2168,11 @@ void MemorySSAWrapperPass::print(raw_ost
MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
-MemorySSA::CachingWalker::CachingWalker(MemorySSA *M, AliasAnalysis *A,
- DominatorTree *D)
- : MemorySSAWalker(M), Walker(*M, *A, *D) {}
-
-void MemorySSA::CachingWalker::invalidateInfo(MemoryAccess *MA) {
- if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
- MUD->resetOptimized();
-}
-
-/// Walk the use-def chains starting at \p MA and find
+/// Walk the use-def chains starting at \p StartingAccess and find
/// the MemoryAccess that actually clobbers Loc.
///
/// \returns our clobbering memory access
-MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
- MemoryAccess *StartingAccess, UpwardsMemoryQuery &Q) {
- return Walker.findClobber(StartingAccess, Q);
-}
-
-MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
+MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(
MemoryAccess *StartingAccess, const MemoryLocation &Loc) {
if (isa<MemoryPhi>(StartingAccess))
return StartingAccess;
@@ -2184,11 +2196,12 @@ MemoryAccess *MemorySSA::CachingWalker::
// Unlike the other function, do not walk to the def of a def, because we are
// handed something we already believe is the clobbering access.
+ // We never set SkipSelf to true in Q in this method.
MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
? StartingUseOrDef->getDefiningAccess()
: StartingUseOrDef;
- MemoryAccess *Clobber = getClobberingMemoryAccess(DefiningAccess, Q);
+ MemoryAccess *Clobber = Walker.findClobber(DefiningAccess, Q);
LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
LLVM_DEBUG(dbgs() << *StartingUseOrDef << "\n");
LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
@@ -2197,17 +2210,23 @@ MemoryAccess *MemorySSA::CachingWalker::
}
MemoryAccess *
-MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
+MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(MemoryAccess *MA,
+ bool SkipSelf) {
auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
// If this is a MemoryPhi, we can't do anything.
if (!StartingAccess)
return MA;
+ bool IsOptimized = false;
+
// If this is an already optimized use or def, return the optimized result.
// Note: Currently, we store the optimized def result in a separate field,
// since we can't use the defining access.
- if (StartingAccess->isOptimized())
- return StartingAccess->getOptimized();
+ if (StartingAccess->isOptimized()) {
+ if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
+ return StartingAccess->getOptimized();
+ IsOptimized = true;
+ }
const Instruction *I = StartingAccess->getMemoryInst();
// We can't sanely do anything with a fence, since they conservatively clobber
@@ -2225,33 +2244,60 @@ MemorySSA::CachingWalker::getClobberingM
return LiveOnEntry;
}
- // Start with the thing we already think clobbers this location
- MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
+ MemoryAccess *OptimizedAccess;
+ if (!IsOptimized) {
+ // Start with the thing we already think clobbers this location
+ MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
+
+ // At this point, DefiningAccess may be the live on entry def.
+ // If it is, we will not get a better result.
+ if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
+ StartingAccess->setOptimized(DefiningAccess);
+ StartingAccess->setOptimizedAccessType(None);
+ return DefiningAccess;
+ }
+
+ OptimizedAccess = Walker.findClobber(DefiningAccess, Q);
+ StartingAccess->setOptimized(OptimizedAccess);
+ if (MSSA->isLiveOnEntryDef(OptimizedAccess))
+ StartingAccess->setOptimizedAccessType(None);
+ else if (Q.AR == MustAlias)
+ StartingAccess->setOptimizedAccessType(MustAlias);
+ } else
+ OptimizedAccess = StartingAccess->getOptimized();
- // At this point, DefiningAccess may be the live on entry def.
- // If it is, we will not get a better result.
- if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
- StartingAccess->setOptimized(DefiningAccess);
- StartingAccess->setOptimizedAccessType(None);
- return DefiningAccess;
- }
-
- MemoryAccess *Result = getClobberingMemoryAccess(DefiningAccess, Q);
LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
- LLVM_DEBUG(dbgs() << *DefiningAccess << "\n");
- LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
- LLVM_DEBUG(dbgs() << *Result << "\n");
+ LLVM_DEBUG(dbgs() << *StartingAccess << "\n");
+ LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");
+ LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n");
+
+ MemoryAccess *Result;
+ if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
+ isa<MemoryDef>(StartingAccess)) {
+ assert(isa<MemoryDef>(Q.OriginalAccess));
+ Q.SkipSelfAccess = true;
+ Result = Walker.findClobber(OptimizedAccess, Q);
+ } else
+ Result = OptimizedAccess;
- StartingAccess->setOptimized(Result);
- if (MSSA->isLiveOnEntryDef(Result))
- StartingAccess->setOptimizedAccessType(None);
- else if (Q.AR == MustAlias)
- StartingAccess->setOptimizedAccessType(MustAlias);
+ LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);
+ LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");
return Result;
}
MemoryAccess *
+MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
+ return Walker->getClobberingMemoryAccessBase(MA, false);
+}
+
+MemoryAccess *
+MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA,
+ const MemoryLocation &Loc) {
+ return Walker->getClobberingMemoryAccessBase(MA, Loc);
+}
+
+MemoryAccess *
DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
return Use->getDefiningAccess();
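For context, a minimal usage sketch of the unchanged public interface: passes
keep querying through MemorySSA's walker, and the delegation to
ClobberWalkerBase remains an internal detail. The helper function below is
hypothetical; the MemorySSA calls it makes are the existing API.

#include "llvm/Analysis/MemorySSA.h"
using namespace llvm;

static MemoryAccess *exampleClobberQuery(MemorySSA &MSSA, Instruction *I) {
  // Look up the access for the instruction, if MemorySSA models it.
  MemoryUseOrDef *MA = MSSA.getMemoryAccess(I);
  if (!MA)
    return nullptr;
  // getWalker() now wires CachingWalker to the shared ClobberWalkerBase.
  MemorySSAWalker *Walker = MSSA.getWalker();
  // Same external behavior as before the refactor: SkipSelf stays false.
  return Walker->getClobberingMemoryAccess(MA);
}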