[llvm] r299486 - Re-apply MemorySSA: Add support for caching clobbering access in
Daniel Berlin via llvm-commits
llvm-commits at lists.llvm.org
Tue Apr 4 16:43:11 PDT 2017
Author: dannyb
Date: Tue Apr 4 18:43:10 2017
New Revision: 299486
URL: http://llvm.org/viewvc/llvm-project?rev=299486&view=rev
Log:
Re-apply MemorySSA: Add support for caching clobbering access in
stores with some fixes.
Summary:
This enables us to cache the clobbering access for stores, despite the
fact that we can't rewrite the use-def chains themselves.
Early testing shows that, after this change, removing the walker caching
will be a significant net positive (in both memory and time) on larger
testcases.
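
To make the summary concrete, here is a minimal usage sketch (illustrative
code, not part of the commit; the helper name and setup are hypothetical):
with this patch, a walker query on a store's MemoryDef records its result
through the new setOptimized()/getOptimized() interface, so a repeat query
can be answered from the access itself instead of re-walking.

#include "llvm/Transforms/Utils/MemorySSA.h"
#include <cassert>
using namespace llvm;

// Hypothetical helper: query the clobber for a store and check the cache.
// Assumes Store is a store instruction that has a MemoryDef in MSSA.
static void queryStoreClobber(MemorySSA &MSSA, Instruction *Store) {
  auto *Def = cast<MemoryDef>(MSSA.getMemoryAccess(Store));
  MemorySSAWalker *Walker = MSSA.getWalker();

  // The first query walks upward and caches its result on Def via
  // MemoryDef::setOptimized (a separate field, since a def's operand must
  // remain its defining access).
  MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(Def);

  // The cached result stays valid while the defining access is unchanged,
  // so a second query can be answered without another walk.
  assert(Def->isOptimized() && Def->getOptimized() == Clobber &&
         "repeat query should be answerable from the cache");
  (void)Clobber;
}
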
Reviewers: george.burgess.iv, davide
Subscribers: Prazek, llvm-commits
Differential Revision: https://reviews.llvm.org/D31567
Modified:
llvm/trunk/include/llvm/Transforms/Utils/MemorySSA.h
llvm/trunk/lib/Transforms/Utils/MemorySSA.cpp
llvm/trunk/lib/Transforms/Utils/MemorySSAUpdater.cpp
Modified: llvm/trunk/include/llvm/Transforms/Utils/MemorySSA.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Transforms/Utils/MemorySSA.h?rev=299486&r1=299485&r2=299486&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Transforms/Utils/MemorySSA.h (original)
+++ llvm/trunk/include/llvm/Transforms/Utils/MemorySSA.h Tue Apr 4 18:43:10 2017
@@ -246,6 +246,17 @@ public:
return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal;
}
+ // Sadly, these have to be public because they are needed in some of the
+ // iterators.
+ virtual bool isOptimized() const = 0;
+ virtual MemoryAccess *getOptimized() const = 0;
+ virtual void setOptimized(MemoryAccess *) = 0;
+
+ /// \brief Reset the ID of what this MemoryUse was optimized to, causing it to
+ /// be rewalked by the walker if necessary.
+ /// This really should only be called by tests.
+ virtual void resetOptimized() = 0;
+
protected:
friend class MemorySSA;
friend class MemorySSAUpdater;
@@ -254,8 +265,13 @@ protected:
: MemoryAccess(C, Vty, BB, 1), MemoryInst(MI) {
setDefiningAccess(DMA);
}
-
- void setDefiningAccess(MemoryAccess *DMA) { setOperand(0, DMA); }
+ void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false) {
+ if (!Optimized) {
+ setOperand(0, DMA);
+ return;
+ }
+ setOptimized(DMA);
+ }
private:
Instruction *MemoryInst;
@@ -288,20 +304,21 @@ public:
void print(raw_ostream &OS) const override;
- void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false) {
- if (Optimized)
- OptimizedID = DMA->getID();
- MemoryUseOrDef::setDefiningAccess(DMA);
+ virtual void setOptimized(MemoryAccess *DMA) override {
+ OptimizedID = DMA->getID();
+ setOperand(0, DMA);
}
- bool isOptimized() const {
+ virtual bool isOptimized() const override {
return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
}
- /// \brief Reset the ID of what this MemoryUse was optimized to, causing it to
- /// be rewalked by the walker if necessary.
- /// This really should only be called by tests.
- void resetOptimized() { OptimizedID = INVALID_MEMORYACCESS_ID; }
+ virtual MemoryAccess *getOptimized() const override {
+ return getDefiningAccess();
+ }
+ virtual void resetOptimized() override {
+ OptimizedID = INVALID_MEMORYACCESS_ID;
+ }
protected:
friend class MemorySSA;
@@ -333,7 +350,8 @@ public:
MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB,
unsigned Ver)
- : MemoryUseOrDef(C, DMA, MemoryDefVal, MI, BB), ID(Ver) {}
+ : MemoryUseOrDef(C, DMA, MemoryDefVal, MI, BB), ID(Ver),
+ Optimized(nullptr), OptimizedID(INVALID_MEMORYACCESS_ID) {}
// allocate space for exactly one operand
void *operator new(size_t s) { return User::operator new(s, 1); }
@@ -343,6 +361,19 @@ public:
return MA->getValueID() == MemoryDefVal;
}
+ virtual void setOptimized(MemoryAccess *MA) override {
+ Optimized = MA;
+ OptimizedID = getDefiningAccess()->getID();
+ }
+ virtual MemoryAccess *getOptimized() const override { return Optimized; }
+ virtual bool isOptimized() const override {
+ return getOptimized() && getDefiningAccess() &&
+ OptimizedID == getDefiningAccess()->getID();
+ }
+ virtual void resetOptimized() override {
+ OptimizedID = INVALID_MEMORYACCESS_ID;
+ }
+
void print(raw_ostream &OS) const override;
protected:
@@ -352,6 +383,8 @@ protected:
private:
const unsigned ID;
+ MemoryAccess *Optimized;
+ unsigned int OptimizedID;
};
template <>
@@ -1060,11 +1093,20 @@ upward_defs(const MemoryAccessPair &Pair
return make_range(upward_defs_begin(Pair), upward_defs_end());
}
-/// Walks the defining uses of MemoryDefs. Stops after we hit something that has
-/// no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when comparing
-/// against a null def_chain_iterator, this will compare equal only after
-/// walking said Phi/liveOnEntry.
-template <class T>
+/// Walks the defining accesses of MemoryDefs. Stops after we hit something that
+/// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when
+/// comparing against a null def_chain_iterator, this will compare equal only
+/// after walking said Phi/liveOnEntry.
+///
+/// The UseOptimizedChain flag specifies whether to walk the clobbering
+/// access chain, or all the accesses.
+///
+/// Normally, MemoryDefs are all just def/use linked together, so a def_chain
+/// on a MemoryDef will walk all MemoryDefs above it in the program until it
+/// hits a phi node. The optimized chain instead walks the clobbering access of
+/// a store, so if you just want to find, given a store, the next access that
+/// clobbers the same memory, you want the optimized chain.
+template <class T, bool UseOptimizedChain = false>
struct def_chain_iterator
: public iterator_facade_base<def_chain_iterator<T>,
std::forward_iterator_tag, MemoryAccess *> {
@@ -1075,10 +1117,15 @@ struct def_chain_iterator
def_chain_iterator &operator++() {
// N.B. liveOnEntry has a null defining access.
- if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
- MA = MUD->getDefiningAccess();
- else
+ if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
+ if (UseOptimizedChain && MUD->isOptimized())
+ MA = MUD->getOptimized();
+ else
+ MA = MUD->getDefiningAccess();
+ } else {
MA = nullptr;
+ }
+
return *this;
}
@@ -1098,6 +1145,12 @@ def_chain(T MA, MemoryAccess *UpTo = nul
return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo));
}
+template <class T>
+inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) {
+ return make_range(def_chain_iterator<T, true>(MA),
+ def_chain_iterator<T, true>(nullptr));
+}
+
} // end namespace llvm
#endif // LLVM_TRANSFORMS_UTILS_MEMORYSSA_H
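
For reference, an illustrative sketch (not part of the commit) contrasting the
two chains described above: the plain def_chain visits every def above the
starting access until a MemoryPhi or liveOnEntry, while the optimized variant
follows a cached clobbering access when one is available. The manual loop
below mirrors the UseOptimizedChain branch of operator++.

#include "llvm/Support/Casting.h"
#include "llvm/Transforms/Utils/MemorySSA.h"
using namespace llvm;

// Illustrative only: walk the plain def chain, then walk the optimized
// (clobbering) chain by hand.
static void walkBothChains(MemoryDef *MD) {
  MemoryAccess *Start = MD;

  // Plain chain: every MemoryUseOrDef above Start, clobbering or not.
  for (MemoryAccess *MA : def_chain(Start))
    (void)MA; // e.g. inspect each access in turn

  // Optimized chain: jump to the cached clobber when one is present,
  // otherwise fall back to the ordinary defining access.
  MemoryAccess *MA = Start;
  while (auto *MUD = dyn_cast_or_null<MemoryUseOrDef>(MA))
    MA = MUD->isOptimized() ? MUD->getOptimized() : MUD->getDefiningAccess();
  // MA is now a MemoryPhi or null (liveOnEntry has a null defining access).
}
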
Modified: llvm/trunk/lib/Transforms/Utils/MemorySSA.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Utils/MemorySSA.cpp?rev=299486&r1=299485&r2=299486&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Utils/MemorySSA.cpp (original)
+++ llvm/trunk/lib/Transforms/Utils/MemorySSA.cpp Tue Apr 4 18:43:10 2017
@@ -2181,9 +2181,9 @@ MemorySSA::CachingWalker::getClobberingM
// If this is an already optimized use or def, return the optimized result.
// Note: Currently, we do not store the optimized def result because we'd need
// a separate field, since we can't use it as the defining access.
- if (MemoryUse *MU = dyn_cast<MemoryUse>(StartingAccess))
- if (MU->isOptimized())
- return MU->getDefiningAccess();
+ if (auto *MUD = dyn_cast<MemoryUseOrDef>(StartingAccess))
+ if (MUD->isOptimized())
+ return MUD->getOptimized();
const Instruction *I = StartingAccess->getMemoryInst();
UpwardsMemoryQuery Q(I, StartingAccess);
@@ -2199,8 +2199,8 @@ MemorySSA::CachingWalker::getClobberingM
if (isUseTriviallyOptimizableToLiveOnEntry(*MSSA->AA, I)) {
MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
Cache.insert(StartingAccess, LiveOnEntry, Q.StartingLoc, Q.IsCall);
- if (MemoryUse *MU = dyn_cast<MemoryUse>(StartingAccess))
- MU->setDefiningAccess(LiveOnEntry, true);
+ if (auto *MUD = dyn_cast<MemoryUseOrDef>(StartingAccess))
+ MUD->setOptimized(LiveOnEntry);
return LiveOnEntry;
}
@@ -2217,8 +2217,8 @@ MemorySSA::CachingWalker::getClobberingM
DEBUG(dbgs() << *DefiningAccess << "\n");
DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
DEBUG(dbgs() << *Result << "\n");
- if (MemoryUse *MU = dyn_cast<MemoryUse>(StartingAccess))
- MU->setDefiningAccess(Result, true);
+ if (auto *MUD = dyn_cast<MemoryUseOrDef>(StartingAccess))
+ MUD->setOptimized(Result);
return Result;
}
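
As an aside, an illustrative sketch (not from the commit) of the cache
lifecycle the walker now maintains for defs as well as uses: resetOptimized(),
which the header describes as intended mainly for tests, drops the cached
result so the next query re-walks and re-caches.

#include "llvm/Transforms/Utils/MemorySSA.h"
using namespace llvm;

// Illustrative only: drop a def's cached clobber and force a fresh walk.
static void recomputeClobber(MemorySSA &MSSA, MemoryDef *MD) {
  MD->resetOptimized(); // isOptimized() is now false
  // This query walks upward again and re-caches the result via setOptimized().
  MemoryAccess *Fresh = MSSA.getWalker()->getClobberingMemoryAccess(MD);
  (void)Fresh;
}
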
Modified: llvm/trunk/lib/Transforms/Utils/MemorySSAUpdater.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Utils/MemorySSAUpdater.cpp?rev=299486&r1=299485&r2=299486&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Utils/MemorySSAUpdater.cpp (original)
+++ llvm/trunk/lib/Transforms/Utils/MemorySSAUpdater.cpp Tue Apr 4 18:43:10 2017
@@ -451,8 +451,8 @@ void MemorySSAUpdater::removeMemoryAcces
while (!MA->use_empty()) {
Use &U = *MA->use_begin();
- if (MemoryUse *MU = dyn_cast<MemoryUse>(U.getUser()))
- MU->resetOptimized();
+ if (auto *MUD = dyn_cast<MemoryUseOrDef>(U.getUser()))
+ MUD->resetOptimized();
U.set(NewDefTarget);
}
}
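
Finally, an illustrative sketch (hypothetical scenario, not part of the
commit) of why the updater change matters: invalidation now covers MemoryDefs
too, so a def whose clobber was cached through the removed access does not
keep a stale result.

#include "llvm/Transforms/Utils/MemorySSA.h"
#include "llvm/Transforms/Utils/MemorySSAUpdater.h"
using namespace llvm;

// Hypothetical scenario: Dead is about to be removed, and Dependent is a
// MemoryDef that uses Dead (e.g. as its defining access) and has a cached
// clobber.
static void removeAndInvalidate(MemorySSAUpdater &Updater, MemoryAccess *Dead,
                                MemoryDef *Dependent) {
  // removeMemoryAccess retargets Dependent's use of Dead and, with this patch,
  // also calls resetOptimized() on it (previously only MemoryUses were reset),
  // so Dependent->isOptimized() is false afterwards and the next walker query
  // recomputes its clobber from scratch.
  Updater.removeMemoryAccess(Dead);
  (void)Dependent;
}
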