[llvm] f96428e - [MemorySSA] Don't optimize uses during construction

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Fri Mar 18 01:56:24 PDT 2022


Author: Nikita Popov
Date: 2022-03-18T09:56:16+01:00
New Revision: f96428e16de2181b414e39234b4d366c83cc52a8

URL: https://github.com/llvm/llvm-project/commit/f96428e16de2181b414e39234b4d366c83cc52a8
DIFF: https://github.com/llvm/llvm-project/commit/f96428e16de2181b414e39234b4d366c83cc52a8.diff

LOG: [MemorySSA] Don't optimize uses during construction

This changes MemorySSA to be constructed in unoptimized form.
MemorySSA::ensureOptimizedUses() can be called to optimize all
uses (once). This should be done by passes where having optimized
uses is beneficial, either because we're going to query all uses
anyway, or because we're doing def-use walks.
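
A minimal sketch of the intended opt-in pattern, assuming a hypothetical
consumer pass; only the ensureOptimizedUses() call itself is from this
patch:

    #include "llvm/Analysis/MemorySSA.h"
    #include "llvm/IR/Function.h"
    using namespace llvm;

    // Hypothetical consumer that inspects the defining access of most uses,
    // so eager use optimization pays off.
    static void visitUseDefiningAccesses(Function &F, MemorySSA &MSSA) {
      // Opt in once; this is a no-op if uses were already optimized.
      MSSA.ensureOptimizedUses();
      for (BasicBlock &BB : F)
        if (const auto *Accesses = MSSA.getBlockAccesses(&BB))
          for (const MemoryAccess &MA : *Accesses)
            if (const auto *MU = dyn_cast<MemoryUse>(&MA))
              // After ensureOptimizedUses(), this is usually the clobbering
              // access rather than just the nearest dominating MemoryDef.
              (void)MU->getDefiningAccess();
    }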

This should help reduce the compile-time impact of MemorySSA for
some use cases (the reason why I started looking into this is
D117926): such passes can avoid optimizing all uses upfront and
instead optimize only those that are actually queried.

In fact, we have an existing use case for this: EarlyCSE. Disabling
eager use optimization there gives a significant compile-time
improvement, because EarlyCSE generally only queries clobbers for a
subset of all uses (that EarlyCSE change is not included in this
patch).
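
For contrast, a rough sketch of the lazier pattern such a consumer could
use instead of ensureOptimizedUses() (not part of this patch; the helper
name is made up):

    #include "llvm/Analysis/MemorySSA.h"
    #include "llvm/IR/Instruction.h"
    using namespace llvm;

    // Resolve the clobber for a single instruction on demand; the walker
    // optimizes (and caches) just this one access.
    static MemoryAccess *getClobberFor(MemorySSA &MSSA, Instruction *I) {
      if (MemoryUseOrDef *MA = MSSA.getMemoryAccess(I))
        return MSSA.getWalker()->getClobberingMemoryAccess(MA);
      return nullptr;
    }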

Differential Revision: https://reviews.llvm.org/D121381

Added: 
    

Modified: 
    llvm/include/llvm/Analysis/MemorySSA.h
    llvm/lib/Analysis/MemorySSA.cpp
    llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
    llvm/lib/Transforms/Scalar/EarlyCSE.cpp
    llvm/lib/Transforms/Scalar/GVNHoist.cpp
    llvm/lib/Transforms/Scalar/LICM.cpp
    llvm/unittests/Analysis/MemorySSATest.cpp

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/Analysis/MemorySSA.h b/llvm/include/llvm/Analysis/MemorySSA.h
index fa333001156b0..8cadb6a4c9121 100644
--- a/llvm/include/llvm/Analysis/MemorySSA.h
+++ b/llvm/include/llvm/Analysis/MemorySSA.h
@@ -346,9 +346,9 @@ class MemoryUse final : public MemoryUseOrDef {
     setOperand(0, DMA);
   }
 
-  /// The defining access of a MemoryUses are always optimized if queried from
-  /// outside MSSA construction itself.  This result is only useful inside
-  /// the MSSA implementation.
+  /// Whether the MemoryUse is optimized. If ensureOptimizedUses() was called,
+  /// uses will usually be optimized, but this is not guaranteed (e.g. due to
+  /// invalidation and optimization limits).
   bool isOptimized() const {
     return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
   }
@@ -801,6 +801,13 @@ class MemorySSA {
   /// about the beginning or end of a block.
   enum InsertionPlace { Beginning, End, BeforeTerminator };
 
+  /// By default, uses are *not* optimized during MemorySSA construction.
+  /// Calling this method will attempt to optimize all MemoryUses, if this has
+  /// not happened yet for this MemorySSA instance. This should be done if you
+  /// plan to query the clobbering access for most uses, or if you walk the
+  /// def-use chain of uses.
+  void ensureOptimizedUses();
+
 protected:
   // Used by Memory SSA dumpers and wrapper pass
   friend class MemorySSAPrinterLegacyPass;
@@ -903,6 +910,7 @@ class MemorySSA {
   std::unique_ptr<CachingWalker<AliasAnalysis>> Walker;
   std::unique_ptr<SkipSelfWalker<AliasAnalysis>> SkipWalker;
   unsigned NextID = 0;
+  bool IsOptimized = false;
 };
 
 /// Enables verification of MemorySSA.
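
To make the new isOptimized() contract concrete, the sketch below shows
roughly what a caller (or the caching walker itself) can do for a use that
may or may not have been eagerly optimized; the helper name is made up:

    #include "llvm/Analysis/MemorySSA.h"
    using namespace llvm;

    // Prefer the cached result when the use is already optimized; otherwise
    // fall back to the walker, which optimizes and caches this use.
    static MemoryAccess *clobberOf(MemorySSA &MSSA, MemoryUse *MU) {
      if (MU->isOptimized())
        return MU->getOptimized();
      return MSSA.getWalker()->getClobberingMemoryAccess(MU);
    }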

diff --git a/llvm/lib/Analysis/MemorySSA.cpp b/llvm/lib/Analysis/MemorySSA.cpp
index 7e7a1ad3215d0..fc9de56a5e5b8 100644
--- a/llvm/lib/Analysis/MemorySSA.cpp
+++ b/llvm/lib/Analysis/MemorySSA.cpp
@@ -1397,6 +1397,9 @@ void MemorySSA::OptimizeUses::optimizeUsesInBlock(
       continue;
     }
 
+    if (MU->isOptimized())
+      continue;
+
     if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
       MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
       continue;
@@ -1585,10 +1588,6 @@ void MemorySSA::buildMemorySSA(BatchAAResults &BAA) {
   SmallPtrSet<BasicBlock *, 16> Visited;
   renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);
 
-  ClobberWalkerBase<BatchAAResults> WalkerBase(this, &BAA, DT);
-  CachingWalker<BatchAAResults> WalkerLocal(this, &WalkerBase);
-  OptimizeUses(this, &WalkerLocal, &BAA, DT).optimizeUses();
-
   // Mark the uses in unreachable blocks as live on entry, so that they go
   // somewhere.
   for (auto &BB : F)
@@ -2178,6 +2177,17 @@ bool MemorySSA::dominates(const MemoryAccess *Dominator,
   return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
 }
 
+void MemorySSA::ensureOptimizedUses() {
+  if (IsOptimized)
+    return;
+
+  BatchAAResults BatchAA(*AA);
+  ClobberWalkerBase<BatchAAResults> WalkerBase(this, &BatchAA, DT);
+  CachingWalker<BatchAAResults> WalkerLocal(this, &WalkerBase);
+  OptimizeUses(this, &WalkerLocal, &BatchAA, DT).optimizeUses();
+  IsOptimized = true;
+}
+
 void MemoryAccess::print(raw_ostream &OS) const {
   switch (getValueID()) {
   case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
@@ -2350,6 +2360,7 @@ struct DOTGraphTraits<DOTFuncMSSAInfo *> : public DefaultDOTGraphTraits {
 
 bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
   auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
+  MSSA.ensureOptimizedUses();
   if (DotCFGMSSA != "") {
     DOTFuncMSSAInfo CFGInfo(F, MSSA);
     WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA);
@@ -2382,6 +2393,7 @@ bool MemorySSAAnalysis::Result::invalidate(
 PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
                                             FunctionAnalysisManager &AM) {
   auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
+  MSSA.ensureOptimizedUses();
   if (DotCFGMSSA != "") {
     DOTFuncMSSAInfo CFGInfo(F, MSSA);
     WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA);

diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 60196e3f3d934..2c9fec88885ba 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -1933,6 +1933,7 @@ static bool eliminateDeadStores(Function &F, AliasAnalysis &AA, MemorySSA &MSSA,
                                 const LoopInfo &LI) {
   bool MadeChange = false;
 
+  MSSA.ensureOptimizedUses();
   DSEState State(F, AA, MSSA, DT, PDT, TLI, LI);
   // For each store:
   for (unsigned I = 0; I < State.MemDefs.size(); I++) {

diff --git a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
index 331e9b429d4ba..ac0cc9560c69d 100644
--- a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -598,7 +598,10 @@ class EarlyCSE {
            const TargetTransformInfo &TTI, DominatorTree &DT,
            AssumptionCache &AC, MemorySSA *MSSA)
       : TLI(TLI), TTI(TTI), DT(DT), AC(AC), SQ(DL, &TLI, &DT, &AC), MSSA(MSSA),
-        MSSAUpdater(std::make_unique<MemorySSAUpdater>(MSSA)) {}
+        MSSAUpdater(std::make_unique<MemorySSAUpdater>(MSSA)) {
+    if (MSSA)
+      MSSA->ensureOptimizedUses();
+  }
 
   bool run();
 

diff --git a/llvm/lib/Transforms/Scalar/GVNHoist.cpp b/llvm/lib/Transforms/Scalar/GVNHoist.cpp
index 0cfee727782bf..4028218b3827b 100644
--- a/llvm/lib/Transforms/Scalar/GVNHoist.cpp
+++ b/llvm/lib/Transforms/Scalar/GVNHoist.cpp
@@ -259,7 +259,9 @@ class GVNHoist {
   GVNHoist(DominatorTree *DT, PostDominatorTree *PDT, AliasAnalysis *AA,
            MemoryDependenceResults *MD, MemorySSA *MSSA)
       : DT(DT), PDT(PDT), AA(AA), MD(MD), MSSA(MSSA),
-        MSSAUpdater(std::make_unique<MemorySSAUpdater>(MSSA)) {}
+        MSSAUpdater(std::make_unique<MemorySSAUpdater>(MSSA)) {
+    MSSA->ensureOptimizedUses();
+  }
 
   bool run(Function &F);
 

diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp
index 0e52a7212c4fc..458d4cfbd4823 100644
--- a/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -374,6 +374,7 @@ bool LoopInvariantCodeMotion::runOnLoop(
   bool Changed = false;
 
   assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");
+  MSSA->ensureOptimizedUses();
 
   // If this loop has metadata indicating that LICM is not to be performed then
   // just exit.

diff --git a/llvm/unittests/Analysis/MemorySSATest.cpp b/llvm/unittests/Analysis/MemorySSATest.cpp
index 4db076715c389..2ee208099261f 100644
--- a/llvm/unittests/Analysis/MemorySSATest.cpp
+++ b/llvm/unittests/Analysis/MemorySSATest.cpp
@@ -1026,6 +1026,7 @@ TEST_F(MemorySSATest, TestLoadMustAlias) {
 
   setupAnalyses();
   MemorySSA &MSSA = *Analyses->MSSA;
+  MSSA.ensureOptimizedUses();
 
   unsigned I = 0;
   for (LoadInst *V : {LA1, LA2}) {
@@ -1119,6 +1120,7 @@ TEST_F(MemorySSATest, TestLoadMayAlias) {
 
   setupAnalyses();
   MemorySSA &MSSA = *Analyses->MSSA;
+  MSSA.ensureOptimizedUses();
 
   unsigned I = 0;
   for (LoadInst *V : {LA1, LB1}) {

More information about the llvm-commits mailing list