[llvm] 48edb90 - [MemorySSA] Use BatchAA for clobber walker

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Mon Dec 5 23:29:27 PST 2022


Author: Nikita Popov
Date: 2022-12-06T08:29:11+01:00
New Revision: 48edb906d52899546317022c70cd674e4c06217e

URL: https://github.com/llvm/llvm-project/commit/48edb906d52899546317022c70cd674e4c06217e
DIFF: https://github.com/llvm/llvm-project/commit/48edb906d52899546317022c70cd674e4c06217e.diff

LOG: [MemorySSA] Use BatchAA for clobber walker

While MemorySSA use optimization was already using BatchAA, the
publicly exposed MSSA walkers were still using plain AAResults. This
is unfortunate, because clobber walking is expected to make many
repeated AA queries, which BatchAA can answer from its cache.
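
For context, BatchAAResults is a caching wrapper around plain
AAResults: identical queries within one batch are computed once and
then served from the cache. A minimal sketch of that behavior
(illustrative only; the helper function and the two locations are
hypothetical):

    #include "llvm/Analysis/AliasAnalysis.h"
    #include "llvm/Analysis/MemoryLocation.h"
    using namespace llvm;

    // Query the same location pair twice through one batch; the second
    // call is answered from BatchAA's cache instead of recomputing the
    // alias relation.
    AliasResult aliasTwice(AAResults &AA, const MemoryLocation &LocA,
                           const MemoryLocation &LocB) {
      BatchAAResults BAA(AA);
      (void)BAA.alias(LocA, LocB);  // computed once, result cached
      return BAA.alias(LocA, LocB); // cache hit, no recomputation
    }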

This patch makes the clobber API accept a BatchAAResults instance.
The plain APIs are kept as wrappers that create a BatchAAResults
instance for the duration of a single query. In the future, the
explicit BatchAAResults arguments will be used to share AA results
across queries, not just within one query.
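
A usage sketch of the new API (the function name and loop are
hypothetical; the walker signatures match this patch): a caller can
create one BatchAAResults up front and route every clobber query
through it, so AA results are shared across the whole walk rather
than recomputed per query:

    #include "llvm/Analysis/AliasAnalysis.h"
    #include "llvm/Analysis/MemorySSA.h"
    #include "llvm/IR/InstIterator.h"
    using namespace llvm;

    void walkAllClobbers(Function &F, MemorySSA &MSSA) {
      MemorySSAWalker *Walker = MSSA.getWalker();
      // One batch shared by every query below; the plain
      // getClobberingMemoryAccess(MA) wrapper would instead create a
      // fresh BatchAAResults per call.
      BatchAAResults BAA(MSSA.getAA());
      for (Instruction &I : instructions(F))
        if (MemoryAccess *MA = MSSA.getMemoryAccess(&I))
          (void)Walker->getClobberingMemoryAccess(MA, BAA);
    }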

Differential Revision: https://reviews.llvm.org/D136164

Added: 
    

Modified: 
    llvm/include/llvm/Analysis/MemorySSA.h
    llvm/lib/Analysis/MemorySSA.cpp

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/Analysis/MemorySSA.h b/llvm/include/llvm/Analysis/MemorySSA.h
index 2b8c1c26be4ec..d861057cb8cb6 100644
--- a/llvm/include/llvm/Analysis/MemorySSA.h
+++ b/llvm/include/llvm/Analysis/MemorySSA.h
@@ -794,6 +794,8 @@ class MemorySSA {
   /// def-use chain of uses.
   void ensureOptimizedUses();
 
+  AliasAnalysis &getAA() { return *AA; }
+
 protected:
   // Used by Memory SSA dumpers and wrapper pass
   friend class MemorySSAPrinterLegacyPass;
@@ -840,12 +842,12 @@ class MemorySSA {
                                       bool CreationMustSucceed = true);
 
 private:
-  template <class AliasAnalysisType> class ClobberWalkerBase;
-  template <class AliasAnalysisType> class CachingWalker;
-  template <class AliasAnalysisType> class SkipSelfWalker;
+  class ClobberWalkerBase;
+  class CachingWalker;
+  class SkipSelfWalker;
   class OptimizeUses;
 
-  CachingWalker<AliasAnalysis> *getWalkerImpl();
+  CachingWalker *getWalkerImpl();
   void buildMemorySSA(BatchAAResults &BAA);
 
   void prepareForMoveTo(MemoryAccess *, BasicBlock *);
@@ -892,9 +894,9 @@ class MemorySSA {
   mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;
 
   // Memory SSA building info
-  std::unique_ptr<ClobberWalkerBase<AliasAnalysis>> WalkerBase;
-  std::unique_ptr<CachingWalker<AliasAnalysis>> Walker;
-  std::unique_ptr<SkipSelfWalker<AliasAnalysis>> SkipWalker;
+  std::unique_ptr<ClobberWalkerBase> WalkerBase;
+  std::unique_ptr<CachingWalker> Walker;
+  std::unique_ptr<SkipSelfWalker> SkipWalker;
   unsigned NextID = 0;
   bool IsOptimized = false;
 };
@@ -1041,15 +1043,17 @@ class MemorySSAWalker {
   ///
   /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
   /// in the if (a) branch.
-  MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
+  MemoryAccess *getClobberingMemoryAccess(const Instruction *I,
+                                          BatchAAResults &AA) {
     MemoryAccess *MA = MSSA->getMemoryAccess(I);
     assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
-    return getClobberingMemoryAccess(MA);
+    return getClobberingMemoryAccess(MA, AA);
   }
 
   /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
   /// but takes a MemoryAccess instead of an Instruction.
-  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0;
+  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
+                                                  BatchAAResults &AA) = 0;
 
   /// Given a potentially clobbering memory access and a new location,
   /// calling this will give you the nearest dominating clobbering MemoryAccess
@@ -1063,7 +1067,24 @@ class MemorySSAWalker {
   /// will return that MemoryDef, whereas the above would return the clobber
   /// starting from the use side of  the memory def.
   virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
-                                                  const MemoryLocation &) = 0;
+                                                  const MemoryLocation &,
+                                                  BatchAAResults &AA) = 0;
+
+  MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
+    BatchAAResults BAA(MSSA->getAA());
+    return getClobberingMemoryAccess(I, BAA);
+  }
+
+  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) {
+    BatchAAResults BAA(MSSA->getAA());
+    return getClobberingMemoryAccess(MA, BAA);
+  }
+
+  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
+                                          const MemoryLocation &Loc) {
+    BatchAAResults BAA(MSSA->getAA());
+    return getClobberingMemoryAccess(MA, Loc, BAA);
+  }
 
   /// Given a memory access, invalidate anything this walker knows about
   /// that access.
@@ -1086,9 +1107,11 @@ class DoNothingMemorySSAWalker final : public MemorySSAWalker {
   // getClobberingMemoryAccess.
   using MemorySSAWalker::getClobberingMemoryAccess;
 
-  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
-                                          const MemoryLocation &) override;
+                                          BatchAAResults &) override;
+  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
+                                          const MemoryLocation &,
+                                          BatchAAResults &) override;
 };
 
 using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;

diff --git a/llvm/lib/Analysis/MemorySSA.cpp b/llvm/lib/Analysis/MemorySSA.cpp
index c729f0ab30774..f59f34eaf8543 100644
--- a/llvm/lib/Analysis/MemorySSA.cpp
+++ b/llvm/lib/Analysis/MemorySSA.cpp
@@ -125,10 +125,11 @@ class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
 class MemorySSAWalkerAnnotatedWriter : public AssemblyAnnotationWriter {
   MemorySSA *MSSA;
   MemorySSAWalker *Walker;
+  BatchAAResults BAA;
 
 public:
   MemorySSAWalkerAnnotatedWriter(MemorySSA *M)
-      : MSSA(M), Walker(M->getWalker()) {}
+      : MSSA(M), Walker(M->getWalker()), BAA(M->getAA()) {}
 
   void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                 formatted_raw_ostream &OS) override {
@@ -139,7 +140,7 @@ class MemorySSAWalkerAnnotatedWriter : public AssemblyAnnotationWriter {
   void emitInstructionAnnot(const Instruction *I,
                             formatted_raw_ostream &OS) override {
     if (MemoryAccess *MA = MSSA->getMemoryAccess(I)) {
-      MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(MA);
+      MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(MA, BAA);
       OS << "; " << *MA;
       if (Clobber) {
         OS << " - clobbered by ";
@@ -372,8 +373,7 @@ struct UpwardsMemoryQuery {
 
 } // end anonymous namespace
 
-template <typename AliasAnalysisType>
-static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
+static bool isUseTriviallyOptimizableToLiveOnEntry(BatchAAResults &AA,
                                                    const Instruction *I) {
   // If the memory can't be changed, then loads of the memory can't be
   // clobbered.
@@ -398,11 +398,10 @@ static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
 /// \param AA        The AliasAnalysis we used for our search.
 /// \param AllowImpreciseClobber Always false, unless we do relaxed verify.
 
-template <typename AliasAnalysisType>
 LLVM_ATTRIBUTE_UNUSED static void
 checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
                    const MemoryLocation &StartLoc, const MemorySSA &MSSA,
-                   const UpwardsMemoryQuery &Query, AliasAnalysisType &AA,
+                   const UpwardsMemoryQuery &Query, BatchAAResults &AA,
                    bool AllowImpreciseClobber = false) {
   assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");
 
@@ -493,7 +492,7 @@ namespace {
 
 /// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
 /// in one class.
-template <class AliasAnalysisType> class ClobberWalker {
+class ClobberWalker {
   /// Save a few bytes by using unsigned instead of size_t.
   using ListIndex = unsigned;
 
@@ -517,8 +516,8 @@ template <class AliasAnalysisType> class ClobberWalker {
   };
 
   const MemorySSA &MSSA;
-  AliasAnalysisType &AA;
   DominatorTree &DT;
+  BatchAAResults *AA;
   UpwardsMemoryQuery *Query;
   unsigned *UpwardWalkLimit;
 
@@ -584,7 +583,7 @@ template <class AliasAnalysisType> class ClobberWalker {
         if (!--*UpwardWalkLimit)
           return {Current, true};
 
-        if (instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA))
+        if (instructionClobbersQuery(MD, Desc.Loc, Query->Inst, *AA))
           return {MD, true};
       }
     }
@@ -928,14 +927,14 @@ template <class AliasAnalysisType> class ClobberWalker {
   }
 
 public:
-  ClobberWalker(const MemorySSA &MSSA, AliasAnalysisType &AA, DominatorTree &DT)
-      : MSSA(MSSA), AA(AA), DT(DT) {}
+  ClobberWalker(const MemorySSA &MSSA, DominatorTree &DT)
+      : MSSA(MSSA), DT(DT) {}
 
-  AliasAnalysisType *getAA() { return &AA; }
   /// Finds the nearest clobber for the given query, optimizing phis if
   /// possible.
-  MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q,
-                            unsigned &UpWalkLimit) {
+  MemoryAccess *findClobber(BatchAAResults &BAA, MemoryAccess *Start,
+                            UpwardsMemoryQuery &Q, unsigned &UpWalkLimit) {
+    AA = &BAA;
     Query = &Q;
     UpwardWalkLimit = &UpWalkLimit;
     // Starting limit must be > 0.
@@ -965,7 +964,7 @@ template <class AliasAnalysisType> class ClobberWalker {
 
 #ifdef EXPENSIVE_CHECKS
     if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0)
-      checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
+      checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, BAA);
 #endif
     return Result;
   }
@@ -991,63 +990,65 @@ struct RenamePassData {
 
 namespace llvm {
 
-template <class AliasAnalysisType> class MemorySSA::ClobberWalkerBase {
-  ClobberWalker<AliasAnalysisType> Walker;
+class MemorySSA::ClobberWalkerBase {
+  ClobberWalker Walker;
   MemorySSA *MSSA;
 
 public:
-  ClobberWalkerBase(MemorySSA *M, AliasAnalysisType *A, DominatorTree *D)
-      : Walker(*M, *A, *D), MSSA(M) {}
+  ClobberWalkerBase(MemorySSA *M, DominatorTree *D) : Walker(*M, *D), MSSA(M) {}
 
   MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
                                               const MemoryLocation &,
-                                              unsigned &);
+                                              BatchAAResults &, unsigned &);
   // Third argument (bool), defines whether the clobber search should skip the
   // original queried access. If true, there will be a follow-up query searching
   // for a clobber access past "self". Note that the Optimized access is not
   // updated if a new clobber is found by this SkipSelf search. If this
   // additional query becomes heavily used we may decide to cache the result.
   // Walker instantiations will decide how to set the SkipSelf bool.
-  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, unsigned &, bool,
+  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, BatchAAResults &,
+                                              unsigned &, bool,
                                               bool UseInvariantGroup = true);
 };
 
 /// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
 /// longer does caching on its own, but the name has been retained for the
 /// moment.
-template <class AliasAnalysisType>
 class MemorySSA::CachingWalker final : public MemorySSAWalker {
-  ClobberWalkerBase<AliasAnalysisType> *Walker;
+  ClobberWalkerBase *Walker;
 
 public:
-  CachingWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
+  CachingWalker(MemorySSA *M, ClobberWalkerBase *W)
       : MemorySSAWalker(M), Walker(W) {}
   ~CachingWalker() override = default;
 
   using MemorySSAWalker::getClobberingMemoryAccess;
 
-  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
-    return Walker->getClobberingMemoryAccessBase(MA, UWL, false);
+  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, BatchAAResults &BAA,
+                                          unsigned &UWL) {
+    return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, false);
   }
   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                           const MemoryLocation &Loc,
-                                          unsigned &UWL) {
-    return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
+                                          BatchAAResults &BAA, unsigned &UWL) {
+    return Walker->getClobberingMemoryAccessBase(MA, Loc, BAA, UWL);
   }
   // This method is not accessible outside of this file.
-  MemoryAccess *getClobberingMemoryAccessWithoutInvariantGroup(MemoryAccess *MA,
-                                                               unsigned &UWL) {
-    return Walker->getClobberingMemoryAccessBase(MA, UWL, false, false);
+  MemoryAccess *getClobberingMemoryAccessWithoutInvariantGroup(
+      MemoryAccess *MA, BatchAAResults &BAA, unsigned &UWL) {
+    return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, false, false);
   }
 
-  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
+  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
+                                          BatchAAResults &BAA) override {
     unsigned UpwardWalkLimit = MaxCheckLimit;
-    return getClobberingMemoryAccess(MA, UpwardWalkLimit);
+    return getClobberingMemoryAccess(MA, BAA, UpwardWalkLimit);
   }
   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
-                                          const MemoryLocation &Loc) override {
+                                          const MemoryLocation &Loc,
+                                          BatchAAResults &BAA) override {
     unsigned UpwardWalkLimit = MaxCheckLimit;
-    return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
+    return getClobberingMemoryAccess(MA, Loc, BAA, UpwardWalkLimit);
   }
 
   void invalidateInfo(MemoryAccess *MA) override {
@@ -1056,34 +1057,36 @@ class MemorySSA::CachingWalker final : public MemorySSAWalker {
   }
 };
 
-template <class AliasAnalysisType>
 class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
-  ClobberWalkerBase<AliasAnalysisType> *Walker;
+  ClobberWalkerBase *Walker;
 
 public:
-  SkipSelfWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
+  SkipSelfWalker(MemorySSA *M, ClobberWalkerBase *W)
       : MemorySSAWalker(M), Walker(W) {}
   ~SkipSelfWalker() override = default;
 
   using MemorySSAWalker::getClobberingMemoryAccess;
 
-  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
-    return Walker->getClobberingMemoryAccessBase(MA, UWL, true);
+  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, BatchAAResults &BAA,
+                                          unsigned &UWL) {
+    return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, true);
   }
   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                           const MemoryLocation &Loc,
-                                          unsigned &UWL) {
-    return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
+                                          BatchAAResults &BAA, unsigned &UWL) {
+    return Walker->getClobberingMemoryAccessBase(MA, Loc, BAA, UWL);
   }
 
-  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
+  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
+                                          BatchAAResults &BAA) override {
     unsigned UpwardWalkLimit = MaxCheckLimit;
-    return getClobberingMemoryAccess(MA, UpwardWalkLimit);
+    return getClobberingMemoryAccess(MA, BAA, UpwardWalkLimit);
   }
   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
-                                          const MemoryLocation &Loc) override {
+                                          const MemoryLocation &Loc,
+                                          BatchAAResults &BAA) override {
     unsigned UpwardWalkLimit = MaxCheckLimit;
-    return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
+    return getClobberingMemoryAccess(MA, Loc, BAA, UpwardWalkLimit);
   }
 
   void invalidateInfo(MemoryAccess *MA) override {
@@ -1284,8 +1287,8 @@ namespace llvm {
 /// which is walking bottom-up.
 class MemorySSA::OptimizeUses {
 public:
-  OptimizeUses(MemorySSA *MSSA, CachingWalker<BatchAAResults> *Walker,
-               BatchAAResults *BAA, DominatorTree *DT)
+  OptimizeUses(MemorySSA *MSSA, CachingWalker *Walker, BatchAAResults *BAA,
+               DominatorTree *DT)
       : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}
 
   void optimizeUses();
@@ -1313,7 +1316,7 @@ class MemorySSA::OptimizeUses {
                            DenseMap<MemoryLocOrCall, MemlocStackInfo> &);
 
   MemorySSA *MSSA;
-  CachingWalker<BatchAAResults> *Walker;
+  CachingWalker *Walker;
   BatchAAResults *AA;
   DominatorTree *DT;
 };
@@ -1441,7 +1444,7 @@ void MemorySSA::OptimizeUses::optimizeUsesInBlock(
         // support updates, so don't use it to optimize uses.
         MemoryAccess *Result =
             Walker->getClobberingMemoryAccessWithoutInvariantGroup(
-                MU, UpwardWalkLimit);
+                MU, *AA, UpwardWalkLimit);
         // We are guaranteed to find it or something is wrong.
         while (VersionStack[UpperBound] != Result) {
           assert(UpperBound != 0);
@@ -1556,16 +1559,14 @@ void MemorySSA::buildMemorySSA(BatchAAResults &BAA) {
 
 MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }
 
-MemorySSA::CachingWalker<AliasAnalysis> *MemorySSA::getWalkerImpl() {
+MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() {
   if (Walker)
     return Walker.get();
 
   if (!WalkerBase)
-    WalkerBase =
-        std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);
+    WalkerBase = std::make_unique<ClobberWalkerBase>(this, DT);
 
-  Walker =
-      std::make_unique<CachingWalker<AliasAnalysis>>(this, WalkerBase.get());
+  Walker = std::make_unique<CachingWalker>(this, WalkerBase.get());
   return Walker.get();
 }
 
@@ -1574,11 +1575,9 @@ MemorySSAWalker *MemorySSA::getSkipSelfWalker() {
     return SkipWalker.get();
 
   if (!WalkerBase)
-    WalkerBase =
-        std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);
+    WalkerBase = std::make_unique<ClobberWalkerBase>(this, DT);
 
-  SkipWalker =
-      std::make_unique<SkipSelfWalker<AliasAnalysis>>(this, WalkerBase.get());
+  SkipWalker = std::make_unique<SkipSelfWalker>(this, WalkerBase.get());
   return SkipWalker.get();
  }
 
@@ -2148,8 +2147,8 @@ void MemorySSA::ensureOptimizedUses() {
     return;
 
   BatchAAResults BatchAA(*AA);
-  ClobberWalkerBase<BatchAAResults> WalkerBase(this, &BatchAA, DT);
-  CachingWalker<BatchAAResults> WalkerLocal(this, &WalkerBase);
+  ClobberWalkerBase WalkerBase(this, DT);
+  CachingWalker WalkerLocal(this, &WalkerBase);
   OptimizeUses(this, &WalkerLocal, &BatchAA, DT).optimizeUses();
   IsOptimized = true;
 }
@@ -2418,11 +2417,9 @@ MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
 /// the MemoryAccess that actually clobbers Loc.
 ///
 /// \returns our clobbering memory access
-template <typename AliasAnalysisType>
-MemoryAccess *
-MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
+MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(
     MemoryAccess *StartingAccess, const MemoryLocation &Loc,
-    unsigned &UpwardWalkLimit) {
+    BatchAAResults &BAA, unsigned &UpwardWalkLimit) {
   assert(!isa<MemoryUse>(StartingAccess) && "Use cannot be defining access");
 
   Instruction *I = nullptr;
@@ -2448,7 +2445,7 @@ MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
   // handed something we already believe is the clobbering access.
   // We never set SkipSelf to true in Q in this method.
   MemoryAccess *Clobber =
-      Walker.findClobber(StartingAccess, Q, UpwardWalkLimit);
+      Walker.findClobber(BAA, StartingAccess, Q, UpwardWalkLimit);
   LLVM_DEBUG({
     dbgs() << "Clobber starting at access " << *StartingAccess << "\n";
     if (I)
@@ -2517,11 +2514,9 @@ getInvariantGroupClobberingInstruction(Instruction &I, DominatorTree &DT) {
   return MostDominatingInstruction == &I ? nullptr : MostDominatingInstruction;
 }
 
-template <typename AliasAnalysisType>
-MemoryAccess *
-MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
-    MemoryAccess *MA, unsigned &UpwardWalkLimit, bool SkipSelf,
-    bool UseInvariantGroup) {
+MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(
+    MemoryAccess *MA, BatchAAResults &BAA, unsigned &UpwardWalkLimit,
+    bool SkipSelf, bool UseInvariantGroup) {
   auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
   // If this is a MemoryPhi, we can't do anything.
   if (!StartingAccess)
@@ -2560,7 +2555,7 @@ MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
 
   UpwardsMemoryQuery Q(I, StartingAccess);
 
-  if (isUseTriviallyOptimizableToLiveOnEntry(*Walker.getAA(), I)) {
+  if (isUseTriviallyOptimizableToLiveOnEntry(BAA, I)) {
     MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
     StartingAccess->setOptimized(LiveOnEntry);
     return LiveOnEntry;
@@ -2578,7 +2573,8 @@ MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
       return DefiningAccess;
     }
 
-    OptimizedAccess = Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
+    OptimizedAccess =
+        Walker.findClobber(BAA, DefiningAccess, Q, UpwardWalkLimit);
     StartingAccess->setOptimized(OptimizedAccess);
   } else
     OptimizedAccess = StartingAccess->getOptimized();
@@ -2593,7 +2589,7 @@ MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
       isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) {
     assert(isa<MemoryDef>(Q.OriginalAccess));
     Q.SkipSelfAccess = true;
-    Result = Walker.findClobber(OptimizedAccess, Q, UpwardWalkLimit);
+    Result = Walker.findClobber(BAA, OptimizedAccess, Q, UpwardWalkLimit);
   } else
     Result = OptimizedAccess;
 
@@ -2604,14 +2600,15 @@ MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
 }
 
 MemoryAccess *
-DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
+DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA,
+                                                    BatchAAResults &) {
   if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
     return Use->getDefiningAccess();
   return MA;
 }
 
 MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
-    MemoryAccess *StartingAccess, const MemoryLocation &) {
+    MemoryAccess *StartingAccess, const MemoryLocation &, BatchAAResults &) {
   if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
     return Use->getDefiningAccess();
   return StartingAccess;
