[llvm] ba664d9 - [AA] Move earliest escape tracking from DSE to AA

Nikita Popov via llvm-commits <llvm-commits at lists.llvm.org>
Sat Sep 25 13:47:09 PDT 2021


Author: Nikita Popov
Date: 2021-09-25T22:40:41+02:00
New Revision: ba664d906644e62ac30e9a92edf48391c923992c

URL: https://github.com/llvm/llvm-project/commit/ba664d906644e62ac30e9a92edf48391c923992c
DIFF: https://github.com/llvm/llvm-project/commit/ba664d906644e62ac30e9a92edf48391c923992c.diff

LOG: [AA] Move earliest escape tracking from DSE to AA

This is a follow-up to D109844 (and an alternative to D109907) that
integrates the new "earliest escape" tracking into AliasAnalysis.
This is done by replacing the pre-existing context-free capture
cache in AAQueryInfo with a replaceable (virtual) CaptureInfo object
that has two implementations: SimpleCaptureInfo implements the
previous behavior (checking whether the object is captured anywhere
in the function), while EarliestEscapeInfo implements the new
context-sensitive behavior from DSE.

This combines the "earliest escape" analysis with the full power of
BasicAA: it subsumes the call handling from D109907, considers a
wider range of escape sources, and works with AA recursion. The
compile-time cost is slightly higher than with D109907.
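
As a usage illustration (not part of this patch: the wrapper function,
its name, and its signature are hypothetical, while the
EarliestEscapeInfo/BatchAAResults API is the one added below), a
client can opt in to the context-sensitive capture analysis like this:

    #include "llvm/Analysis/AliasAnalysis.h"
    #include "llvm/Analysis/LoopInfo.h"
    #include "llvm/IR/Dominators.h"

    using namespace llvm;

    // Hypothetical helper: may the access at UseLoc alias DefLoc,
    // treating identified local objects as non-escaping up to their
    // earliest capture instead of using the function-wide capture check?
    static bool mayAliasBeforeEscape(AAResults &AA, DominatorTree &DT,
                                     const LoopInfo &LI,
                                     const MemoryLocation &DefLoc,
                                     const MemoryLocation &UseLoc) {
      EarliestEscapeInfo EI(DT, LI);   // context-sensitive CaptureInfo
      BatchAAResults BatchAA(AA, &EI); // constructor added by this patch
      return BatchAA.alias(DefLoc, UseLoc) != AliasResult::NoAlias;
    }

A client that keeps one EarliestEscapeInfo alive while erasing
instructions (as DSE does) must also call EI.removeInstruction() on
each erased instruction so the cached earliest-capture entries stay
valid.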

Differential Revision: https://reviews.llvm.org/D110368

Added: 
    

Modified: 
    llvm/include/llvm/Analysis/AliasAnalysis.h
    llvm/lib/Analysis/AliasAnalysis.cpp
    llvm/lib/Analysis/BasicAliasAnalysis.cpp
    llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
    llvm/test/Transforms/DeadStoreElimination/captures-before-call.ll
    llvm/test/Transforms/DeadStoreElimination/captures-before-load.ll
    llvm/unittests/Analysis/BasicAliasAnalysisTest.cpp

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/Analysis/AliasAnalysis.h b/llvm/include/llvm/Analysis/AliasAnalysis.h
index 7fec0feb09d5b..2770a1a9b2774 100644
--- a/llvm/include/llvm/Analysis/AliasAnalysis.h
+++ b/llvm/include/llvm/Analysis/AliasAnalysis.h
@@ -61,6 +61,7 @@ class DominatorTree;
 class FenceInst;
 class Function;
 class InvokeInst;
+class LoopInfo;
 class PreservedAnalyses;
 class TargetLibraryInfo;
 class Value;
@@ -378,6 +379,50 @@ createModRefInfo(const FunctionModRefBehavior FMRB) {
   return ModRefInfo(FMRB & static_cast<int>(ModRefInfo::ModRef));
 }
 
+/// Virtual base class for providers of capture information.
+struct CaptureInfo {
+  virtual ~CaptureInfo() = 0;
+  virtual bool isNotCapturedBeforeOrAt(const Value *Object,
+                                       const Instruction *I) = 0;
+};
+
+/// Context-free CaptureInfo provider, which computes and caches whether an
+/// object is captured in the function at all, but does not distinguish whether
+/// it was captured before or after the context instruction.
+class SimpleCaptureInfo final : public CaptureInfo {
+  SmallDenseMap<const Value *, bool, 8> IsCapturedCache;
+
+public:
+  bool isNotCapturedBeforeOrAt(const Value *Object,
+                               const Instruction *I) override;
+};
+
+/// Context-sensitive CaptureInfo provider, which computes and caches the
+/// earliest common dominator closure of all captures. It provides a good
+/// approximation to a precise "captures before" analysis.
+class EarliestEscapeInfo final : public CaptureInfo {
+  DominatorTree &DT;
+  const LoopInfo &LI;
+
+  /// Map from identified local object to an instruction before which it does
+  /// not escape, or nullptr if it never escapes. The "earliest" instruction
+  /// may be a conservative approximation, e.g. the first instruction in the
+  /// function is always a legal choice.
+  DenseMap<const Value *, Instruction *> EarliestEscapes;
+
+  /// Reverse map from instruction to the objects it is the earliest escape for.
+  /// This is used for cache invalidation purposes.
+  DenseMap<Instruction *, TinyPtrVector<const Value *>> Inst2Obj;
+
+public:
+  EarliestEscapeInfo(DominatorTree &DT, const LoopInfo &LI) : DT(DT), LI(LI) {}
+
+  bool isNotCapturedBeforeOrAt(const Value *Object,
+                               const Instruction *I) override;
+
+  void removeInstruction(Instruction *I);
+};
+
 /// Reduced version of MemoryLocation that only stores a pointer and size.
 /// Used for caching AATags independent BasicAA results.
 struct AACacheLoc {
@@ -425,8 +470,7 @@ class AAQueryInfo {
   using AliasCacheT = SmallDenseMap<LocPair, CacheEntry, 8>;
   AliasCacheT AliasCache;
 
-  using IsCapturedCacheT = SmallDenseMap<const Value *, bool, 8>;
-  IsCapturedCacheT IsCapturedCache;
+  CaptureInfo *CI;
 
   /// Query depth used to distinguish recursive queries.
   unsigned Depth = 0;
@@ -439,18 +483,26 @@ class AAQueryInfo {
   /// assumption is disproven.
   SmallVector<AAQueryInfo::LocPair, 4> AssumptionBasedResults;
 
-  AAQueryInfo() : AliasCache(), IsCapturedCache() {}
+  AAQueryInfo(CaptureInfo *CI) : CI(CI) {}
 
   /// Create a new AAQueryInfo based on this one, but with the cache cleared.
   /// This is used for recursive queries across phis, where cache results may
   /// not be valid.
   AAQueryInfo withEmptyCache() {
-    AAQueryInfo NewAAQI;
+    AAQueryInfo NewAAQI(CI);
     NewAAQI.Depth = Depth;
     return NewAAQI;
   }
 };
 
+/// AAQueryInfo that uses SimpleCaptureInfo.
+class SimpleAAQueryInfo : public AAQueryInfo {
+  SimpleCaptureInfo CI;
+
+public:
+  SimpleAAQueryInfo() : AAQueryInfo(&CI) {}
+};
+
 class BatchAAResults;
 
 class AAResults {
@@ -770,7 +822,7 @@ class AAResults {
   /// helpers above.
   ModRefInfo getModRefInfo(const Instruction *I,
                            const Optional<MemoryLocation> &OptLoc) {
-    AAQueryInfo AAQIP;
+    SimpleAAQueryInfo AAQIP;
     return getModRefInfo(I, OptLoc, AAQIP);
   }
 
@@ -797,7 +849,7 @@ class AAResults {
   ModRefInfo callCapturesBefore(const Instruction *I,
                                 const MemoryLocation &MemLoc,
                                 DominatorTree *DT) {
-    AAQueryInfo AAQIP;
+    SimpleAAQueryInfo AAQIP;
     return callCapturesBefore(I, MemLoc, DT, AAQIP);
   }
 
@@ -896,9 +948,12 @@ class AAResults {
 class BatchAAResults {
   AAResults &AA;
   AAQueryInfo AAQI;
+  SimpleCaptureInfo SimpleCI;
 
 public:
-  BatchAAResults(AAResults &AAR) : AA(AAR), AAQI() {}
+  BatchAAResults(AAResults &AAR) : AA(AAR), AAQI(&SimpleCI) {}
+  BatchAAResults(AAResults &AAR, CaptureInfo *CI) : AA(AAR), AAQI(CI) {}
+
   AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
     return AA.alias(LocA, LocB, AAQI);
   }

diff --git a/llvm/lib/Analysis/AliasAnalysis.cpp b/llvm/lib/Analysis/AliasAnalysis.cpp
index 5ee41e392744e..04d71d4fec5de 100644
--- a/llvm/lib/Analysis/AliasAnalysis.cpp
+++ b/llvm/lib/Analysis/AliasAnalysis.cpp
@@ -119,7 +119,7 @@ bool AAResults::invalidate(Function &F, const PreservedAnalyses &PA,
 
 AliasResult AAResults::alias(const MemoryLocation &LocA,
                              const MemoryLocation &LocB) {
-  AAQueryInfo AAQIP;
+  SimpleAAQueryInfo AAQIP;
   return alias(LocA, LocB, AAQIP);
 }
 
@@ -162,7 +162,7 @@ AliasResult AAResults::alias(const MemoryLocation &LocA,
 
 bool AAResults::pointsToConstantMemory(const MemoryLocation &Loc,
                                        bool OrLocal) {
-  AAQueryInfo AAQIP;
+  SimpleAAQueryInfo AAQIP;
   return pointsToConstantMemory(Loc, AAQIP, OrLocal);
 }
 
@@ -190,7 +190,7 @@ ModRefInfo AAResults::getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) {
 }
 
 ModRefInfo AAResults::getModRefInfo(Instruction *I, const CallBase *Call2) {
-  AAQueryInfo AAQIP;
+  SimpleAAQueryInfo AAQIP;
   return getModRefInfo(I, Call2, AAQIP);
 }
 
@@ -217,7 +217,7 @@ ModRefInfo AAResults::getModRefInfo(Instruction *I, const CallBase *Call2,
 
 ModRefInfo AAResults::getModRefInfo(const CallBase *Call,
                                     const MemoryLocation &Loc) {
-  AAQueryInfo AAQIP;
+  SimpleAAQueryInfo AAQIP;
   return getModRefInfo(Call, Loc, AAQIP);
 }
 
@@ -284,7 +284,7 @@ ModRefInfo AAResults::getModRefInfo(const CallBase *Call,
 
 ModRefInfo AAResults::getModRefInfo(const CallBase *Call1,
                                     const CallBase *Call2) {
-  AAQueryInfo AAQIP;
+  SimpleAAQueryInfo AAQIP;
   return getModRefInfo(Call1, Call2, AAQIP);
 }
 
@@ -474,7 +474,7 @@ raw_ostream &llvm::operator<<(raw_ostream &OS, AliasResult AR) {
 
 ModRefInfo AAResults::getModRefInfo(const LoadInst *L,
                                     const MemoryLocation &Loc) {
-  AAQueryInfo AAQIP;
+  SimpleAAQueryInfo AAQIP;
   return getModRefInfo(L, Loc, AAQIP);
 }
 ModRefInfo AAResults::getModRefInfo(const LoadInst *L,
@@ -499,7 +499,7 @@ ModRefInfo AAResults::getModRefInfo(const LoadInst *L,
 
 ModRefInfo AAResults::getModRefInfo(const StoreInst *S,
                                     const MemoryLocation &Loc) {
-  AAQueryInfo AAQIP;
+  SimpleAAQueryInfo AAQIP;
   return getModRefInfo(S, Loc, AAQIP);
 }
 ModRefInfo AAResults::getModRefInfo(const StoreInst *S,
@@ -531,7 +531,7 @@ ModRefInfo AAResults::getModRefInfo(const StoreInst *S,
 }
 
 ModRefInfo AAResults::getModRefInfo(const FenceInst *S, const MemoryLocation &Loc) {
-  AAQueryInfo AAQIP;
+  SimpleAAQueryInfo AAQIP;
   return getModRefInfo(S, Loc, AAQIP);
 }
 
@@ -547,7 +547,7 @@ ModRefInfo AAResults::getModRefInfo(const FenceInst *S,
 
 ModRefInfo AAResults::getModRefInfo(const VAArgInst *V,
                                     const MemoryLocation &Loc) {
-  AAQueryInfo AAQIP;
+  SimpleAAQueryInfo AAQIP;
   return getModRefInfo(V, Loc, AAQIP);
 }
 
@@ -577,7 +577,7 @@ ModRefInfo AAResults::getModRefInfo(const VAArgInst *V,
 
 ModRefInfo AAResults::getModRefInfo(const CatchPadInst *CatchPad,
                                     const MemoryLocation &Loc) {
-  AAQueryInfo AAQIP;
+  SimpleAAQueryInfo AAQIP;
   return getModRefInfo(CatchPad, Loc, AAQIP);
 }
 
@@ -597,7 +597,7 @@ ModRefInfo AAResults::getModRefInfo(const CatchPadInst *CatchPad,
 
 ModRefInfo AAResults::getModRefInfo(const CatchReturnInst *CatchRet,
                                     const MemoryLocation &Loc) {
-  AAQueryInfo AAQIP;
+  SimpleAAQueryInfo AAQIP;
   return getModRefInfo(CatchRet, Loc, AAQIP);
 }
 
@@ -617,7 +617,7 @@ ModRefInfo AAResults::getModRefInfo(const CatchReturnInst *CatchRet,
 
 ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
                                     const MemoryLocation &Loc) {
-  AAQueryInfo AAQIP;
+  SimpleAAQueryInfo AAQIP;
   return getModRefInfo(CX, Loc, AAQIP);
 }
 
@@ -645,7 +645,7 @@ ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
 
 ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,
                                     const MemoryLocation &Loc) {
-  AAQueryInfo AAQIP;
+  SimpleAAQueryInfo AAQIP;
   return getModRefInfo(RMW, Loc, AAQIP);
 }
 

diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 636206b6cffb4..8e53e7cb344d0 100644
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -223,6 +223,51 @@ static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
   return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
 }
 
+//===----------------------------------------------------------------------===//
+// CaptureInfo implementations
+//===----------------------------------------------------------------------===//
+
+CaptureInfo::~CaptureInfo() = default;
+
+bool SimpleCaptureInfo::isNotCapturedBeforeOrAt(const Value *Object,
+                                                const Instruction *I) {
+  return isNonEscapingLocalObject(Object, &IsCapturedCache);
+}
+
+bool EarliestEscapeInfo::isNotCapturedBeforeOrAt(const Value *Object,
+                                                 const Instruction *I) {
+  if (!isIdentifiedFunctionLocal(Object))
+    return false;
+
+  auto Iter = EarliestEscapes.insert({Object, nullptr});
+  if (Iter.second) {
+    Instruction *EarliestCapture = FindEarliestCapture(
+        Object, *const_cast<Function *>(I->getFunction()),
+        /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT);
+    if (EarliestCapture) {
+      auto Ins = Inst2Obj.insert({EarliestCapture, {}});
+      Ins.first->second.push_back(Object);
+    }
+    Iter.first->second = EarliestCapture;
+  }
+
+  // No capturing instruction.
+  if (!Iter.first->second)
+    return true;
+
+  return I != Iter.first->second &&
+         !isPotentiallyReachable(Iter.first->second, I, nullptr, &DT, &LI);
+}
+
+void EarliestEscapeInfo::removeInstruction(Instruction *I) {
+  auto Iter = Inst2Obj.find(I);
+  if (Iter != Inst2Obj.end()) {
+    for (const Value *Obj : Iter->second)
+      EarliestEscapes.erase(Obj);
+    Inst2Obj.erase(I);
+  }
+}
+
 //===----------------------------------------------------------------------===//
 // GetElementPtr Instruction Decomposition and Analysis
 //===----------------------------------------------------------------------===//
@@ -835,7 +880,7 @@ ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
   // then the call can not mod/ref the pointer unless the call takes the pointer
   // as an argument, and itself doesn't capture it.
   if (!isa<Constant>(Object) && Call != Object &&
-      isNonEscapingLocalObject(Object, &AAQI.IsCapturedCache)) {
+      AAQI.CI->isNotCapturedBeforeOrAt(Object, Call)) {
 
     // Optimistically assume that call doesn't touch Object and check this
     // assumption in the following loop.
@@ -1514,10 +1559,10 @@ AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
     // location if that memory location doesn't escape. Or it may pass a
     // nocapture value to other functions as long as they don't capture it.
     if (isEscapeSource(O1) &&
-        isNonEscapingLocalObject(O2, &AAQI.IsCapturedCache))
+        AAQI.CI->isNotCapturedBeforeOrAt(O2, cast<Instruction>(O1)))
       return AliasResult::NoAlias;
     if (isEscapeSource(O2) &&
-        isNonEscapingLocalObject(O1, &AAQI.IsCapturedCache))
+        AAQI.CI->isNotCapturedBeforeOrAt(O1, cast<Instruction>(O2)))
       return AliasResult::NoAlias;
   }
 

diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 2fa5f4c366af9..9b6da79fbfd16 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -38,7 +38,6 @@
 #include "llvm/ADT/Statistic.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/CFG.h"
 #include "llvm/Analysis/CaptureTracking.h"
 #include "llvm/Analysis/GlobalsModRef.h"
 #include "llvm/Analysis/LoopInfo.h"
@@ -856,6 +855,7 @@ bool canSkipDef(MemoryDef *D, bool DefVisibleToCaller,
 struct DSEState {
   Function &F;
   AliasAnalysis &AA;
+  EarliestEscapeInfo EI;
 
   /// The single BatchAA instance that is used to cache AA queries. It will
   /// not be invalidated over the whole run. This is safe, because:
@@ -898,14 +898,11 @@ struct DSEState {
   /// basic block.
   DenseMap<BasicBlock *, InstOverlapIntervalsTy> IOLs;
 
-  DenseMap<const Value *, Instruction *> EarliestEscapes;
-  DenseMap<Instruction *, TinyPtrVector<const Value *>> Inst2Obj;
-
   DSEState(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, DominatorTree &DT,
            PostDominatorTree &PDT, const TargetLibraryInfo &TLI,
            const LoopInfo &LI)
-      : F(F), AA(AA), BatchAA(AA), MSSA(MSSA), DT(DT), PDT(PDT), TLI(TLI),
-        DL(F.getParent()->getDataLayout()), LI(LI) {}
+      : F(F), AA(AA), EI(DT, LI), BatchAA(AA, &EI), MSSA(MSSA), DT(DT),
+        PDT(PDT), TLI(TLI), DL(F.getParent()->getDataLayout()), LI(LI) {}
 
   static DSEState get(Function &F, AliasAnalysis &AA, MemorySSA &MSSA,
                       DominatorTree &DT, PostDominatorTree &PDT,
@@ -1268,30 +1265,6 @@ struct DSEState {
                        DepWriteOffset) == OW_Complete;
   }
 
-  /// Returns true if \p Object is not captured before or by \p I.
-  bool notCapturedBeforeOrAt(const Value *Object, Instruction *I) {
-    if (!isIdentifiedFunctionLocal(Object))
-      return false;
-
-    auto Iter = EarliestEscapes.insert({Object, nullptr});
-    if (Iter.second) {
-      Instruction *EarliestCapture = FindEarliestCapture(
-          Object, F, /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT);
-      if (EarliestCapture) {
-        auto Ins = Inst2Obj.insert({EarliestCapture, {}});
-        Ins.first->second.push_back(Object);
-      }
-      Iter.first->second = EarliestCapture;
-    }
-
-    // No capturing instruction.
-    if (!Iter.first->second)
-      return true;
-
-    return I != Iter.first->second &&
-           !isPotentiallyReachable(Iter.first->second, I, nullptr, &DT, &LI);
-  }
-
   // Returns true if \p Use may read from \p DefLoc.
   bool isReadClobber(const MemoryLocation &DefLoc, Instruction *UseInst) {
     if (isNoopIntrinsic(UseInst))
@@ -1309,25 +1282,6 @@ struct DSEState {
       if (CB->onlyAccessesInaccessibleMemory())
         return false;
 
-    // BasicAA does not spend linear time to check whether local objects escape
-    // before potentially aliasing accesses. To improve DSE results, compute and
-    // cache escape info for local objects in certain circumstances.
-    if (auto *LI = dyn_cast<LoadInst>(UseInst)) {
-      // If the loads reads from a loaded underlying object accesses the load
-      // cannot alias DefLoc, if DefUO is a local object that has not escaped
-      // before the load.
-      auto *ReadUO = getUnderlyingObject(LI->getPointerOperand());
-      auto *DefUO = getUnderlyingObject(DefLoc.Ptr);
-      auto *ReadLI = dyn_cast<LoadInst>(ReadUO);
-      if (ReadLI && notCapturedBeforeOrAt(DefUO, ReadLI)) {
-        assert(
-            !PointerMayBeCapturedBefore(DefLoc.Ptr, false, true, ReadLI, &DT,
-                                        false, 0, &this->LI) &&
-            "cached analysis disagrees with fresh PointerMayBeCapturedBefore");
-        return false;
-      }
-    }
-
     // NOTE: For calls, the number of stores removed could be slightly improved
     // by using AA.callCapturesBefore(UseInst, DefLoc, &DT), but that showed to
     // be expensive compared to the benefits in practice. For now, avoid more
@@ -1769,14 +1723,7 @@ struct DSEState {
             NowDeadInsts.push_back(OpI);
         }
 
-      // Clear any cached escape info for objects associated with the
-      // removed instructions.
-      auto Iter = Inst2Obj.find(DeadInst);
-      if (Iter != Inst2Obj.end()) {
-        for (const Value *Obj : Iter->second)
-          EarliestEscapes.erase(Obj);
-        Inst2Obj.erase(DeadInst);
-      }
+      EI.removeInstruction(DeadInst);
       DeadInst->eraseFromParent();
     }
   }

diff --git a/llvm/test/Transforms/DeadStoreElimination/captures-before-call.ll b/llvm/test/Transforms/DeadStoreElimination/captures-before-call.ll
index e334ad8f42a14..7d9986151d9c3 100644
--- a/llvm/test/Transforms/DeadStoreElimination/captures-before-call.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/captures-before-call.ll
@@ -11,7 +11,6 @@ define i32 @other_value_escapes_before_call() {
 ; CHECK-NEXT:    [[V2:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    store i32 0, i32* [[V1]], align 4
 ; CHECK-NEXT:    call void @escape(i32* nonnull [[V1]])
-; CHECK-NEXT:    store i32 55555, i32* [[V2]], align 4
 ; CHECK-NEXT:    [[CALL:%.*]] = call i32 @getval()
 ; CHECK-NEXT:    store i32 [[CALL]], i32* [[V2]], align 4
 ; CHECK-NEXT:    call void @escape(i32* nonnull [[V2]])
@@ -46,7 +45,6 @@ declare void @clobber()
 define i32 @test_not_captured_before_call_same_bb() {
 ; CHECK-LABEL: @test_not_captured_before_call_same_bb(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval()
 ; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
 ; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
@@ -63,7 +61,6 @@ define i32 @test_not_captured_before_call_same_bb() {
 define i32 @test_not_captured_before_call_same_bb_escape_unreachable_block() {
 ; CHECK-LABEL: @test_not_captured_before_call_same_bb_escape_unreachable_block(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval()
 ; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
 ; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
@@ -106,7 +103,6 @@ define i32 @test_captured_and_clobbered_after_load_same_bb_2() {
 define i32 @test_captured_after_call_same_bb_2_clobbered_later() {
 ; CHECK-LABEL: @test_captured_after_call_same_bb_2_clobbered_later(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval()
 ; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
 ; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
@@ -125,7 +121,6 @@ define i32 @test_captured_after_call_same_bb_2_clobbered_later() {
 define i32 @test_captured_sibling_path_to_call_other_blocks_1(i1 %c.1) {
 ; CHECK-LABEL: @test_captured_sibling_path_to_call_other_blocks_1(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
 ; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
@@ -296,7 +291,6 @@ exit:
 define i32 @test_not_captured_before_call_other_blocks_1(i1 %c.1) {
 ; CHECK-LABEL: @test_not_captured_before_call_other_blocks_1(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval()
 ; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
@@ -328,7 +322,6 @@ exit:
 define i32 @test_not_captured_before_call_other_blocks_2(i1 %c.1) {
 ; CHECK-LABEL: @test_not_captured_before_call_other_blocks_2(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval()
 ; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
@@ -362,7 +355,6 @@ exit:
 define i32 @test_not_captured_before_call_other_blocks_3(i1 %c.1) {
 ; CHECK-LABEL: @test_not_captured_before_call_other_blocks_3(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval()
 ; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
@@ -394,7 +386,6 @@ exit:
 define i32 @test_not_captured_before_call_other_blocks_4(i1 %c.1) {
 ; CHECK-LABEL: @test_not_captured_before_call_other_blocks_4(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
@@ -431,7 +422,6 @@ define i32 @test_not_captured_before_call_other_blocks_5(i1 %c.1) {
 ; CHECK-LABEL: @test_not_captured_before_call_other_blocks_5(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
 ; CHECK:       then:
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval()
@@ -499,7 +489,6 @@ define i32 @test_not_captured_before_call_other_blocks_7(i1 %c.1) {
 ; CHECK-LABEL: @test_not_captured_before_call_other_blocks_7(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval()
 ; CHECK-NEXT:    call void @escape_writeonly(i32* [[A]])
 ; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
@@ -554,7 +543,6 @@ define i32 @test_not_captured_before_call_same_bb_but_read() {
 define i32 @test_captured_after_loop(i1 %c.1) {
 ; CHECK-LABEL: @test_captured_after_loop(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
 ; CHECK-NEXT:    br label [[LOOP:%.*]]
 ; CHECK:       loop:
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval()
@@ -613,7 +601,6 @@ define void @test_escaping_store_removed(i8* %src, i64** %escape) {
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[A:%.*]] = alloca i64, align 8
 ; CHECK-NEXT:    [[EXT_A:%.*]] = bitcast i64* [[A]] to i8*
-; CHECK-NEXT:    store i64 0, i64* [[A]], align 8
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[EXT_A]], i8* [[SRC:%.*]], i64 8, i1 false)
 ; CHECK-NEXT:    store i64* [[A]], i64** [[ESCAPE:%.*]], align 8
@@ -641,7 +628,6 @@ define void @test_invoke_captures() personality i8* undef {
 ; CHECK-LABEL: @test_invoke_captures(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
 ; CHECK-NEXT:    invoke void @clobber()
 ; CHECK-NEXT:    to label [[BB2:%.*]] unwind label [[BB5:%.*]]
 ; CHECK:       bb2:
@@ -695,7 +681,6 @@ declare i32 @getval_nounwind() nounwind
 define i32 @test_not_captured_before_load_same_bb_noalias_call() {
 ; CHECK-LABEL: @test_not_captured_before_load_same_bb_noalias_call(
 ; CHECK-NEXT:    [[A:%.*]] = call i32* @alloc()
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval_nounwind()
 ; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
 ; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
@@ -711,9 +696,8 @@ define i32 @test_not_captured_before_load_same_bb_noalias_call() {
 
 define i32 @test_not_captured_before_load_same_bb_noalias_arg(i32* noalias %a) {
 ; CHECK-LABEL: @test_not_captured_before_load_same_bb_noalias_arg(
-; CHECK-NEXT:    store i32 55, i32* [[A:%.*]], align 4
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @getval_nounwind()
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
+; CHECK-NEXT:    store i32 99, i32* [[A:%.*]], align 4
 ; CHECK-NEXT:    call void @escape_and_clobber(i32* [[A]])
 ; CHECK-NEXT:    ret i32 [[R]]
 ;

diff --git a/llvm/test/Transforms/DeadStoreElimination/captures-before-load.ll b/llvm/test/Transforms/DeadStoreElimination/captures-before-load.ll
index 5243bb335b78e..f50d1775e576b 100644
--- a/llvm/test/Transforms/DeadStoreElimination/captures-before-load.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/captures-before-load.ll
@@ -154,7 +154,6 @@ define i32 @test_captured_before_load_same_bb_2(i32** %in.ptr) {
 define i32 @test_not_captured_before_load_same_bb_clobber(i32** %in.ptr) {
 ; CHECK-LABEL: @test_not_captured_before_load_same_bb_clobber(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
 ; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
@@ -242,7 +241,6 @@ define i32 @test_only_captured_sibling_path_with_ret_to_load_other_blocks(i32**
 ; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       exit:
-; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    ret i32 [[IN_LV_2]]
 ;
@@ -1124,7 +1122,6 @@ entry:
 define i32 @test_not_captured_before_load_of_ptrtoint(i64 %in) {
 ; CHECK-LABEL: @test_not_captured_before_load_of_ptrtoint(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
 ; CHECK-NEXT:    [[IN_PTR:%.*]] = inttoptr i64 [[IN:%.*]] to i32*
 ; CHECK-NEXT:    [[IN_PTR_LOAD:%.*]] = load i32, i32* [[IN_PTR]], align 4
 ; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
@@ -1145,7 +1142,6 @@ declare i32* @getptr()
 define i32 @test_not_captured_before_load_of_call() {
 ; CHECK-LABEL: @test_not_captured_before_load_of_call(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 55, i32* [[A]], align 4
 ; CHECK-NEXT:    [[IN_PTR:%.*]] = call i32* @getptr() #[[ATTR4:[0-9]+]]
 ; CHECK-NEXT:    [[IN_PTR_LOAD:%.*]] = load i32, i32* [[IN_PTR]], align 4
 ; CHECK-NEXT:    store i32 99, i32* [[A]], align 4
@@ -1166,7 +1162,6 @@ define i32 @test_not_captured_multiple_objects(i1 %c, i32** %in.ptr) {
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    [[B:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    [[O:%.*]] = select i1 [[C:%.*]], i32* [[A]], i32* [[B]]
-; CHECK-NEXT:    store i32 55, i32* [[O]], align 4
 ; CHECK-NEXT:    [[IN_LV_1:%.*]] = load i32*, i32** [[IN_PTR:%.*]], align 2
 ; CHECK-NEXT:    [[IN_LV_2:%.*]] = load i32, i32* [[IN_LV_1]], align 2
 ; CHECK-NEXT:    store i32 99, i32* [[O]], align 4

diff --git a/llvm/unittests/Analysis/BasicAliasAnalysisTest.cpp b/llvm/unittests/Analysis/BasicAliasAnalysisTest.cpp
index 5b8ece41b07c7..d8fd37e662d1b 100644
--- a/llvm/unittests/Analysis/BasicAliasAnalysisTest.cpp
+++ b/llvm/unittests/Analysis/BasicAliasAnalysisTest.cpp
@@ -46,7 +46,7 @@ class BasicAATest : public testing::Test {
     DominatorTree DT;
     AssumptionCache AC;
     BasicAAResult BAA;
-    AAQueryInfo AAQI;
+    SimpleAAQueryInfo AAQI;
 
     TestAnalyses(BasicAATest &Test)
         : DT(*Test.F), AC(*Test.F), BAA(Test.DL, *Test.F, Test.TLI, AC, &DT),


        

