[llvm] r277559 - [MSSA] Fix a caching bug.

George Burgess IV via llvm-commits llvm-commits at lists.llvm.org
Tue Aug 2 18:22:19 PDT 2016


Author: gbiv
Date: Tue Aug  2 20:22:19 2016
New Revision: 277559

URL: http://llvm.org/viewvc/llvm-project?rev=277559&view=rev
Log:
[MSSA] Fix a caching bug.

This fixes a bug where we'd sometimes cache overly-conservative results
with our walker. The bug was made more obvious by r277480, which makes
our cache far more spotty than it was. The test case is an llvm-unit
test, because we're likely going to use CachingWalker only for def
optimization in the future.

The bug stems from the walker assuming that `DefNode.Last` was a valid
target to cache to when it failed to optimize phis. That assumption is
sometimes wrong when we have a cache hit. The fix is to cache to the
thing we *can* assume is a valid target. :)
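
To make the shape of the fix a bit more concrete, here is a minimal,
self-contained C++ sketch of the caching pattern at issue. Every name in
it (Access, Path, Terminated, and so on) is a hypothetical stand-in for
the real MemorySSA types, not the actual API; the authoritative change is
the diff below.

  #include <cassert>
  #include <cstddef>
  #include <map>
  #include <vector>

  // Hypothetical stand-ins for the real MemorySSA types, for illustration only.
  struct Access { int Id; };

  // Analogue of DefPath: `Last` is the farthest access this search walked to
  // before stopping (e.g. because a cache lookup answered the query).
  struct Path { Access *Last; };

  // Analogue of TerminatedPath: pairs the clobber we actually found (possibly
  // pulled from the cache, so possibly past `Last`) with the path that found it.
  struct Terminated {
    Access *Clobber;
    std::size_t PathIndex;
  };

  int main() {
    Access StoreB{1}; // where the walk physically stopped and hit the cache
    Access StoreC{2}; // the clobber that cache entry actually names

    std::vector<Path> Paths = {{&StoreB}};
    Terminated Blocker{&StoreC, 0};
    std::map<Access *, Access *> Cache;

    // Buggy shape: treat whatever the path last walked to as the blocking
    // access. On a cache hit, that is only the node where the hit happened,
    // so caching it records an overly-conservative clobber.
    Access *Buggy = Paths[Blocker.PathIndex].Last; // would record StoreB

    // Fixed shape: use the clobber carried back in the terminated path.
    Cache[Paths[Blocker.PathIndex].Last] = Blocker.Clobber; // records StoreC

    assert(Buggy != Blocker.Clobber);
    assert(Cache[&StoreB] == &StoreC);
    return 0;
  }

In the real patch, getBlockingAccess returns an Optional<TerminatedPath>
so the caller can cache Blocker->Clobber against Paths[Blocker->LastNode]
instead of treating Paths[*Blocker].Last as the clobber.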

Modified:
    llvm/trunk/lib/Transforms/Utils/MemorySSA.cpp
    llvm/trunk/unittests/Transforms/Utils/MemorySSA.cpp

Modified: llvm/trunk/lib/Transforms/Utils/MemorySSA.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Utils/MemorySSA.cpp?rev=277559&r1=277558&r2=277559&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Utils/MemorySSA.cpp (original)
+++ llvm/trunk/lib/Transforms/Utils/MemorySSA.cpp Tue Aug  2 20:22:19 2016
@@ -594,7 +594,7 @@ class ClobberWalker {
   ///
   /// If this returns None, NewPaused is a vector of searches that terminated
   /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
-  Optional<ListIndex>
+  Optional<TerminatedPath>
   getBlockingAccess(MemoryAccess *StopWhere,
                     SmallVectorImpl<ListIndex> &PausedSearches,
                     SmallVectorImpl<ListIndex> &NewPaused,
@@ -633,11 +633,12 @@ class ClobberWalker {
         assert(Res.Result != StopWhere || Res.FromCache);
         // If this wasn't a cache hit, we hit a clobber when walking. That's a
         // failure.
+        TerminatedPath Term{Res.Result, PathIndex};
         if (!Res.FromCache || !MSSA.dominates(Res.Result, StopWhere))
-          return PathIndex;
+          return Term;
 
         // Otherwise, it's a valid thing to potentially optimize to.
-        Terminated.push_back({Res.Result, PathIndex});
+        Terminated.push_back(Term);
         continue;
       }
 
@@ -769,22 +770,21 @@ class ClobberWalker {
       // liveOnEntry, and we'll happily wait for that to disappear (read: never)
       // For the moment, this is fine, since we do basically nothing with
       // blocker info.
-      if (Optional<ListIndex> Blocker = getBlockingAccess(
+      if (Optional<TerminatedPath> Blocker = getBlockingAccess(
               Target, PausedSearches, NewPaused, TerminatedPaths)) {
-        MemoryAccess *BlockingAccess = Paths[*Blocker].Last;
         // Cache our work on the blocking node, since we know that's correct.
-        cacheDefPath(Paths[*Blocker], BlockingAccess);
+        cacheDefPath(Paths[Blocker->LastNode], Blocker->Clobber);
 
         // Find the node we started at. We can't search based on N->Last, since
         // we may have gone around a loop with a different MemoryLocation.
-        auto Iter = find_if(def_path(*Blocker), [&](const DefPath &N) {
+        auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
           return defPathIndex(N) < PriorPathsSize;
         });
         assert(Iter != def_path_iterator());
 
         DefPath &CurNode = *Iter;
         assert(CurNode.Last == Current);
-        CurNode.Blocker = BlockingAccess;
+        CurNode.Blocker = Blocker->Clobber;
 
         // Two things:
         // A. We can't reliably cache all of NewPaused back. Consider a case

Modified: llvm/trunk/unittests/Transforms/Utils/MemorySSA.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/unittests/Transforms/Utils/MemorySSA.cpp?rev=277559&r1=277558&r2=277559&view=diff
==============================================================================
--- llvm/trunk/unittests/Transforms/Utils/MemorySSA.cpp (original)
+++ llvm/trunk/unittests/Transforms/Utils/MemorySSA.cpp Tue Aug  2 20:22:19 2016
@@ -344,3 +344,77 @@ TEST_F(MemorySSATest, TestStoreDoubleQue
   EXPECT_EQ(Clobber, StoreAccess);
   EXPECT_TRUE(MSSA.isLiveOnEntryDef(LiveOnEntry));
 }
+
+// Bug: During phi optimization, the walker wouldn't cache to the proper result
+// in the farthest-walked BB.
+//
+// Specifically, it would assume that whatever we walked to was a clobber.
+// "Whatever we walked to" isn't a clobber if we hit a cache entry.
+//
+// ...So, we need a test case that looks like:
+//    A
+//   / \
+//  B   |
+//   \ /
+//    C
+//
+// Where, when we try to optimize a thing in 'C', a blocker is found in 'B'.
+// The walk must determine that the blocker exists by using cache entries *while
+// walking* 'B'.
+TEST_F(MemorySSATest, PartialWalkerCacheWithPhis) {
+  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
+                       GlobalValue::ExternalLinkage, "F", &M);
+  B.SetInsertPoint(BasicBlock::Create(C, "A", F));
+  Type *Int8 = Type::getInt8Ty(C);
+  Constant *One = ConstantInt::get(Int8, 1);
+  Constant *Zero = ConstantInt::get(Int8, 0);
+  Value *AllocA = B.CreateAlloca(Int8, One, "a");
+  Value *AllocB = B.CreateAlloca(Int8, One, "b");
+  BasicBlock *IfThen = BasicBlock::Create(C, "B", F);
+  BasicBlock *IfEnd = BasicBlock::Create(C, "C", F);
+
+  B.CreateCondBr(UndefValue::get(Type::getInt1Ty(C)), IfThen, IfEnd);
+
+  B.SetInsertPoint(IfThen);
+  Instruction *FirstStore = B.CreateStore(Zero, AllocA);
+  B.CreateStore(Zero, AllocB);
+  Instruction *ALoad0 = B.CreateLoad(AllocA, "");
+  Instruction *BStore = B.CreateStore(Zero, AllocB);
+  // Due to use optimization/etc. we make a store to A, which is removed after
+  // we build MSSA. This helps keep the test case simple-ish.
+  Instruction *KillStore = B.CreateStore(Zero, AllocA);
+  Instruction *ALoad = B.CreateLoad(AllocA, "");
+  B.CreateBr(IfEnd);
+
+  B.SetInsertPoint(IfEnd);
+  Instruction *BelowPhi = B.CreateStore(Zero, AllocA);
+
+  setupAnalyses();
+  MemorySSA &MSSA = Analyses->MSSA;
+  MemorySSAWalker *Walker = Analyses->Walker;
+
+  // Kill `KillStore`; it exists solely so that the load after it won't be
+  // optimized to FirstStore.
+  MSSA.removeMemoryAccess(MSSA.getMemoryAccess(KillStore));
+  KillStore->eraseFromParent();
+  auto *ALoadMA = cast<MemoryUse>(MSSA.getMemoryAccess(ALoad));
+  EXPECT_EQ(ALoadMA->getDefiningAccess(), MSSA.getMemoryAccess(BStore));
+
+  // Populate the cache for the store to AllocB directly after FirstStore. It
+  // should point to something in block B (so something in C can't be optimized
+  // to it).
+  MemoryAccess *Load0Clobber = Walker->getClobberingMemoryAccess(ALoad0);
+  EXPECT_EQ(MSSA.getMemoryAccess(FirstStore), Load0Clobber);
+
+  // If the bug exists, this will introduce a bad cache entry for %a on BStore.
+  // It will point to the store to %b after FirstStore. This only happens during
+  // phi optimization.
+  MemoryAccess *BottomClobber = Walker->getClobberingMemoryAccess(BelowPhi);
+  MemoryAccess *Phi = MSSA.getMemoryAccess(IfEnd);
+  EXPECT_EQ(BottomClobber, Phi);
+
+  // This query will first check the cache for {%a, BStore}. It should point to
+  // FirstStore, not to the store after FirstStore.
+  MemoryAccess *UseClobber = Walker->getClobberingMemoryAccess(ALoad);
+  EXPECT_EQ(UseClobber, MSSA.getMemoryAccess(FirstStore));
+}



