[llvm] r268087 - [MemorySSA] Fix bugs in walker; refactor unittests a bit.
George Burgess IV via llvm-commits
llvm-commits at lists.llvm.org
Fri Apr 29 11:42:55 PDT 2016
Author: gbiv
Date: Fri Apr 29 13:42:55 2016
New Revision: 268087
URL: http://llvm.org/viewvc/llvm-project?rev=268087&view=rev
Log:
[MemorySSA] Fix bugs in walker; refactor unittests a bit.
This patch fixes two somewhat related bugs in MemorySSA's caching
walker. These bugs were found because D19695 brought up the problem
that we would end up with defs cached as their own clobbers, which is
incorrect.
The bugs this patch fixes are:
- We would sometimes skip the nearest clobber of a MemoryAccess, because
we would query our cache for a given potential clobber before
checking if the potential clobber is the clobber we're looking for.
The cache entry for the potential clobber would point to the nearest
clobber *of the potential clobber*, so if that was a cache hit, we'd
ignore the potential clobber entirely.
- There are times (sometimes during the DFS, sometimes in the
getClobbering... functions) when we would insert cache entries saying
that a def clobbers itself.
There's a bit of common code between the fixes for the bugs, so they
aren't split out into multiple commits.
This patch also adds a few unit tests, and refactors existing tests a
bit to reduce the duplication of setup code.
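
To make the first bug concrete, here is a small IR sketch (mirroring the
new TestTripleStore unit test below; the numbered comments are MemorySSA
accesses, not part of the IR itself):

  define void @F(i8*) {
    %A = alloca i8, i8 1
    ; 1 = MemoryDef(liveOnEntry)
    store i8 0, i8* %A
    ; 2 = MemoryDef(1)
    store i8 1, i8* %A
    ; 3 = MemoryDef(2)
    store i8 2, i8* %A
  }

Before this fix, asking for the clobber of 3 = MemoryDef(2) could hit the
cache entry for 2 = MemoryDef(1) before checking whether 2 itself clobbers
the query; that entry points at 2's own clobber (1), so the walker would
skip 2 and incorrectly report 1 as 3's clobbering access.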
Modified:
llvm/trunk/lib/Transforms/Utils/MemorySSA.cpp
llvm/trunk/unittests/Transforms/Utils/MemorySSA.cpp
Modified: llvm/trunk/lib/Transforms/Utils/MemorySSA.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Utils/MemorySSA.cpp?rev=268087&r1=268086&r2=268087&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Utils/MemorySSA.cpp (original)
+++ llvm/trunk/lib/Transforms/Utils/MemorySSA.cpp Fri Apr 29 13:42:55 2016
@@ -824,6 +824,10 @@ void CachingMemorySSAWalker::doCacheInse
MemoryAccess *Result,
const UpwardsMemoryQuery &Q,
const MemoryLocation &Loc) {
+ // This is fine for Phis, since there are times where we can't optimize them.
+ // Making a def its own clobber is never correct, though.
+ assert((Result != M || isa<MemoryPhi>(M)) &&
+ "Something can't clobber itself!");
++NumClobberCacheInserts;
if (Q.IsCall)
CachedUpwardsClobberingCall[M] = Result;
@@ -873,9 +877,11 @@ MemoryAccessPair CachingMemorySSAWalker:
MemoryAccess *CurrAccess = *DFI;
if (MSSA->isLiveOnEntryDef(CurrAccess))
return {CurrAccess, Loc};
- if (auto CacheResult = doCacheLookup(CurrAccess, Q, Loc))
- return {CacheResult, Loc};
- // If this is a MemoryDef, check whether it clobbers our current query.
+ // If this is a MemoryDef, check whether it clobbers our current query. This
+ // needs to be done before consulting the cache, because the cache reports
+ // the clobber for CurrAccess. If CurrAccess is a clobber for this query,
+ // and we ask the cache for information first, then we might skip this
+ // clobber, which is bad.
if (auto *MD = dyn_cast<MemoryDef>(CurrAccess)) {
// If we hit the top, stop following this path.
// While we can do lookups, we can't sanely do inserts here unless we were
@@ -886,6 +892,8 @@ MemoryAccessPair CachingMemorySSAWalker:
break;
}
}
+ if (auto CacheResult = doCacheLookup(CurrAccess, Q, Loc))
+ return {CacheResult, Loc};
// We need to know whether it is a phi so we can track backedges.
// Otherwise, walk all upward defs.
@@ -957,8 +965,15 @@ MemoryAccessPair CachingMemorySSAWalker:
return {MSSA->getLiveOnEntryDef(), Q.StartingLoc};
const BasicBlock *OriginalBlock = StartingAccess->getBlock();
+ assert(DFI.getPathLength() > 0 && "We dropped our path?");
unsigned N = DFI.getPathLength();
- for (; N != 0; --N) {
+ // If we found a clobbering def, the last element in the path will be our
+ // clobber, so we don't want to cache that to itself. OTOH, if we optimized a
+ // phi, we can add the last thing in the path to the cache, since that won't
+ // be the result.
+ if (DFI.getPath(N - 1) == ModifyingAccess)
+ --N;
+ for (; N > 1; --N) {
MemoryAccess *CacheAccess = DFI.getPath(N - 1);
BasicBlock *CurrBlock = CacheAccess->getBlock();
if (!FollowingBackedge)
@@ -970,8 +985,8 @@ MemoryAccessPair CachingMemorySSAWalker:
}
// Cache everything else on the way back. The caller should cache
- // Q.OriginalAccess for us.
- for (; N != 0; --N) {
+ // StartingAccess for us.
+ for (; N > 1; --N) {
MemoryAccess *CacheAccess = DFI.getPath(N - 1);
doCacheInsert(CacheAccess, ModifyingAccess, Q, Loc);
}
@@ -1024,7 +1039,9 @@ CachingMemorySSAWalker::getClobberingMem
: StartingUseOrDef;
MemoryAccess *Clobber = getClobberingMemoryAccess(DefiningAccess, Q);
- doCacheInsert(Q.OriginalAccess, Clobber, Q, Q.StartingLoc);
+ // Only cache this if it wouldn't make Clobber point to itself.
+ if (Clobber != StartingAccess)
+ doCacheInsert(Q.OriginalAccess, Clobber, Q, Q.StartingLoc);
DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
DEBUG(dbgs() << *StartingUseOrDef << "\n");
DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
@@ -1063,12 +1080,17 @@ CachingMemorySSAWalker::getClobberingMem
return DefiningAccess;
MemoryAccess *Result = getClobberingMemoryAccess(DefiningAccess, Q);
+ // DFS won't cache a result for DefiningAccess. So, if DefiningAccess isn't
+ // our clobber, be sure that it gets a cache entry, too.
+ if (Result != DefiningAccess)
+ doCacheInsert(DefiningAccess, Result, Q, Q.StartingLoc);
doCacheInsert(Q.OriginalAccess, Result, Q, Q.StartingLoc);
// TODO: When this implementation is more mature, we may want to figure out
// what this additional caching buys us. It's most likely A Good Thing.
if (Q.IsCall)
for (const MemoryAccess *MA : Q.VisitedCalls)
- doCacheInsert(MA, Result, Q, Q.StartingLoc);
+ if (MA != Result)
+ doCacheInsert(MA, Result, Q, Q.StartingLoc);
DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
DEBUG(dbgs() << *DefiningAccess << "\n");
Modified: llvm/trunk/unittests/Transforms/Utils/MemorySSA.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/unittests/Transforms/Utils/MemorySSA.cpp?rev=268087&r1=268086&r2=268087&view=diff
==============================================================================
--- llvm/trunk/unittests/Transforms/Utils/MemorySSA.cpp (original)
+++ llvm/trunk/unittests/Transforms/Utils/MemorySSA.cpp Fri Apr 29 13:42:55 2016
@@ -19,20 +19,59 @@
using namespace llvm;
-TEST(MemorySSA, RemoveMemoryAccess) {
+const static char DLString[] = "e-i64:64-f80:128-n8:16:32:64-S128";
+
+/// There's a lot of common setup between these tests. This fixture helps reduce
+/// that. Tests should mock up a function, store it in F, and then call
+/// setupAnalyses().
+class MemorySSATest : public testing::Test {
+protected:
+ // N.B. Many of these members depend on each other (e.g. the Module depends on
+ // the Context, etc.). So, order matters here (and in TestAnalyses).
LLVMContext C;
- std::unique_ptr<Module> M(new Module("Remove memory access", C));
- IRBuilder<> B(C);
- DataLayout DL("e-i64:64-f80:128-n8:16:32:64-S128");
+ Module M;
+ IRBuilder<> B;
+ DataLayout DL;
TargetLibraryInfoImpl TLII;
- TargetLibraryInfo TLI(TLII);
+ TargetLibraryInfo TLI;
+ Function *F;
+
+ // Things that we need to build after the function is created.
+ struct TestAnalyses {
+ DominatorTree DT;
+ AssumptionCache AC;
+ AAResults AA;
+ BasicAAResult BAA;
+ MemorySSA MSSA;
+ std::unique_ptr<MemorySSAWalker> Walker;
+
+ TestAnalyses(MemorySSATest &Test)
+ : DT(*Test.F), AC(*Test.F), AA(Test.TLI),
+ BAA(Test.DL, Test.TLI, AC, &DT), MSSA(*Test.F) {
+ AA.addAAResult(BAA);
+ Walker.reset(MSSA.buildMemorySSA(&AA, &DT));
+ }
+ };
+
+ std::unique_ptr<TestAnalyses> Analyses;
+
+ void setupAnalyses() {
+ assert(F);
+ Analyses.reset(new TestAnalyses(*this));
+ }
+public:
+ MemorySSATest()
+ : M("MemorySSATest", C), B(C), DL(DLString), TLI(TLII), F(nullptr) {}
+};
+
+TEST_F(MemorySSATest, RemoveMemoryAccess) {
// We create a diamond where there is a store on one side, and then a load
// after the merge point. This enables us to test a bunch of different
// removal cases.
- Function *F = Function::Create(
+ F = Function::Create(
FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
- GlobalValue::ExternalLinkage, "F", M.get());
+ GlobalValue::ExternalLinkage, "F", &M);
BasicBlock *Entry(BasicBlock::Create(C, "", F));
BasicBlock *Left(BasicBlock::Create(C, "", F));
BasicBlock *Right(BasicBlock::Create(C, "", F));
@@ -47,24 +86,21 @@ TEST(MemorySSA, RemoveMemoryAccess) {
B.SetInsertPoint(Merge);
LoadInst *LoadInst = B.CreateLoad(PointerArg);
- std::unique_ptr<MemorySSA> MSSA(new MemorySSA(*F));
- std::unique_ptr<DominatorTree> DT(new DominatorTree(*F));
- std::unique_ptr<AssumptionCache> AC(new AssumptionCache(*F));
- AAResults AA(TLI);
- BasicAAResult BAA(DL, TLI, *AC, &*DT);
- AA.addAAResult(BAA);
- std::unique_ptr<MemorySSAWalker> Walker(MSSA->buildMemorySSA(&AA, &*DT));
+ setupAnalyses();
+ MemorySSA &MSSA = Analyses->MSSA;
+ MemorySSAWalker *Walker = &*Analyses->Walker;
+
// Before, the load will be a use of a phi<store, liveonentry>. It should be
// the same after.
- MemoryUse *LoadAccess = cast<MemoryUse>(MSSA->getMemoryAccess(LoadInst));
- MemoryDef *StoreAccess = cast<MemoryDef>(MSSA->getMemoryAccess(StoreInst));
+ MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LoadInst));
+ MemoryDef *StoreAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreInst));
MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
// The load is currently clobbered by one of the phi arguments, so the walker
// should determine the clobbering access as the phi.
EXPECT_EQ(DefiningAccess, Walker->getClobberingMemoryAccess(LoadInst));
- MSSA->removeMemoryAccess(StoreAccess);
- MSSA->verifyMemorySSA();
+ MSSA.removeMemoryAccess(StoreAccess);
+ MSSA.verifyMemorySSA();
// After the removeaccess, let's see if we got the right accesses
// The load should still point to the phi ...
EXPECT_EQ(DefiningAccess, LoadAccess->getDefiningAccess());
@@ -72,17 +108,121 @@ TEST(MemorySSA, RemoveMemoryAccess) {
// load, since it will walk past the phi node since every argument is the
// same.
EXPECT_TRUE(
- MSSA->isLiveOnEntryDef(Walker->getClobberingMemoryAccess(LoadInst)));
+ MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(LoadInst)));
// The phi should now be a two entry phi with two live on entry defs.
for (const auto &Op : DefiningAccess->operands()) {
MemoryAccess *Operand = cast<MemoryAccess>(&*Op);
- EXPECT_TRUE(MSSA->isLiveOnEntryDef(Operand));
+ EXPECT_TRUE(MSSA.isLiveOnEntryDef(Operand));
}
// Now we try to remove the single valued phi
- MSSA->removeMemoryAccess(DefiningAccess);
- MSSA->verifyMemorySSA();
+ MSSA.removeMemoryAccess(DefiningAccess);
+ MSSA.verifyMemorySSA();
// Now the load should be a load of live on entry.
- EXPECT_TRUE(MSSA->isLiveOnEntryDef(LoadAccess->getDefiningAccess()));
+ EXPECT_TRUE(MSSA.isLiveOnEntryDef(LoadAccess->getDefiningAccess()));
+}
+
+// We had a bug with caching where the walker would report MemoryDef#3's clobber
+// (below) was MemoryDef#1.
+//
+// define void @F(i8*) {
+// %A = alloca i8, i8 1
+// ; 1 = MemoryDef(liveOnEntry)
+// store i8 0, i8* %A
+// ; 2 = MemoryDef(1)
+// store i8 1, i8* %A
+// ; 3 = MemoryDef(2)
+// store i8 2, i8* %A
+// }
+TEST_F(MemorySSATest, TestTripleStore) {
+ F = Function::Create(
+ FunctionType::get(B.getVoidTy(), {}, false),
+ GlobalValue::ExternalLinkage, "F", &M);
+ B.SetInsertPoint(BasicBlock::Create(C, "", F));
+ Type *Int8 = Type::getInt8Ty(C);
+ Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
+ StoreInst *S1 = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
+ StoreInst *S2 = B.CreateStore(ConstantInt::get(Int8, 1), Alloca);
+ StoreInst *S3 = B.CreateStore(ConstantInt::get(Int8, 2), Alloca);
+
+ setupAnalyses();
+ MemorySSA &MSSA = Analyses->MSSA;
+ MemorySSAWalker *Walker = &*Analyses->Walker;
+
+ unsigned I = 0;
+ for (StoreInst *V : {S1, S2, S3}) {
+ // Everything should be clobbered by its defining access
+ MemoryAccess *DefiningAccess =
+ cast<MemoryUseOrDef>(MSSA.getMemoryAccess(V))->getDefiningAccess();
+ MemoryAccess *WalkerClobber = Walker->getClobberingMemoryAccess(V);
+ EXPECT_EQ(DefiningAccess, WalkerClobber)
+ << "Store " << I << " doesn't have the correct clobbering access";
+ // EXPECT_EQ expands such that if we increment I above, it won't get
+ // incremented except when we try to print the error message.
+ ++I;
+ }
+}
+
+// ...And fixing the above bug made it obvious that, when walking, MemorySSA's
+// walker was caching the initial node it walked. This was fine (albeit
+// mostly redundant) unless the initial node being walked is a clobber for the
+// query. In that case, we'd cache that the node clobbered itself.
+TEST_F(MemorySSATest, TestStoreAndLoad) {
+ F = Function::Create(
+ FunctionType::get(B.getVoidTy(), {}, false),
+ GlobalValue::ExternalLinkage, "F", &M);
+ B.SetInsertPoint(BasicBlock::Create(C, "", F));
+ Type *Int8 = Type::getInt8Ty(C);
+ Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
+ Instruction *SI = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
+ Instruction *LI = B.CreateLoad(Alloca);
+
+ setupAnalyses();
+ MemorySSA &MSSA = Analyses->MSSA;
+ MemorySSAWalker *Walker = &*Analyses->Walker;
+
+ MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LI);
+ EXPECT_EQ(LoadClobber, MSSA.getMemoryAccess(SI));
+ EXPECT_TRUE(MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(SI)));
+}
+
+// Another bug (related to the above two fixes): It was noted that, given the
+// following code:
+// ; 1 = MemoryDef(liveOnEntry)
+// store i8 0, i8* %1
+//
+// ...A query to getClobberingMemoryAccess(MemoryAccess*, MemoryLocation) would
+// hand back the store (correctly). A later call to
+// getClobberingMemoryAccess(const Instruction*) would also hand back the store
+// (incorrectly; it should return liveOnEntry).
+//
+// This test checks that repeated calls to either function return what they're
+// meant to.
+TEST_F(MemorySSATest, TestStoreDoubleQuery) {
+ F = Function::Create(
+ FunctionType::get(B.getVoidTy(), {}, false),
+ GlobalValue::ExternalLinkage, "F", &M);
+ B.SetInsertPoint(BasicBlock::Create(C, "", F));
+ Type *Int8 = Type::getInt8Ty(C);
+ Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
+ StoreInst *SI = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
+
+ setupAnalyses();
+ MemorySSA &MSSA = Analyses->MSSA;
+ MemorySSAWalker *Walker = &*Analyses->Walker;
+
+ MemoryAccess *StoreAccess = MSSA.getMemoryAccess(SI);
+ MemoryLocation StoreLoc = MemoryLocation::get(SI);
+ MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(StoreAccess, StoreLoc);
+ MemoryAccess *LiveOnEntry = Walker->getClobberingMemoryAccess(SI);
+
+ EXPECT_EQ(Clobber, StoreAccess);
+ EXPECT_TRUE(MSSA.isLiveOnEntryDef(LiveOnEntry));
+
+ // Try again (with entries in the cache already) for good measure...
+ Clobber = Walker->getClobberingMemoryAccess(StoreAccess, StoreLoc);
+ LiveOnEntry = Walker->getClobberingMemoryAccess(SI);
+ EXPECT_EQ(Clobber, StoreAccess);
+ EXPECT_TRUE(MSSA.isLiveOnEntryDef(LiveOnEntry));
}