[llvm] r340577 - [MemorySSA] Fix def optimization handling

George Burgess IV via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 23 14:29:11 PDT 2018


Author: gbiv
Date: Thu Aug 23 14:29:11 2018
New Revision: 340577

URL: http://llvm.org/viewvc/llvm-project?rev=340577&view=rev
Log:
[MemorySSA] Fix def optimization handling

In order for more complex updates of MSSA to happen (e.g. those in
D45299), a MemoryDef needs to hold an actual `Use` of the MemoryAccess
it's optimized to, rather than a WeakVH. This patch makes that happen.
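
As a rough sketch of what that buys us (illustrative only; isOperandOf
is a hypothetical helper, not part of this patch): the optimized access
is now visible to anything that walks a Def's operands.

  #include "llvm/Analysis/MemorySSA.h"
  using namespace llvm;

  // Hypothetical helper: after this patch, this returns true when MA is
  // either Def's defining access (operand 0) or the access Def is
  // optimized to (operand 1), since both are now real llvm::Uses.
  static bool isOperandOf(const MemoryDef *Def, const MemoryAccess *MA) {
    for (const Use &U : Def->operands())
      if (U.get() == MA)
        return true;
    return false;
  }

In particular, RAUW-style updates that walk use-lists will now see and
rewrite the optimized access like any other operand.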

In addition, this patch changes our optimization behavior for Defs
slightly: we now consider a Def's optimization invalid if the
MemoryAccess it's optimized to changes. Not doing this before was a
bug, but since we were tracking these targets with a WeakVH, it was
difficult for that bug to matter in practice.
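
Concretely, "optimized" only counts if the ID recorded at setOptimized
time still matches the access sitting in the optimized slot. A minimal
sketch using the accessors from the diff below (the scenario itself is
made up; assume Other is a MemoryDef or MemoryPhi distinct from
Clobber):

  #include "llvm/Analysis/MemorySSA.h"
  #include <cassert>
  using namespace llvm;

  void example(MemoryDef *Def, MemoryAccess *Clobber, MemoryAccess *Other) {
    Def->setOptimized(Clobber);  // stores Clobber in operand 1 + its ID
    assert(Def->isOptimized());  // recorded ID matches operand 1
    Def->setOperand(1, Other);   // the optimized access changes...
    assert(!Def->isOptimized()); // ...so the optimization is now invalid
  }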

We already have both of these behaviors for MemoryUses. The
difference is that a MemoryUse's defining access is always its optimized
access, and defining accesses are always `Use`s (in the LLVM sense).
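
For comparison, the Use-side invariant this mirrors looks roughly like
the following (illustrative; checkUseInvariant is a hypothetical
helper):

  #include "llvm/Analysis/MemorySSA.h"
  #include <cassert>
  using namespace llvm;

  // A MemoryUse keeps a single operand, so once it's optimized, its
  // defining access and its optimized access are the same MemoryAccess.
  // A MemoryDef now keeps them in two separate operand slots instead.
  static void checkUseInvariant(const MemoryUse *MU) {
    if (MU->isOptimized())
      assert(MU->getDefiningAccess() == MU->getOptimized());
  }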

Nothing exploded when testing a stage3 clang+llvm locally, so...

This also includes the test-case promised in r340461.

Modified:
    llvm/trunk/include/llvm/Analysis/MemorySSA.h
    llvm/trunk/unittests/Analysis/MemorySSA.cpp

Modified: llvm/trunk/include/llvm/Analysis/MemorySSA.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Analysis/MemorySSA.h?rev=340577&r1=340576&r2=340577&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Analysis/MemorySSA.h (original)
+++ llvm/trunk/include/llvm/Analysis/MemorySSA.h Thu Aug 23 14:29:11 2018
@@ -280,9 +280,10 @@ protected:
   friend class MemorySSAUpdater;
 
   MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
-                 DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB)
-      : MemoryAccess(C, Vty, DeleteValue, BB, 1), MemoryInstruction(MI),
-        OptimizedAccessAlias(MayAlias) {
+                 DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB,
+                 unsigned NumOperands)
+      : MemoryAccess(C, Vty, DeleteValue, BB, NumOperands),
+        MemoryInstruction(MI), OptimizedAccessAlias(MayAlias) {
     setDefiningAccess(DMA);
   }
 
@@ -308,11 +309,6 @@ private:
   Optional<AliasResult> OptimizedAccessAlias;
 };
 
-template <>
-struct OperandTraits<MemoryUseOrDef>
-    : public FixedNumOperandTraits<MemoryUseOrDef, 1> {};
-DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)
-
 /// Represents read-only accesses to memory
 ///
 /// In particular, the set of Instructions that will be represented by
@@ -323,7 +319,8 @@ public:
   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
 
   MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
-      : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB) {}
+      : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB,
+                       /*NumOperands=*/1) {}
 
   // allocate space for exactly one operand
   void *operator new(size_t s) { return User::operator new(s, 1); }
@@ -381,27 +378,28 @@ public:
 
   MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB,
             unsigned Ver)
-      : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB), ID(Ver) {}
+      : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB,
+                       /*NumOperands=*/2),
+        ID(Ver) {}
 
-  // allocate space for exactly one operand
-  void *operator new(size_t s) { return User::operator new(s, 1); }
+  // allocate space for exactly two operands
+  void *operator new(size_t s) { return User::operator new(s, 2); }
 
   static bool classof(const Value *MA) {
     return MA->getValueID() == MemoryDefVal;
   }
 
   void setOptimized(MemoryAccess *MA) {
-    Optimized = MA;
-    OptimizedID = getDefiningAccess()->getID();
+    setOperand(1, MA);
+    OptimizedID = MA->getID();
   }
 
   MemoryAccess *getOptimized() const {
-    return cast_or_null<MemoryAccess>(Optimized);
+    return cast_or_null<MemoryAccess>(getOperand(1));
   }
 
   bool isOptimized() const {
-    return getOptimized() && getDefiningAccess() &&
-           OptimizedID == getDefiningAccess()->getID();
+    return getOptimized() && OptimizedID == getOptimized()->getID();
   }
 
   void resetOptimized() {
@@ -417,13 +415,34 @@ private:
 
   const unsigned ID;
   unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
-  WeakVH Optimized;
 };
 
 template <>
-struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 1> {};
+struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 2> {};
 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)
 
+template <>
+struct OperandTraits<MemoryUseOrDef> {
+  static Use *op_begin(MemoryUseOrDef *MUD) {
+    if (auto *MU = dyn_cast<MemoryUse>(MUD))
+      return OperandTraits<MemoryUse>::op_begin(MU);
+    return OperandTraits<MemoryDef>::op_begin(cast<MemoryDef>(MUD));
+  }
+
+  static Use *op_end(MemoryUseOrDef *MUD) {
+    if (auto *MU = dyn_cast<MemoryUse>(MUD))
+      return OperandTraits<MemoryUse>::op_end(MU);
+    return OperandTraits<MemoryDef>::op_end(cast<MemoryDef>(MUD));
+  }
+
+  static unsigned operands(const MemoryUseOrDef *MUD) {
+    if (const auto *MU = dyn_cast<MemoryUse>(MUD))
+      return OperandTraits<MemoryUse>::operands(MU);
+    return OperandTraits<MemoryDef>::operands(cast<MemoryDef>(MUD));
+  }
+};
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)
+
 /// Represents phi nodes for memory accesses.
 ///
 /// These have the same semantic as regular phi nodes, with the exception that

Modified: llvm/trunk/unittests/Analysis/MemorySSA.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/unittests/Analysis/MemorySSA.cpp?rev=340577&r1=340576&r2=340577&view=diff
==============================================================================
--- llvm/trunk/unittests/Analysis/MemorySSA.cpp (original)
+++ llvm/trunk/unittests/Analysis/MemorySSA.cpp Thu Aug 23 14:29:11 2018
@@ -1265,3 +1265,131 @@ TEST_F(MemorySSATest, LifetimeMarkersAre
       MSSA.getWalker()->getClobberingMemoryAccess(BarAccess);
   EXPECT_EQ(BarClobber, LifetimeStartAccess);
 }
+
+TEST_F(MemorySSATest, DefOptimizationsAreInvalidatedOnMoving) {
+  IRBuilder<> B(C);
+  F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getInt1Ty()}, false),
+                       GlobalValue::ExternalLinkage, "F", &M);
+
+  // Make a CFG like
+  //     entry
+  //      / \
+  //     a   b
+  //      \ /
+  //       c
+  //
+  // Put a def in A and a def in B, move the def from A -> B, observe as the
+  // optimization is invalidated.
+  BasicBlock *Entry = BasicBlock::Create(C, "entry", F);
+  BasicBlock *BlockA = BasicBlock::Create(C, "a", F);
+  BasicBlock *BlockB = BasicBlock::Create(C, "b", F);
+  BasicBlock *BlockC = BasicBlock::Create(C, "c", F);
+
+  B.SetInsertPoint(Entry);
+  Type *Int8 = Type::getInt8Ty(C);
+  Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "alloc");
+  StoreInst *StoreEntry = B.CreateStore(B.getInt8(0), Alloca);
+  B.CreateCondBr(B.getTrue(), BlockA, BlockB);
+
+  B.SetInsertPoint(BlockA);
+  StoreInst *StoreA = B.CreateStore(B.getInt8(1), Alloca);
+  B.CreateBr(BlockC);
+
+  B.SetInsertPoint(BlockB);
+  StoreInst *StoreB = B.CreateStore(B.getInt8(2), Alloca);
+  B.CreateBr(BlockC);
+
+  B.SetInsertPoint(BlockC);
+  B.CreateUnreachable();
+
+  setupAnalyses();
+  MemorySSA &MSSA = *Analyses->MSSA;
+
+  auto *AccessEntry = cast<MemoryDef>(MSSA.getMemoryAccess(StoreEntry));
+  auto *StoreAEntry = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA));
+  auto *StoreBEntry = cast<MemoryDef>(MSSA.getMemoryAccess(StoreB));
+
+  ASSERT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(StoreAEntry),
+            AccessEntry);
+  ASSERT_TRUE(StoreAEntry->isOptimized());
+
+  ASSERT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(StoreBEntry),
+            AccessEntry);
+  ASSERT_TRUE(StoreBEntry->isOptimized());
+
+  // Note that if we did InsertionPlace::Beginning, we don't go out of our way
+  // to invalidate the cache for StoreBEntry. If the user wants to actually do
+  // moves like these, it's up to them to ensure that nearby cache entries are
+  // correctly invalidated (which, in general, requires walking all instructions
+  // that the moved instruction dominates. So we probably shouldn't be doing
+  // moves like this in general. Still, works as a test-case. ;) )
+  MemorySSAUpdater(&MSSA).moveToPlace(StoreAEntry, BlockB,
+                                      MemorySSA::InsertionPlace::End);
+  ASSERT_FALSE(StoreAEntry->isOptimized());
+  ASSERT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(StoreAEntry),
+            StoreBEntry);
+}
+
+TEST_F(MemorySSATest, TestOptimizedDefsAreProperUses) {
+  F = Function::Create(FunctionType::get(B.getVoidTy(),
+                                         {B.getInt8PtrTy(), B.getInt8PtrTy()},
+                                         false),
+                       GlobalValue::ExternalLinkage, "F", &M);
+  B.SetInsertPoint(BasicBlock::Create(C, "", F));
+  Type *Int8 = Type::getInt8Ty(C);
+  Value *AllocA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
+  Value *AllocB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
+
+  StoreInst *StoreA = B.CreateStore(ConstantInt::get(Int8, 0), AllocA);
+  StoreInst *StoreB = B.CreateStore(ConstantInt::get(Int8, 1), AllocB);
+  StoreInst *StoreA2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocA);
+
+  setupAnalyses();
+  MemorySSA &MSSA = *Analyses->MSSA;
+  MemorySSAWalker *Walker = Analyses->Walker;
+
+  // If these don't hold, there's no chance of the test result being useful.
+  ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreA),
+            MSSA.getLiveOnEntryDef());
+  ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreB),
+            MSSA.getLiveOnEntryDef());
+  auto *StoreAAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA));
+  auto *StoreA2Access = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA2));
+  ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreA2), StoreAAccess);
+  ASSERT_EQ(StoreA2Access->getOptimized(), StoreAAccess);
+
+  auto *StoreBAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreB));
+  ASSERT_LT(StoreAAccess->getID(), StoreBAccess->getID());
+  ASSERT_LT(StoreBAccess->getID(), StoreA2Access->getID());
+
+  auto SortVecByID = [](std::vector<const MemoryDef *> &Defs) {
+    llvm::sort(Defs.begin(), Defs.end(),
+               [](const MemoryDef *LHS, const MemoryDef *RHS) {
+                 return LHS->getID() < RHS->getID();
+               });
+  };
+
+  auto SortedUserList = [&](const MemoryDef *MD) {
+    std::vector<const MemoryDef *> Result;
+    transform(MD->users(), std::back_inserter(Result),
+              [](const User *U) { return cast<MemoryDef>(U); });
+    SortVecByID(Result);
+    return Result;
+  };
+
+  // Use std::vectors, since they have nice pretty-printing if the test fails.
+  // Parens are necessary because EXPECT_EQ is a macro, and we have commas in
+  // our init lists...
+  EXPECT_EQ(SortedUserList(StoreAAccess),
+            (std::vector<const MemoryDef *>{StoreBAccess, StoreA2Access}));
+
+  EXPECT_EQ(SortedUserList(StoreBAccess),
+            std::vector<const MemoryDef *>{StoreA2Access});
+
+  // StoreAAccess should be present twice, since it uses liveOnEntry for both
+  // its defining and optimized accesses. This is a bit awkward, and is not
+  // relied upon anywhere at the moment. If this is painful, we can fix it.
+  EXPECT_EQ(SortedUserList(cast<MemoryDef>(MSSA.getLiveOnEntryDef())),
+            (std::vector<const MemoryDef *>{StoreAAccess, StoreAAccess,
+                                            StoreBAccess}));
+}



