[llvm] r299980 - MemorySSA: Move to Analysis, from Transforms/Utils. It's used as
Daniel Berlin via llvm-commits
llvm-commits at lists.llvm.org
Tue Apr 11 13:06:37 PDT 2017
Author: dannyb
Date: Tue Apr 11 15:06:36 2017
New Revision: 299980
URL: http://llvm.org/viewvc/llvm-project?rev=299980&view=rev
Log:
MemorySSA: Move to Analysis, from Transforms/Utils. It's used as
an Analysis, it has Analysis passes, and once NewGVN is made an Analysis,
this removes the cross dependency from Analysis to Transforms/Utils.
NFC.
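For downstream code the move is mechanical: include paths change from
Transforms/Utils to Analysis, as the GVNExpression.h hunk below shows. A
minimal sketch of the update a consumer makes (the surrounding file is
hypothetical; the two paths are the ones in this commit):

  -#include "llvm/Transforms/Utils/MemorySSA.h"
  +#include "llvm/Analysis/MemorySSA.h"

The classes and APIs themselves are unchanged; legacy pass initialization
moves from Transforms/Utils into initializeAnalysis (see the Analysis.cpp
hunk below).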
Added:
llvm/trunk/include/llvm/Analysis/MemorySSA.h
- copied, changed from r299975, llvm/trunk/include/llvm/Transforms/Utils/MemorySSA.h
llvm/trunk/include/llvm/Analysis/MemorySSAUpdater.h
- copied, changed from r299975, llvm/trunk/include/llvm/Transforms/Utils/MemorySSAUpdater.h
llvm/trunk/lib/Analysis/MemorySSA.cpp
- copied, changed from r299975, llvm/trunk/lib/Transforms/Utils/MemorySSA.cpp
llvm/trunk/lib/Analysis/MemorySSAUpdater.cpp
- copied, changed from r299975, llvm/trunk/lib/Transforms/Utils/MemorySSAUpdater.cpp
llvm/trunk/test/Analysis/MemorySSA/
llvm/trunk/test/Analysis/MemorySSA/assume.ll
- copied, changed from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/assume.ll
llvm/trunk/test/Analysis/MemorySSA/atomic-clobber.ll
- copied, changed from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/atomic-clobber.ll
llvm/trunk/test/Analysis/MemorySSA/basicaa-memcpy.ll
- copied, changed from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/basicaa-memcpy.ll
llvm/trunk/test/Analysis/MemorySSA/constant-memory.ll
- copied, changed from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/constant-memory.ll
llvm/trunk/test/Analysis/MemorySSA/cyclicphi.ll
- copied, changed from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/cyclicphi.ll
llvm/trunk/test/Analysis/MemorySSA/forward-unreachable.ll
- copied, changed from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/forward-unreachable.ll
llvm/trunk/test/Analysis/MemorySSA/function-clobber.ll
- copied, changed from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/function-clobber.ll
llvm/trunk/test/Analysis/MemorySSA/function-mem-attrs.ll
- copied, changed from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/function-mem-attrs.ll
llvm/trunk/test/Analysis/MemorySSA/invariant-groups.ll
- copied, changed from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/invariant-groups.ll
llvm/trunk/test/Analysis/MemorySSA/lifetime-simple.ll
- copied, changed from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/lifetime-simple.ll
llvm/trunk/test/Analysis/MemorySSA/load-invariant.ll
- copied, changed from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/load-invariant.ll
llvm/trunk/test/Analysis/MemorySSA/many-dom-backedge.ll
- copied, changed from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/many-dom-backedge.ll
llvm/trunk/test/Analysis/MemorySSA/many-doms.ll
- copied, changed from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/many-doms.ll
llvm/trunk/test/Analysis/MemorySSA/multi-edges.ll
- copied, changed from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/multi-edges.ll
llvm/trunk/test/Analysis/MemorySSA/multiple-backedges-hal.ll
- copied, changed from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/multiple-backedges-hal.ll
llvm/trunk/test/Analysis/MemorySSA/multiple-locations.ll
- copied, changed from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/multiple-locations.ll
llvm/trunk/test/Analysis/MemorySSA/no-disconnected.ll
- copied, changed from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/no-disconnected.ll
llvm/trunk/test/Analysis/MemorySSA/optimize-use.ll
- copied, changed from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/optimize-use.ll
llvm/trunk/test/Analysis/MemorySSA/phi-translation.ll
- copied, changed from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/phi-translation.ll
llvm/trunk/test/Analysis/MemorySSA/pr28880.ll
- copied, changed from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/pr28880.ll
llvm/trunk/test/Analysis/MemorySSA/ptr-const-mem.ll
- copied, changed from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/ptr-const-mem.ll
llvm/trunk/test/Analysis/MemorySSA/volatile-clobber.ll
- copied, changed from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/volatile-clobber.ll
llvm/trunk/unittests/Analysis/MemorySSA.cpp
- copied, changed from r299975, llvm/trunk/unittests/Transforms/Utils/MemorySSA.cpp
Removed:
llvm/trunk/include/llvm/Transforms/Utils/MemorySSA.h
llvm/trunk/include/llvm/Transforms/Utils/MemorySSAUpdater.h
llvm/trunk/lib/Transforms/Utils/MemorySSA.cpp
llvm/trunk/lib/Transforms/Utils/MemorySSAUpdater.cpp
llvm/trunk/test/Transforms/Util/MemorySSA/assume.ll
llvm/trunk/test/Transforms/Util/MemorySSA/atomic-clobber.ll
llvm/trunk/test/Transforms/Util/MemorySSA/basicaa-memcpy.ll
llvm/trunk/test/Transforms/Util/MemorySSA/constant-memory.ll
llvm/trunk/test/Transforms/Util/MemorySSA/cyclicphi.ll
llvm/trunk/test/Transforms/Util/MemorySSA/forward-unreachable.ll
llvm/trunk/test/Transforms/Util/MemorySSA/function-clobber.ll
llvm/trunk/test/Transforms/Util/MemorySSA/function-mem-attrs.ll
llvm/trunk/test/Transforms/Util/MemorySSA/invariant-groups.ll
llvm/trunk/test/Transforms/Util/MemorySSA/lifetime-simple.ll
llvm/trunk/test/Transforms/Util/MemorySSA/load-invariant.ll
llvm/trunk/test/Transforms/Util/MemorySSA/many-dom-backedge.ll
llvm/trunk/test/Transforms/Util/MemorySSA/many-doms.ll
llvm/trunk/test/Transforms/Util/MemorySSA/multi-edges.ll
llvm/trunk/test/Transforms/Util/MemorySSA/multiple-backedges-hal.ll
llvm/trunk/test/Transforms/Util/MemorySSA/multiple-locations.ll
llvm/trunk/test/Transforms/Util/MemorySSA/no-disconnected.ll
llvm/trunk/test/Transforms/Util/MemorySSA/optimize-use.ll
llvm/trunk/test/Transforms/Util/MemorySSA/phi-translation.ll
llvm/trunk/test/Transforms/Util/MemorySSA/pr28880.ll
llvm/trunk/test/Transforms/Util/MemorySSA/ptr-const-mem.ll
llvm/trunk/test/Transforms/Util/MemorySSA/volatile-clobber.ll
llvm/trunk/unittests/Transforms/Utils/MemorySSA.cpp
Modified:
llvm/trunk/include/llvm/Transforms/Scalar/GVNExpression.h
llvm/trunk/lib/Analysis/Analysis.cpp
llvm/trunk/lib/Analysis/CMakeLists.txt
llvm/trunk/lib/Passes/PassBuilder.cpp
llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp
llvm/trunk/lib/Transforms/Scalar/GVNHoist.cpp
llvm/trunk/lib/Transforms/Scalar/NewGVN.cpp
llvm/trunk/lib/Transforms/Utils/CMakeLists.txt
llvm/trunk/lib/Transforms/Utils/Utils.cpp
llvm/trunk/unittests/Analysis/CMakeLists.txt
llvm/trunk/unittests/Transforms/Utils/CMakeLists.txt
Copied: llvm/trunk/include/llvm/Analysis/MemorySSA.h (from r299975, llvm/trunk/include/llvm/Transforms/Utils/MemorySSA.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Analysis/MemorySSA.h?p2=llvm/trunk/include/llvm/Analysis/MemorySSA.h&p1=llvm/trunk/include/llvm/Transforms/Utils/MemorySSA.h&r1=299975&r2=299980&rev=299980&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Transforms/Utils/MemorySSA.h (original)
+++ llvm/trunk/include/llvm/Analysis/MemorySSA.h Tue Apr 11 15:06:36 2017
@@ -69,8 +69,8 @@
/// per instruction.
//===----------------------------------------------------------------------===//
-#ifndef LLVM_TRANSFORMS_UTILS_MEMORYSSA_H
-#define LLVM_TRANSFORMS_UTILS_MEMORYSSA_H
+#ifndef LLVM_ANALYSIS_MEMORYSSA_H
+#define LLVM_ANALYSIS_MEMORYSSA_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/GraphTraits.h"
@@ -1152,4 +1152,4 @@ inline iterator_range<def_chain_iterator
} // end namespace llvm
-#endif // LLVM_TRANSFORMS_UTILS_MEMORYSSA_H
+#endif // LLVM_ANALYSIS_MEMORYSSA_H
Copied: llvm/trunk/include/llvm/Analysis/MemorySSAUpdater.h (from r299975, llvm/trunk/include/llvm/Transforms/Utils/MemorySSAUpdater.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Analysis/MemorySSAUpdater.h?p2=llvm/trunk/include/llvm/Analysis/MemorySSAUpdater.h&p1=llvm/trunk/include/llvm/Transforms/Utils/MemorySSAUpdater.h&r1=299975&r2=299980&rev=299980&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Transforms/Utils/MemorySSAUpdater.h (original)
+++ llvm/trunk/include/llvm/Analysis/MemorySSAUpdater.h Tue Apr 11 15:06:36 2017
@@ -29,8 +29,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_TRANSFORMS_UTILS_MEMORYSSAUPDATER_H
-#define LLVM_TRANSFORMS_UTILS_MEMORYSSAUPDATER_H
+#ifndef LLVM_ANALYSIS_MEMORYSSAUPDATER_H
+#define LLVM_ANALYSIS_MEMORYSSAUPDATER_H
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
@@ -45,7 +45,7 @@
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Transforms/Utils/MemorySSA.h"
+#include "llvm/Analysis/MemorySSA.h"
namespace llvm {
@@ -150,4 +150,4 @@ private:
};
} // end namespace llvm
-#endif // LLVM_TRANSFORMS_UTILS_MEMORYSSAUPDATER_H
+#endif // LLVM_ANALYSIS_MEMORYSSAUPDATER_H
Modified: llvm/trunk/include/llvm/Transforms/Scalar/GVNExpression.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Transforms/Scalar/GVNExpression.h?rev=299980&r1=299979&r2=299980&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Transforms/Scalar/GVNExpression.h (original)
+++ llvm/trunk/include/llvm/Transforms/Scalar/GVNExpression.h Tue Apr 11 15:06:36 2017
@@ -18,6 +18,7 @@
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/iterator_range.h"
+#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Value.h"
@@ -26,7 +27,6 @@
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Transforms/Utils/MemorySSA.h"
#include <algorithm>
#include <cassert>
#include <iterator>
Removed: llvm/trunk/include/llvm/Transforms/Utils/MemorySSA.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Transforms/Utils/MemorySSA.h?rev=299979&view=auto
==============================================================================
--- llvm/trunk/include/llvm/Transforms/Utils/MemorySSA.h (original)
+++ llvm/trunk/include/llvm/Transforms/Utils/MemorySSA.h (removed)
@@ -1,1155 +0,0 @@
-//===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-///
-/// \file
-/// \brief This file exposes an interface to building/using memory SSA to
-/// walk memory instructions using a use/def graph.
-///
-/// Memory SSA class builds an SSA form that links together memory access
-/// instructions such as loads, stores, atomics, and calls. Additionally, it
-/// does a trivial form of "heap versioning": every time the memory state
-/// changes in the program, we generate a new heap version. It generates
-/// MemoryDef/Uses/Phis that are overlaid on top of the existing instructions.
-///
-/// As a trivial example,
-/// define i32 @main() #0 {
-/// entry:
-/// %call = call noalias i8* @_Znwm(i64 4) #2
-/// %0 = bitcast i8* %call to i32*
-/// %call1 = call noalias i8* @_Znwm(i64 4) #2
-/// %1 = bitcast i8* %call1 to i32*
-/// store i32 5, i32* %0, align 4
-/// store i32 7, i32* %1, align 4
-/// %2 = load i32* %0, align 4
-/// %3 = load i32* %1, align 4
-/// %add = add nsw i32 %2, %3
-/// ret i32 %add
-/// }
-///
-/// Will become
-/// define i32 @main() #0 {
-/// entry:
-/// ; 1 = MemoryDef(0)
-/// %call = call noalias i8* @_Znwm(i64 4) #3
-/// %2 = bitcast i8* %call to i32*
-/// ; 2 = MemoryDef(1)
-/// %call1 = call noalias i8* @_Znwm(i64 4) #3
-/// %4 = bitcast i8* %call1 to i32*
-/// ; 3 = MemoryDef(2)
-/// store i32 5, i32* %2, align 4
-/// ; 4 = MemoryDef(3)
-/// store i32 7, i32* %4, align 4
-/// ; MemoryUse(3)
-/// %7 = load i32* %2, align 4
-/// ; MemoryUse(4)
-/// %8 = load i32* %4, align 4
-/// %add = add nsw i32 %7, %8
-/// ret i32 %add
-/// }
-///
-/// Given this form, all the stores that could ever affect the load at %8 can
-/// be found by using the MemoryUse associated with it, and walking from use to
-/// def until you hit the top of the function.
-///
-/// Each def also has a list of users associated with it, so you can walk from
-/// both def to users, and users to defs. Note that we disambiguate MemoryUses,
-/// but not the RHS of MemoryDefs. You can see this above at %7, which would
-/// otherwise be a MemoryUse(4). Being disambiguated means that for a given
-/// store, all the MemoryUses on its use lists are may-aliases of that store
-/// (but the MemoryDefs on its use list may not be).
-///
-/// MemoryDefs are not disambiguated because it would require multiple reaching
-/// definitions, which would require multiple phis, and multiple memoryaccesses
-/// per instruction.
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TRANSFORMS_UTILS_MEMORYSSA_H
-#define LLVM_TRANSFORMS_UTILS_MEMORYSSA_H
-
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/GraphTraits.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/ilist.h"
-#include "llvm/ADT/ilist_node.h"
-#include "llvm/ADT/iterator.h"
-#include "llvm/ADT/iterator_range.h"
-#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/MemoryLocation.h"
-#include "llvm/Analysis/PHITransAddr.h"
-#include "llvm/IR/BasicBlock.h"
-#include "llvm/IR/Dominators.h"
-#include "llvm/IR/Module.h"
-#include "llvm/IR/OperandTraits.h"
-#include "llvm/IR/Type.h"
-#include "llvm/IR/Use.h"
-#include "llvm/IR/User.h"
-#include "llvm/IR/Value.h"
-#include "llvm/Pass.h"
-#include "llvm/Support/Casting.h"
-#include "llvm/Support/ErrorHandling.h"
-#include <algorithm>
-#include <cassert>
-#include <cstddef>
-#include <iterator>
-#include <memory>
-#include <utility>
-
-namespace llvm {
-
-class Function;
-class Instruction;
-class MemoryAccess;
-class LLVMContext;
-class raw_ostream;
-namespace MSSAHelpers {
-struct AllAccessTag {};
-struct DefsOnlyTag {};
-}
-
-enum {
- // Used to signify what the default invalid ID is for MemoryAccess's
- // getID()
- INVALID_MEMORYACCESS_ID = 0
-};
-
-template <class T> class memoryaccess_def_iterator_base;
-using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>;
-using const_memoryaccess_def_iterator =
- memoryaccess_def_iterator_base<const MemoryAccess>;
-
-// \brief The base for all memory accesses. All memory accesses in a block are
-// linked together using an intrusive list.
-class MemoryAccess
- : public User,
- public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>,
- public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> {
-public:
- using AllAccessType =
- ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
- using DefsOnlyType =
- ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
-
- // Methods for support type inquiry through isa, cast, and
- // dyn_cast
- static inline bool classof(const Value *V) {
- unsigned ID = V->getValueID();
- return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal;
- }
-
- MemoryAccess(const MemoryAccess &) = delete;
- MemoryAccess &operator=(const MemoryAccess &) = delete;
- ~MemoryAccess() override;
-
- void *operator new(size_t, unsigned) = delete;
- void *operator new(size_t) = delete;
-
- BasicBlock *getBlock() const { return Block; }
-
- virtual void print(raw_ostream &OS) const = 0;
- virtual void dump() const;
-
- /// \brief The user iterators for a memory access
- typedef user_iterator iterator;
- typedef const_user_iterator const_iterator;
-
- /// \brief This iterator walks over all of the defs in a given
- /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
- /// MemoryUse/MemoryDef, this walks the defining access.
- memoryaccess_def_iterator defs_begin();
- const_memoryaccess_def_iterator defs_begin() const;
- memoryaccess_def_iterator defs_end();
- const_memoryaccess_def_iterator defs_end() const;
-
- /// \brief Get the iterators for the all access list and the defs only list
- /// We default to the all access list.
- AllAccessType::self_iterator getIterator() {
- return this->AllAccessType::getIterator();
- }
- AllAccessType::const_self_iterator getIterator() const {
- return this->AllAccessType::getIterator();
- }
- AllAccessType::reverse_self_iterator getReverseIterator() {
- return this->AllAccessType::getReverseIterator();
- }
- AllAccessType::const_reverse_self_iterator getReverseIterator() const {
- return this->AllAccessType::getReverseIterator();
- }
- DefsOnlyType::self_iterator getDefsIterator() {
- return this->DefsOnlyType::getIterator();
- }
- DefsOnlyType::const_self_iterator getDefsIterator() const {
- return this->DefsOnlyType::getIterator();
- }
- DefsOnlyType::reverse_self_iterator getReverseDefsIterator() {
- return this->DefsOnlyType::getReverseIterator();
- }
- DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const {
- return this->DefsOnlyType::getReverseIterator();
- }
-
-protected:
- friend class MemorySSA;
- friend class MemoryUseOrDef;
- friend class MemoryUse;
- friend class MemoryDef;
- friend class MemoryPhi;
-
- /// \brief Used by MemorySSA to change the block of a MemoryAccess when it is
- /// moved.
- void setBlock(BasicBlock *BB) { Block = BB; }
-
- /// \brief Used for debugging and tracking things about MemoryAccesses.
- /// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
- virtual unsigned getID() const = 0;
-
- MemoryAccess(LLVMContext &C, unsigned Vty, BasicBlock *BB,
- unsigned NumOperands)
- : User(Type::getVoidTy(C), Vty, nullptr, NumOperands), Block(BB) {}
-
-private:
- BasicBlock *Block;
-};
-
-inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
- MA.print(OS);
- return OS;
-}
-
-/// \brief Class that has the common methods + fields of memory uses/defs. It's
-/// a little awkward to have, but there are many cases where we want either a
-/// use or def, and there are many cases where uses are needed (defs aren't
-/// acceptable), and vice-versa.
-///
-/// This class should never be instantiated directly; make a MemoryUse or
-/// MemoryDef instead.
-class MemoryUseOrDef : public MemoryAccess {
-public:
- void *operator new(size_t, unsigned) = delete;
- void *operator new(size_t) = delete;
-
- DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
-
- /// \brief Get the instruction that this MemoryUse represents.
- Instruction *getMemoryInst() const { return MemoryInst; }
-
- /// \brief Get the access that produces the memory state used by this Use.
- MemoryAccess *getDefiningAccess() const { return getOperand(0); }
-
- static inline bool classof(const Value *MA) {
- return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal;
- }
-
- // Sadly, these have to be public because they are needed in some of the
- // iterators.
- virtual bool isOptimized() const = 0;
- virtual MemoryAccess *getOptimized() const = 0;
- virtual void setOptimized(MemoryAccess *) = 0;
-
- /// \brief Reset the ID of what this MemoryUse was optimized to, causing it to
- /// be rewalked by the walker if necessary.
- /// This really should only be called by tests.
- virtual void resetOptimized() = 0;
-
-protected:
- friend class MemorySSA;
- friend class MemorySSAUpdater;
- MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
- Instruction *MI, BasicBlock *BB)
- : MemoryAccess(C, Vty, BB, 1), MemoryInst(MI) {
- setDefiningAccess(DMA);
- }
- void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false) {
- if (!Optimized) {
- setOperand(0, DMA);
- return;
- }
- setOptimized(DMA);
- }
-
-private:
- Instruction *MemoryInst;
-};
-
-template <>
-struct OperandTraits<MemoryUseOrDef>
- : public FixedNumOperandTraits<MemoryUseOrDef, 1> {};
-DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)
-
-/// \brief Represents read-only accesses to memory
-///
-/// In particular, the set of Instructions that will be represented by
-/// MemoryUse's is exactly the set of Instructions for which
-/// AliasAnalysis::getModRefInfo returns "Ref".
-class MemoryUse final : public MemoryUseOrDef {
-public:
- DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
-
- MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
- : MemoryUseOrDef(C, DMA, MemoryUseVal, MI, BB), OptimizedID(0) {}
-
- // allocate space for exactly one operand
- void *operator new(size_t s) { return User::operator new(s, 1); }
- void *operator new(size_t, unsigned) = delete;
-
- static inline bool classof(const Value *MA) {
- return MA->getValueID() == MemoryUseVal;
- }
-
- void print(raw_ostream &OS) const override;
-
- virtual void setOptimized(MemoryAccess *DMA) override {
- OptimizedID = DMA->getID();
- setOperand(0, DMA);
- }
-
- virtual bool isOptimized() const override {
- return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
- }
-
- virtual MemoryAccess *getOptimized() const override {
- return getDefiningAccess();
- }
- virtual void resetOptimized() override {
- OptimizedID = INVALID_MEMORYACCESS_ID;
- }
-
-protected:
- friend class MemorySSA;
-
- unsigned getID() const override {
- llvm_unreachable("MemoryUses do not have IDs");
- }
-
-private:
- unsigned int OptimizedID;
-};
-
-template <>
-struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
-DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)
-
-/// \brief Represents a read-write access to memory, whether it is a must-alias,
-/// or a may-alias.
-///
-/// In particular, the set of Instructions that will be represented by
-/// MemoryDef's is exactly the set of Instructions for which
-/// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef".
-/// Note that, in order to provide def-def chains, all defs also have a use
-/// associated with them. This use points to the nearest reaching
-/// MemoryDef/MemoryPhi.
-class MemoryDef final : public MemoryUseOrDef {
-public:
- DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
-
- MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB,
- unsigned Ver)
- : MemoryUseOrDef(C, DMA, MemoryDefVal, MI, BB), ID(Ver),
- Optimized(nullptr), OptimizedID(INVALID_MEMORYACCESS_ID) {}
-
- // allocate space for exactly one operand
- void *operator new(size_t s) { return User::operator new(s, 1); }
- void *operator new(size_t, unsigned) = delete;
-
- static inline bool classof(const Value *MA) {
- return MA->getValueID() == MemoryDefVal;
- }
-
- virtual void setOptimized(MemoryAccess *MA) override {
- Optimized = MA;
- OptimizedID = getDefiningAccess()->getID();
- }
- virtual MemoryAccess *getOptimized() const override { return Optimized; }
- virtual bool isOptimized() const override {
- return getOptimized() && getDefiningAccess() &&
- OptimizedID == getDefiningAccess()->getID();
- }
- virtual void resetOptimized() override {
- OptimizedID = INVALID_MEMORYACCESS_ID;
- }
-
- void print(raw_ostream &OS) const override;
-
-protected:
- friend class MemorySSA;
-
- unsigned getID() const override { return ID; }
-
-private:
- const unsigned ID;
- MemoryAccess *Optimized;
- unsigned int OptimizedID;
-};
-
-template <>
-struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 1> {};
-DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)
-
-/// \brief Represents phi nodes for memory accesses.
-///
-/// These have the same semantics as regular phi nodes, with the exception that
-/// only one phi will ever exist in a given basic block.
-/// Guaranteeing one phi per block means guaranteeing there is only ever one
-/// valid reaching MemoryDef/MemoryPHI along each path to the phi node.
-/// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or
-/// a MemoryPhi's operands.
-/// That is, given
-/// if (a) {
-/// store %a
-/// store %b
-/// }
-/// it *must* be transformed into
-/// if (a) {
-/// 1 = MemoryDef(liveOnEntry)
-/// store %a
-/// 2 = MemoryDef(1)
-/// store %b
-/// }
-/// and *not*
-/// if (a) {
-/// 1 = MemoryDef(liveOnEntry)
-/// store %a
-/// 2 = MemoryDef(liveOnEntry)
-/// store %b
-/// }
-/// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the
-/// end of the branch, and if there are not two phi nodes, one will be
-/// disconnected completely from the SSA graph below that point.
-/// Because MemoryUse's do not generate new definitions, they do not have this
-/// issue.
-class MemoryPhi final : public MemoryAccess {
- // allocate space for exactly zero operands
- void *operator new(size_t s) { return User::operator new(s); }
-
-public:
- /// Provide fast operand accessors
- DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
-
- MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0)
- : MemoryAccess(C, MemoryPhiVal, BB, 0), ID(Ver), ReservedSpace(NumPreds) {
- allocHungoffUses(ReservedSpace);
- }
-
- void *operator new(size_t, unsigned) = delete;
-
- // Block iterator interface. This provides access to the list of incoming
- // basic blocks, which parallels the list of incoming values.
- typedef BasicBlock **block_iterator;
- typedef BasicBlock *const *const_block_iterator;
-
- block_iterator block_begin() {
- auto *Ref = reinterpret_cast<Use::UserRef *>(op_begin() + ReservedSpace);
- return reinterpret_cast<block_iterator>(Ref + 1);
- }
-
- const_block_iterator block_begin() const {
- const auto *Ref =
- reinterpret_cast<const Use::UserRef *>(op_begin() + ReservedSpace);
- return reinterpret_cast<const_block_iterator>(Ref + 1);
- }
-
- block_iterator block_end() { return block_begin() + getNumOperands(); }
-
- const_block_iterator block_end() const {
- return block_begin() + getNumOperands();
- }
-
- iterator_range<block_iterator> blocks() {
- return make_range(block_begin(), block_end());
- }
-
- iterator_range<const_block_iterator> blocks() const {
- return make_range(block_begin(), block_end());
- }
-
- op_range incoming_values() { return operands(); }
-
- const_op_range incoming_values() const { return operands(); }
-
- /// \brief Return the number of incoming edges
- unsigned getNumIncomingValues() const { return getNumOperands(); }
-
- /// \brief Return incoming value number x
- MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
- void setIncomingValue(unsigned I, MemoryAccess *V) {
- assert(V && "PHI node got a null value!");
- setOperand(I, V);
- }
- static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
- static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }
-
- /// \brief Return incoming basic block number @p i.
- BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }
-
- /// \brief Return incoming basic block corresponding
- /// to an operand of the PHI.
- BasicBlock *getIncomingBlock(const Use &U) const {
- assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
- return getIncomingBlock(unsigned(&U - op_begin()));
- }
-
- /// \brief Return incoming basic block corresponding
- /// to value use iterator.
- BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const {
- return getIncomingBlock(I.getUse());
- }
-
- void setIncomingBlock(unsigned I, BasicBlock *BB) {
- assert(BB && "PHI node got a null basic block!");
- block_begin()[I] = BB;
- }
-
- /// \brief Add an incoming value to the end of the PHI list
- void addIncoming(MemoryAccess *V, BasicBlock *BB) {
- if (getNumOperands() == ReservedSpace)
- growOperands(); // Get more space!
- // Initialize some new operands.
- setNumHungOffUseOperands(getNumOperands() + 1);
- setIncomingValue(getNumOperands() - 1, V);
- setIncomingBlock(getNumOperands() - 1, BB);
- }
-
- /// \brief Return the first index of the specified basic
- /// block in the value list for this PHI. Returns -1 if no instance.
- int getBasicBlockIndex(const BasicBlock *BB) const {
- for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
- if (block_begin()[I] == BB)
- return I;
- return -1;
- }
-
- Value *getIncomingValueForBlock(const BasicBlock *BB) const {
- int Idx = getBasicBlockIndex(BB);
- assert(Idx >= 0 && "Invalid basic block argument!");
- return getIncomingValue(Idx);
- }
-
- static inline bool classof(const Value *V) {
- return V->getValueID() == MemoryPhiVal;
- }
-
- void print(raw_ostream &OS) const override;
-
-protected:
- friend class MemorySSA;
-
- /// \brief This is more complicated than the generic
- /// User::allocHungoffUses, because we have to allocate Uses for the incoming
- /// values and pointers to the incoming blocks, all in one allocation.
- void allocHungoffUses(unsigned N) {
- User::allocHungoffUses(N, /* IsPhi */ true);
- }
-
- unsigned getID() const final { return ID; }
-
-private:
- // For debugging only
- const unsigned ID;
- unsigned ReservedSpace;
-
- /// \brief This grows the operand list in response to a push_back style of
- /// operation. It grows the number of ops by 1.5 times.
- void growOperands() {
- unsigned E = getNumOperands();
- // 2 op PHI nodes are VERY common, so reserve at least enough for that.
- ReservedSpace = std::max(E + E / 2, 2u);
- growHungoffUses(ReservedSpace, /* IsPhi */ true);
- }
-};
-
-template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {};
-DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)
-
-class MemorySSAWalker;
-
-/// \brief Encapsulates MemorySSA, including all data associated with memory
-/// accesses.
-class MemorySSA {
-public:
- MemorySSA(Function &, AliasAnalysis *, DominatorTree *);
- ~MemorySSA();
-
- MemorySSAWalker *getWalker();
-
- /// \brief Given a memory Mod/Ref'ing instruction, get the MemorySSA
- /// access associated with it. If passed a basic block, this gets the memory
- /// phi node that exists for that block, if there is one. Otherwise, this
- /// will get a MemoryUseOrDef.
- MemoryUseOrDef *getMemoryAccess(const Instruction *) const;
- MemoryPhi *getMemoryAccess(const BasicBlock *BB) const;
-
- void dump() const;
- void print(raw_ostream &) const;
-
- /// \brief Return true if \p MA represents the live on entry value
- ///
- /// Loads and stores from pointer arguments and other global values may be
- /// defined by memory operations that do not occur in the current function, so
- /// they may be live on entry to the function. MemorySSA represents such
- /// memory state by the live on entry definition, which is guaranteed to occur
- /// before any other memory access in the function.
- inline bool isLiveOnEntryDef(const MemoryAccess *MA) const {
- return MA == LiveOnEntryDef.get();
- }
-
- inline MemoryAccess *getLiveOnEntryDef() const {
- return LiveOnEntryDef.get();
- }
-
- // Sadly, iplists, by default, owns and deletes pointers added to the
- // list. It's not currently possible to have two iplists for the same type,
- // where one owns the pointers, and one does not. This is because the traits
- // are per-type, not per-tag. If this ever changes, we should make the
- // DefList an iplist.
- using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
- using DefsList =
- simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
-
- /// \brief Return the list of MemoryAccess's for a given basic block.
- ///
- /// This list is not modifiable by the user.
- const AccessList *getBlockAccesses(const BasicBlock *BB) const {
- return getWritableBlockAccesses(BB);
- }
-
- /// \brief Return the list of MemoryDef's and MemoryPhi's for a given basic
- /// block.
- ///
- /// This list is not modifiable by the user.
- const DefsList *getBlockDefs(const BasicBlock *BB) const {
- return getWritableBlockDefs(BB);
- }
-
- /// \brief Given two memory accesses in the same basic block, determine
- /// whether MemoryAccess \p A dominates MemoryAccess \p B.
- bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;
-
- /// \brief Given two memory accesses in potentially different blocks,
- /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
- bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;
-
- /// \brief Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
- /// dominates Use \p B.
- bool dominates(const MemoryAccess *A, const Use &B) const;
-
- /// \brief Verify that MemorySSA is self-consistent (i.e., definitions
- /// dominate all uses, and uses appear in the right places). Used by unit tests.
- void verifyMemorySSA() const;
-
- /// Used in various insertion functions to specify whether we are talking
- /// about the beginning or end of a block.
- enum InsertionPlace { Beginning, End };
-
-protected:
- // Used by Memory SSA annotator, dumpers, and wrapper pass
- friend class MemorySSAAnnotatedWriter;
- friend class MemorySSAPrinterLegacyPass;
- friend class MemorySSAUpdater;
-
- void verifyDefUses(Function &F) const;
- void verifyDomination(Function &F) const;
- void verifyOrdering(Function &F) const;
-
- // This is used by the use optimizer and updater.
- AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
- auto It = PerBlockAccesses.find(BB);
- return It == PerBlockAccesses.end() ? nullptr : It->second.get();
- }
-
- // This is used by the use optimizer and updater.
- DefsList *getWritableBlockDefs(const BasicBlock *BB) const {
- auto It = PerBlockDefs.find(BB);
- return It == PerBlockDefs.end() ? nullptr : It->second.get();
- }
-
- // These are used by the updater to perform various internal MemorySSA
- // machinations. They do not always leave the IR in a correct state, and
- // rely on the updater to fix up what they break, so they are not public.
-
- void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
- void moveTo(MemoryUseOrDef *What, BasicBlock *BB, InsertionPlace Point);
- // Rename the dominator tree branch rooted at BB.
- void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
- SmallPtrSetImpl<BasicBlock *> &Visited) {
- renamePass(DT->getNode(BB), IncomingVal, Visited, true, true);
- }
- void removeFromLookups(MemoryAccess *);
- void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
- void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
- InsertionPlace);
- void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
- AccessList::iterator);
- MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *);
-
-private:
- class CachingWalker;
- class OptimizeUses;
-
- CachingWalker *getWalkerImpl();
- void buildMemorySSA();
- void optimizeUses();
-
- void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;
- using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>;
- using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>;
-
- void
- determineInsertionPoint(const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks);
- void markUnreachableAsLiveOnEntry(BasicBlock *BB);
- bool dominatesUse(const MemoryAccess *, const MemoryAccess *) const;
- MemoryPhi *createMemoryPhi(BasicBlock *BB);
- MemoryUseOrDef *createNewAccess(Instruction *);
- MemoryAccess *findDominatingDef(BasicBlock *, enum InsertionPlace);
- void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &,
- const DenseMap<const BasicBlock *, unsigned int> &);
- MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
- void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
- void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
- SmallPtrSetImpl<BasicBlock *> &Visited,
- bool SkipVisited = false, bool RenameAllUses = false);
- AccessList *getOrCreateAccessList(const BasicBlock *);
- DefsList *getOrCreateDefsList(const BasicBlock *);
- void renumberBlock(const BasicBlock *) const;
- AliasAnalysis *AA;
- DominatorTree *DT;
- Function &F;
-
- // Memory SSA mappings
- DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess;
- // These two mappings contain the main block to access/def mappings for
- // MemorySSA. The list contained in PerBlockAccesses really owns all the
- // MemoryAccesses.
- // Both maps maintain the invariant that if a block is found in them, the
- // corresponding list is not empty, and if a block is not found in them, the
- // corresponding list is empty.
- AccessMap PerBlockAccesses;
- DefsMap PerBlockDefs;
- std::unique_ptr<MemoryAccess> LiveOnEntryDef;
-
- // Domination mappings
- // Note that the numbering is local to a block, even though the map is
- // global.
- mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid;
- mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;
-
- // Memory SSA building info
- std::unique_ptr<CachingWalker> Walker;
- unsigned NextID;
-};
-
-// Internal MemorySSA utils, for use by MemorySSA classes and walkers
-class MemorySSAUtil {
-protected:
- friend class MemorySSAWalker;
- friend class GVNHoist;
- // This function should not be used by new passes.
- static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
- AliasAnalysis &AA);
-};
-
-// This pass does eager building and then printing of MemorySSA. It is used by
-// the tests to be able to build, dump, and verify Memory SSA.
-class MemorySSAPrinterLegacyPass : public FunctionPass {
-public:
- MemorySSAPrinterLegacyPass();
-
- bool runOnFunction(Function &) override;
- void getAnalysisUsage(AnalysisUsage &AU) const override;
-
- static char ID;
-};
-
-/// An analysis that produces \c MemorySSA for a function.
-///
-class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
- friend AnalysisInfoMixin<MemorySSAAnalysis>;
-
- static AnalysisKey Key;
-
-public:
- // Wrap MemorySSA result to ensure address stability of internal MemorySSA
- // pointers after construction. Use a wrapper class instead of plain
- // unique_ptr<MemorySSA> to avoid build breakage on MSVC.
- struct Result {
- Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {}
- MemorySSA &getMSSA() { return *MSSA.get(); }
-
- std::unique_ptr<MemorySSA> MSSA;
- };
-
- Result run(Function &F, FunctionAnalysisManager &AM);
-};
-
-/// \brief Printer pass for \c MemorySSA.
-class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
- raw_ostream &OS;
-
-public:
- explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {}
-
- PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
-};
-
-/// \brief Verifier pass for \c MemorySSA.
-struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
- PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
-};
-
-/// \brief Legacy analysis pass which computes \c MemorySSA.
-class MemorySSAWrapperPass : public FunctionPass {
-public:
- MemorySSAWrapperPass();
-
- static char ID;
-
- bool runOnFunction(Function &) override;
- void releaseMemory() override;
- MemorySSA &getMSSA() { return *MSSA; }
- const MemorySSA &getMSSA() const { return *MSSA; }
-
- void getAnalysisUsage(AnalysisUsage &AU) const override;
-
- void verifyAnalysis() const override;
- void print(raw_ostream &OS, const Module *M = nullptr) const override;
-
-private:
- std::unique_ptr<MemorySSA> MSSA;
-};
-
-/// \brief This is the generic walker interface for walkers of MemorySSA.
-/// Walkers are used to be able to further disambiguate the def-use chains
-/// MemorySSA gives you, or otherwise produce better info than MemorySSA gives
-/// you.
-/// In particular, while the def-use chains provide basic information, and are
-/// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
-/// MemoryUse as AliasAnalysis considers it, a user may want better or other
-/// information. In particular, they may want to use SCEV info to further
-/// disambiguate memory accesses, or they may want the nearest dominating
-/// may-aliasing MemoryDef for a call or a store. This API enables a
-/// standardized interface to getting and using that info.
-class MemorySSAWalker {
-public:
- MemorySSAWalker(MemorySSA *);
- virtual ~MemorySSAWalker() = default;
-
- using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;
-
- /// \brief Given a memory Mod/Ref/ModRef'ing instruction, calling this
- /// will give you the nearest dominating MemoryAccess that Mod's the location
- /// the instruction accesses (by skipping any def which AA can prove does not
- /// alias the location(s) accessed by the instruction given).
- ///
- /// Note that this will return a single access, and it must dominate the
- /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
- /// this will return the MemoryPhi, not the operand. This means that
- /// given:
- /// if (a) {
- /// 1 = MemoryDef(liveOnEntry)
- /// store %a
- /// } else {
- /// 2 = MemoryDef(liveOnEntry)
- /// store %b
- /// }
- /// 3 = MemoryPhi(2, 1)
- /// MemoryUse(3)
- /// load %a
- ///
- /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
- /// in the if (a) branch.
- MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
- MemoryAccess *MA = MSSA->getMemoryAccess(I);
- assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
- return getClobberingMemoryAccess(MA);
- }
-
- /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
- /// but takes a MemoryAccess instead of an Instruction.
- virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0;
-
- /// \brief Given a potentially clobbering memory access and a new location,
- /// calling this will give you the nearest dominating clobbering MemoryAccess
- /// (by skipping non-aliasing def links).
- ///
- /// This version of the function is mainly used to disambiguate phi translated
- /// pointers, where the value of a pointer may have changed from the initial
- /// memory access. Note that this expects to be handed either a MemoryUse,
- /// or an already potentially clobbering access. Unlike the above API, if
- /// given a MemoryDef that clobbers the pointer as the starting access, it
- /// will return that MemoryDef, whereas the above would return the clobber
- /// starting from the use side of the memory def.
- virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
- const MemoryLocation &) = 0;
-
- /// \brief Given a memory access, invalidate anything this walker knows about
- /// that access.
- /// This API is used by walkers that store information to perform basic cache
- /// invalidation. This will be called by MemorySSA at appropriate times for
- /// the walker it uses or returns.
- virtual void invalidateInfo(MemoryAccess *) {}
-
- virtual void verify(const MemorySSA *MSSA) { assert(MSSA == this->MSSA); }
-
-protected:
- friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
- // constructor.
- MemorySSA *MSSA;
-};
-
-/// \brief A MemorySSAWalker that does no alias queries, or anything else. It
-/// simply returns the links as they were constructed by the builder.
-class DoNothingMemorySSAWalker final : public MemorySSAWalker {
-public:
- // Keep the overrides below from hiding the Instruction overload of
- // getClobberingMemoryAccess.
- using MemorySSAWalker::getClobberingMemoryAccess;
-
- MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
- MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
- const MemoryLocation &) override;
-};
-
-using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
-using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;
-
-/// \brief Iterator base class used to implement const and non-const iterators
-/// over the defining accesses of a MemoryAccess.
-template <class T>
-class memoryaccess_def_iterator_base
- : public iterator_facade_base<memoryaccess_def_iterator_base<T>,
- std::forward_iterator_tag, T, ptrdiff_t, T *,
- T *> {
- using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base;
-
-public:
- memoryaccess_def_iterator_base(T *Start) : Access(Start) {}
- memoryaccess_def_iterator_base() = default;
-
- bool operator==(const memoryaccess_def_iterator_base &Other) const {
- return Access == Other.Access && (!Access || ArgNo == Other.ArgNo);
- }
-
- // This is a bit ugly, but for MemoryPHI's, unlike PHINodes, you can't get the
- // block from the operand in constant time (In a PHINode, the uselist has
- // both, so it's just subtraction). We provide it as part of the
- // iterator to avoid callers having to linear walk to get the block.
- // If the operation becomes constant time on MemoryPHI's, this bit of
- // abstraction breaking should be removed.
- BasicBlock *getPhiArgBlock() const {
- MemoryPhi *MP = dyn_cast<MemoryPhi>(Access);
- assert(MP && "Tried to get phi arg block when not iterating over a PHI");
- return MP->getIncomingBlock(ArgNo);
- }
- typename BaseT::iterator::pointer operator*() const {
- assert(Access && "Tried to access past the end of our iterator");
- // Go to the first argument for phis, and the defining access for everything
- // else.
- if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access))
- return MP->getIncomingValue(ArgNo);
- return cast<MemoryUseOrDef>(Access)->getDefiningAccess();
- }
- using BaseT::operator++;
- memoryaccess_def_iterator &operator++() {
- assert(Access && "Hit end of iterator");
- if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) {
- if (++ArgNo >= MP->getNumIncomingValues()) {
- ArgNo = 0;
- Access = nullptr;
- }
- } else {
- Access = nullptr;
- }
- return *this;
- }
-
-private:
- T *Access = nullptr;
- unsigned ArgNo = 0;
-};
-
-inline memoryaccess_def_iterator MemoryAccess::defs_begin() {
- return memoryaccess_def_iterator(this);
-}
-
-inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const {
- return const_memoryaccess_def_iterator(this);
-}
-
-inline memoryaccess_def_iterator MemoryAccess::defs_end() {
- return memoryaccess_def_iterator();
-}
-
-inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {
- return const_memoryaccess_def_iterator();
-}
-
-/// \brief GraphTraits for a MemoryAccess, which walks defs in the normal case,
-/// and uses in the inverse case.
-template <> struct GraphTraits<MemoryAccess *> {
- using NodeRef = MemoryAccess *;
- using ChildIteratorType = memoryaccess_def_iterator;
-
- static NodeRef getEntryNode(NodeRef N) { return N; }
- static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); }
- static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); }
-};
-
-template <> struct GraphTraits<Inverse<MemoryAccess *>> {
- using NodeRef = MemoryAccess *;
- using ChildIteratorType = MemoryAccess::iterator;
-
- static NodeRef getEntryNode(NodeRef N) { return N; }
- static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); }
- static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
-};
-
-/// \brief Provide an iterator that walks defs, giving both the memory access,
-/// and the current pointer location, updating the pointer location as it
-/// changes due to phi node translation.
-///
-/// This iterator, while somewhat specialized, is what most clients actually
-/// want when walking upwards through MemorySSA def chains. It takes a pair of
-/// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the
-/// memory location through phi nodes for the user.
-class upward_defs_iterator
- : public iterator_facade_base<upward_defs_iterator,
- std::forward_iterator_tag,
- const MemoryAccessPair> {
- using BaseT = upward_defs_iterator::iterator_facade_base;
-
-public:
- upward_defs_iterator(const MemoryAccessPair &Info)
- : DefIterator(Info.first), Location(Info.second),
- OriginalAccess(Info.first) {
- CurrentPair.first = nullptr;
-
- WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
- fillInCurrentPair();
- }
-
- upward_defs_iterator() { CurrentPair.first = nullptr; }
-
- bool operator==(const upward_defs_iterator &Other) const {
- return DefIterator == Other.DefIterator;
- }
-
- BaseT::iterator::reference operator*() const {
- assert(DefIterator != OriginalAccess->defs_end() &&
- "Tried to access past the end of our iterator");
- return CurrentPair;
- }
-
- using BaseT::operator++;
- upward_defs_iterator &operator++() {
- assert(DefIterator != OriginalAccess->defs_end() &&
- "Tried to access past the end of the iterator");
- ++DefIterator;
- if (DefIterator != OriginalAccess->defs_end())
- fillInCurrentPair();
- return *this;
- }
-
- BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }
-
-private:
- void fillInCurrentPair() {
- CurrentPair.first = *DefIterator;
- if (WalkingPhi && Location.Ptr) {
- PHITransAddr Translator(
- const_cast<Value *>(Location.Ptr),
- OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);
- if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
- DefIterator.getPhiArgBlock(), nullptr,
- false))
- if (Translator.getAddr() != Location.Ptr) {
- CurrentPair.second = Location.getWithNewPtr(Translator.getAddr());
- return;
- }
- }
- CurrentPair.second = Location;
- }
-
- MemoryAccessPair CurrentPair;
- memoryaccess_def_iterator DefIterator;
- MemoryLocation Location;
- MemoryAccess *OriginalAccess = nullptr;
- bool WalkingPhi = false;
-};
-
-inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair) {
- return upward_defs_iterator(Pair);
-}
-
-inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }
-
-inline iterator_range<upward_defs_iterator>
-upward_defs(const MemoryAccessPair &Pair) {
- return make_range(upward_defs_begin(Pair), upward_defs_end());
-}
-
-/// Walks the defining accesses of MemoryDefs. Stops after we hit something that
-/// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when
-/// comparing against a null def_chain_iterator, this will compare equal only
-/// after walking said Phi/liveOnEntry.
-///
-/// The UseOptimizedChain flag specifies whether to walk the clobbering
-/// access chain, or all the accesses.
-///
-/// Normally, MemoryDefs are all just def/use linked together, so a def_chain on
-/// a MemoryDef will walk all MemoryDefs above it in the program until it hits
-/// a phi node. The optimized chain walks the clobbering access of a store.
-/// So if you are just trying to find, given a store, what the next
-/// thing that would clobber the same memory is, you want the optimized chain.
-template <class T, bool UseOptimizedChain = false>
-struct def_chain_iterator
- : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>,
- std::forward_iterator_tag, MemoryAccess *> {
- def_chain_iterator() : MA(nullptr) {}
- def_chain_iterator(T MA) : MA(MA) {}
-
- T operator*() const { return MA; }
-
- def_chain_iterator &operator++() {
- // N.B. liveOnEntry has a null defining access.
- if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
- if (UseOptimizedChain && MUD->isOptimized())
- MA = MUD->getOptimized();
- else
- MA = MUD->getDefiningAccess();
- } else {
- MA = nullptr;
- }
-
- return *this;
- }
-
- bool operator==(const def_chain_iterator &O) const { return MA == O.MA; }
-
-private:
- T MA;
-};
-
-template <class T>
-inline iterator_range<def_chain_iterator<T>>
-def_chain(T MA, MemoryAccess *UpTo = nullptr) {
-#ifdef EXPENSIVE_CHECKS
- assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
- "UpTo isn't in the def chain!");
-#endif
- return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo));
-}
-
-template <class T>
-inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) {
- return make_range(def_chain_iterator<T, true>(MA),
- def_chain_iterator<T, true>(nullptr));
-}
-
-} // end namespace llvm
-
-#endif // LLVM_TRANSFORMS_UTILS_MEMORYSSA_H
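The header removed above is the whole consumer-facing MemorySSA API, and it
moves verbatim to llvm/Analysis. A minimal sketch of how a pass queries it;
inspectClobbers and its arguments are hypothetical, but getMemoryAccess,
getWalker, getClobberingMemoryAccess, and isLiveOnEntryDef are the
declarations shown in the hunk above:

  #include "llvm/Analysis/MemorySSA.h"
  #include "llvm/IR/Function.h"
  #include "llvm/Support/raw_ostream.h"

  using namespace llvm;

  // Print every memory instruction whose nearest clobber is liveOnEntry,
  // i.e. nothing inside the function clobbers the memory it touches.
  static void inspectClobbers(Function &F, MemorySSA &MSSA) {
    MemorySSAWalker *Walker = MSSA.getWalker();
    for (BasicBlock &BB : F)
      for (Instruction &I : BB)
        if (MemoryUseOrDef *MA = MSSA.getMemoryAccess(&I)) {
          // Nearest dominating access that Mod's the location(s) I
          // accesses, skipping defs AA can prove don't alias them.
          MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(MA);
          if (MSSA.isLiveOnEntryDef(Clobber))
            errs() << I << " is clobbered only by liveOnEntry\n";
        }
  }

Nothing in that usage changes with this commit; only the include path does.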
Removed: llvm/trunk/include/llvm/Transforms/Utils/MemorySSAUpdater.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Transforms/Utils/MemorySSAUpdater.h?rev=299979&view=auto
==============================================================================
--- llvm/trunk/include/llvm/Transforms/Utils/MemorySSAUpdater.h (original)
+++ llvm/trunk/include/llvm/Transforms/Utils/MemorySSAUpdater.h (removed)
@@ -1,153 +0,0 @@
-//===- MemorySSAUpdater.h - Memory SSA Updater-------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// \file
-// \brief An automatic updater for MemorySSA that handles arbitrary insertion,
-// deletion, and moves. It performs phi insertion where necessary, and
-// automatically updates the MemorySSA IR to be correct.
-// While updating loads or removing instructions is often easy enough to not
-// need this, updating stores should generally not be attempted outside this
-// API.
-//
-// Basic API usage:
-// Create the memory access you want for the instruction (this is mainly so
-// we know where it is, without having to duplicate the entire set of create
-// functions MemorySSA supports).
-// Call insertUse or insertDef depending on whether it's a MemoryUse or a
-// MemoryDef.
-// That's it.
-//
-// For moving, first, move the instruction itself using the normal SSA
-// instruction moving API, then just call moveBefore, moveAfter, or moveTo with
-// the right arguments.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TRANSFORMS_UTILS_MEMORYSSAUPDATER_H
-#define LLVM_TRANSFORMS_UTILS_MEMORYSSAUPDATER_H
-
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/IR/BasicBlock.h"
-#include "llvm/IR/Dominators.h"
-#include "llvm/IR/Module.h"
-#include "llvm/IR/OperandTraits.h"
-#include "llvm/IR/Type.h"
-#include "llvm/IR/Use.h"
-#include "llvm/IR/User.h"
-#include "llvm/IR/Value.h"
-#include "llvm/Pass.h"
-#include "llvm/Support/Casting.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Transforms/Utils/MemorySSA.h"
-
-namespace llvm {
-
-class Function;
-class Instruction;
-class MemoryAccess;
-class LLVMContext;
-class raw_ostream;
-
-class MemorySSAUpdater {
-private:
- MemorySSA *MSSA;
- SmallVector<MemoryPhi *, 8> InsertedPHIs;
- SmallPtrSet<BasicBlock *, 8> VisitedBlocks;
-
-public:
- MemorySSAUpdater(MemorySSA *MSSA) : MSSA(MSSA) {}
- /// Insert a definition into the MemorySSA IR. RenameUses will rename any use
- /// below the new def block (and any inserted phis). RenameUses should be set
- /// to true if the definition may cause new aliases for loads below it. This
- /// is not the case for hoisting or sinking or other forms of code *movement*.
- /// It *is* the case for straight code insertion.
- /// For example:
- /// store a
- /// if (foo) { }
- /// load a
- ///
- /// Moving the store into the if block, and calling insertDef, does not
- /// require RenameUses.
- /// However, changing it to:
- /// store a
- /// if (foo) { store b }
- /// load a
- /// where a may alias b, *does* require RenameUses be set to true.
- void insertDef(MemoryDef *Def, bool RenameUses = false);
- void insertUse(MemoryUse *Use);
- void moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where);
- void moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where);
- void moveToPlace(MemoryUseOrDef *What, BasicBlock *BB,
- MemorySSA::InsertionPlace Where);
-
- // The below are utility functions. Other than creation of accesses to pass
- // to insertDef, and removeAccess to remove accesses, you should generally
- // not attempt to update MemorySSA yourself. It is very non-trivial to get
- // the edge cases right, and the above calls already operate in near-optimal
- // time bounds.
-
- /// \brief Create a MemoryAccess in MemorySSA at a specified point in a block,
- /// with a specified clobbering definition.
- ///
- /// Returns the new MemoryAccess.
- /// This should be called when a memory instruction is created that is being
- /// used to replace an existing memory instruction. It will *not* create PHI
- /// nodes, or verify the clobbering definition. The insertion place is used
- /// solely to determine where in the memoryssa access lists the instruction
- /// will be placed. The caller is expected to keep ordering the same as
- /// instructions.
- /// It will return the new MemoryAccess.
- /// Note: If a MemoryAccess already exists for I, this function will make it
- /// inaccessible and it *must* have removeMemoryAccess called on it.
- MemoryAccess *createMemoryAccessInBB(Instruction *I, MemoryAccess *Definition,
- const BasicBlock *BB,
- MemorySSA::InsertionPlace Point);
-
- /// \brief Create a MemoryAccess in MemorySSA before or after an existing
- /// MemoryAccess.
- ///
- /// Returns the new MemoryAccess.
- /// This should be called when a memory instruction is created that is being
- /// used to replace an existing memory instruction. It will *not* create PHI
- /// nodes, or verify the clobbering definition.
- ///
- /// Note: If a MemoryAccess already exists for I, this function will make it
- /// inaccessible and it *must* have removeMemoryAccess called on it.
- MemoryUseOrDef *createMemoryAccessBefore(Instruction *I,
- MemoryAccess *Definition,
- MemoryUseOrDef *InsertPt);
- MemoryUseOrDef *createMemoryAccessAfter(Instruction *I,
- MemoryAccess *Definition,
- MemoryAccess *InsertPt);
-
- /// \brief Remove a MemoryAccess from MemorySSA, including updating all
- /// definitions and uses.
- /// This should be called when a memory instruction that has a MemoryAccess
- /// associated with it is erased from the program. For example, if a store or
- /// load is simply erased (not replaced), removeMemoryAccess should be called
- /// on the MemoryAccess for that store/load.
- void removeMemoryAccess(MemoryAccess *);
-
-private:
- // Move What before Where in the MemorySSA IR.
- template <class WhereType>
- void moveTo(MemoryUseOrDef *What, BasicBlock *BB, WhereType Where);
- MemoryAccess *getPreviousDef(MemoryAccess *);
- MemoryAccess *getPreviousDefInBlock(MemoryAccess *);
- MemoryAccess *getPreviousDefFromEnd(BasicBlock *);
- MemoryAccess *getPreviousDefRecursive(BasicBlock *);
- MemoryAccess *recursePhi(MemoryAccess *Phi);
- template <class RangeType>
- MemoryAccess *tryRemoveTrivialPhi(MemoryPhi *Phi, RangeType &Operands);
- void fixupDefs(const SmallVectorImpl<MemoryAccess *> &);
-};
-} // end namespace llvm
-
-#endif // LLVM_TRANSFORMS_UTILS_MEMORYSSAUPDATER_H
Modified: llvm/trunk/lib/Analysis/Analysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/Analysis.cpp?rev=299980&r1=299979&r2=299980&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/Analysis.cpp (original)
+++ llvm/trunk/lib/Analysis/Analysis.cpp Tue Apr 11 15:06:36 2017
@@ -79,6 +79,8 @@ void llvm::initializeAnalysis(PassRegist
initializeTypeBasedAAWrapperPassPass(Registry);
initializeScopedNoAliasAAWrapperPassPass(Registry);
initializeLCSSAVerificationPassPass(Registry);
+ initializeMemorySSAWrapperPassPass(Registry);
+ initializeMemorySSAPrinterLegacyPassPass(Registry);
}
void LLVMInitializeAnalysis(LLVMPassRegistryRef R) {
Modified: llvm/trunk/lib/Analysis/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/CMakeLists.txt?rev=299980&r1=299979&r2=299980&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/CMakeLists.txt (original)
+++ llvm/trunk/lib/Analysis/CMakeLists.txt Tue Apr 11 15:06:36 2017
@@ -53,6 +53,8 @@ add_llvm_library(LLVMAnalysis
MemoryBuiltins.cpp
MemoryDependenceAnalysis.cpp
MemoryLocation.cpp
+ MemorySSA.cpp
+ MemorySSAUpdater.cpp
ModuleDebugInfoPrinter.cpp
ModuleSummaryAnalysis.cpp
ObjCARCAliasAnalysis.cpp
Copied: llvm/trunk/lib/Analysis/MemorySSA.cpp (from r299975, llvm/trunk/lib/Transforms/Utils/MemorySSA.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/MemorySSA.cpp?p2=llvm/trunk/lib/Analysis/MemorySSA.cpp&p1=llvm/trunk/lib/Transforms/Utils/MemorySSA.cpp&r1=299975&r2=299980&rev=299980&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Utils/MemorySSA.cpp (original)
+++ llvm/trunk/lib/Analysis/MemorySSA.cpp Tue Apr 11 15:06:36 2017
@@ -10,7 +10,7 @@
// This file implements the MemorySSA class.
//
//===----------------------------------------------------------------===//
-#include "llvm/Transforms/Utils/MemorySSA.h"
+#include "llvm/Analysis/MemorySSA.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
Copied: llvm/trunk/lib/Analysis/MemorySSAUpdater.cpp (from r299975, llvm/trunk/lib/Transforms/Utils/MemorySSAUpdater.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/MemorySSAUpdater.cpp?p2=llvm/trunk/lib/Analysis/MemorySSAUpdater.cpp&p1=llvm/trunk/lib/Transforms/Utils/MemorySSAUpdater.cpp&r1=299975&r2=299980&rev=299980&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Utils/MemorySSAUpdater.cpp (original)
+++ llvm/trunk/lib/Analysis/MemorySSAUpdater.cpp Tue Apr 11 15:06:36 2017
@@ -10,7 +10,7 @@
// This file implements the MemorySSAUpdater class.
//
//===----------------------------------------------------------------===//
-#include "llvm/Transforms/Utils/MemorySSAUpdater.h"
+#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
@@ -24,7 +24,7 @@
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
-#include "llvm/Transforms/Utils/MemorySSA.h"
+#include "llvm/Analysis/MemorySSA.h"
#include <algorithm>
#define DEBUG_TYPE "memoryssa"
Modified: llvm/trunk/lib/Passes/PassBuilder.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Passes/PassBuilder.cpp?rev=299980&r1=299979&r2=299980&view=diff
==============================================================================
--- llvm/trunk/lib/Passes/PassBuilder.cpp (original)
+++ llvm/trunk/lib/Passes/PassBuilder.cpp Tue Apr 11 15:06:36 2017
@@ -39,6 +39,7 @@
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
+#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/ModuleSummaryAnalysis.h"
#include "llvm/Analysis/OptimizationDiagnosticInfo.h"
#include "llvm/Analysis/PostDominators.h"
@@ -135,7 +136,6 @@
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LowerInvoke.h"
#include "llvm/Transforms/Utils/Mem2Reg.h"
-#include "llvm/Transforms/Utils/MemorySSA.h"
#include "llvm/Transforms/Utils/NameAnonGlobals.h"
#include "llvm/Transforms/Utils/PredicateInfo.h"
#include "llvm/Transforms/Utils/SimplifyInstructions.h"
Modified: llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp?rev=299980&r1=299979&r2=299980&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp Tue Apr 11 15:06:36 2017
@@ -19,6 +19,8 @@
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Analysis/MemorySSA.h"
+#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
@@ -32,8 +34,6 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
-#include "llvm/Transforms/Utils/MemorySSA.h"
-#include "llvm/Transforms/Utils/MemorySSAUpdater.h"
#include <deque>
using namespace llvm;
using namespace llvm::PatternMatch;
Modified: llvm/trunk/lib/Transforms/Scalar/GVNHoist.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/GVNHoist.cpp?rev=299980&r1=299979&r2=299980&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/GVNHoist.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/GVNHoist.cpp Tue Apr 11 15:06:36 2017
@@ -45,11 +45,11 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/MemorySSA.h"
+#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
-#include "llvm/Transforms/Utils/MemorySSA.h"
-#include "llvm/Transforms/Utils/MemorySSAUpdater.h"
using namespace llvm;
Modified: llvm/trunk/lib/Transforms/Scalar/NewGVN.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/NewGVN.cpp?rev=299980&r1=299979&r2=299980&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/NewGVN.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/NewGVN.cpp Tue Apr 11 15:06:36 2017
@@ -81,7 +81,7 @@
#include "llvm/Transforms/Scalar/GVNExpression.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
-#include "llvm/Transforms/Utils/MemorySSA.h"
+#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Transforms/Utils/PredicateInfo.h"
#include "llvm/Transforms/Utils/VNCoercion.h"
#include <numeric>
Modified: llvm/trunk/lib/Transforms/Utils/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Utils/CMakeLists.txt?rev=299980&r1=299979&r2=299980&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Utils/CMakeLists.txt (original)
+++ llvm/trunk/lib/Transforms/Utils/CMakeLists.txt Tue Apr 11 15:06:36 2017
@@ -34,8 +34,6 @@ add_llvm_library(LLVMTransformUtils
LowerMemIntrinsics.cpp
LowerSwitch.cpp
Mem2Reg.cpp
- MemorySSA.cpp
- MemorySSAUpdater.cpp
MetaRenamer.cpp
ModuleUtils.cpp
NameAnonGlobals.cpp
Removed: llvm/trunk/lib/Transforms/Utils/MemorySSA.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Utils/MemorySSA.cpp?rev=299979&view=auto
==============================================================================
--- llvm/trunk/lib/Transforms/Utils/MemorySSA.cpp (original)
+++ llvm/trunk/lib/Transforms/Utils/MemorySSA.cpp (removed)
@@ -1,2059 +0,0 @@
-//===-- MemorySSA.cpp - Memory SSA Builder---------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------===//
-//
-// This file implements the MemorySSA class.
-//
-//===----------------------------------------------------------------===//
-#include "llvm/Transforms/Utils/MemorySSA.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/DepthFirstIterator.h"
-#include "llvm/ADT/GraphTraits.h"
-#include "llvm/ADT/PostOrderIterator.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallBitVector.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/SmallSet.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/CFG.h"
-#include "llvm/Analysis/GlobalsModRef.h"
-#include "llvm/Analysis/IteratedDominanceFrontier.h"
-#include "llvm/Analysis/MemoryLocation.h"
-#include "llvm/Analysis/PHITransAddr.h"
-#include "llvm/IR/AssemblyAnnotationWriter.h"
-#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/Dominators.h"
-#include "llvm/IR/GlobalVariable.h"
-#include "llvm/IR/IRBuilder.h"
-#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/IR/LLVMContext.h"
-#include "llvm/IR/Metadata.h"
-#include "llvm/IR/Module.h"
-#include "llvm/IR/PatternMatch.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/FormattedStream.h"
-#include "llvm/Transforms/Scalar.h"
-#include <algorithm>
-
-#define DEBUG_TYPE "memoryssa"
-using namespace llvm;
-INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
- true)
-INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
-INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
- true)
-
-INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
- "Memory SSA Printer", false, false)
-INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
-INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
- "Memory SSA Printer", false, false)
-
-static cl::opt<unsigned> MaxCheckLimit(
- "memssa-check-limit", cl::Hidden, cl::init(100),
- cl::desc("The maximum number of stores/phis MemorySSA"
- "will consider trying to walk past (default = 100)"));
-
-static cl::opt<bool>
- VerifyMemorySSA("verify-memoryssa", cl::init(false), cl::Hidden,
- cl::desc("Verify MemorySSA in legacy printer pass."));
-
-namespace llvm {
-/// \brief An assembly annotator class to print Memory SSA information in
-/// comments.
-class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
- friend class MemorySSA;
- const MemorySSA *MSSA;
-
-public:
- MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}
-
- virtual void emitBasicBlockStartAnnot(const BasicBlock *BB,
- formatted_raw_ostream &OS) {
- if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
- OS << "; " << *MA << "\n";
- }
-
- virtual void emitInstructionAnnot(const Instruction *I,
- formatted_raw_ostream &OS) {
- if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
- OS << "; " << *MA << "\n";
- }
-};
-}
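-
-// For reference, the annotated output for a simple block looks like this
-// (a sketch; the access numbering depends on creation order):
-//   ; 1 = MemoryDef(liveOnEntry)
-//   store i8 0, i8* %p
-//   ; MemoryUse(1)
-//   %v = load i8, i8* %p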
-
-namespace {
-/// Our current alias analysis API differentiates heavily between calls and
-/// non-calls, and functions called on one usually assert on the other.
-/// This class encapsulates the distinction to simplify other code that wants
-/// "Memory affecting instructions and related data" to use as a key.
-/// For example, this class is used as a densemap key in the use optimizer.
-class MemoryLocOrCall {
-public:
- MemoryLocOrCall() : IsCall(false) {}
- MemoryLocOrCall(MemoryUseOrDef *MUD)
- : MemoryLocOrCall(MUD->getMemoryInst()) {}
- MemoryLocOrCall(const MemoryUseOrDef *MUD)
- : MemoryLocOrCall(MUD->getMemoryInst()) {}
-
- MemoryLocOrCall(Instruction *Inst) {
- if (ImmutableCallSite(Inst)) {
- IsCall = true;
- CS = ImmutableCallSite(Inst);
- } else {
- IsCall = false;
- // There is no such thing as a MemoryLocation for a fence inst, and it is
- // unique in that regard.
- if (!isa<FenceInst>(Inst))
- Loc = MemoryLocation::get(Inst);
- }
- }
-
- explicit MemoryLocOrCall(const MemoryLocation &Loc)
- : IsCall(false), Loc(Loc) {}
-
- bool IsCall;
- ImmutableCallSite getCS() const {
- assert(IsCall);
- return CS;
- }
- MemoryLocation getLoc() const {
- assert(!IsCall);
- return Loc;
- }
-
- bool operator==(const MemoryLocOrCall &Other) const {
- if (IsCall != Other.IsCall)
- return false;
-
- if (IsCall)
- return CS.getCalledValue() == Other.CS.getCalledValue();
- return Loc == Other.Loc;
- }
-
-private:
- union {
- ImmutableCallSite CS;
- MemoryLocation Loc;
- };
-};
-}
-
-namespace llvm {
-template <> struct DenseMapInfo<MemoryLocOrCall> {
- static inline MemoryLocOrCall getEmptyKey() {
- return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
- }
- static inline MemoryLocOrCall getTombstoneKey() {
- return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
- }
- static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
- if (MLOC.IsCall)
- return hash_combine(MLOC.IsCall,
- DenseMapInfo<const Value *>::getHashValue(
- MLOC.getCS().getCalledValue()));
- return hash_combine(
- MLOC.IsCall, DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));
- }
- static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
- return LHS == RHS;
- }
-};
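-
-// A sketch of the intended use: the use optimizer below keys its
-// per-location walk state on MemoryLocOrCall, e.g.
-//   DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
-//   auto &Info = LocStackInfo[MemoryLocOrCall(MU)]; // MU is a MemoryUse *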
-
-enum class Reorderability { Always, IfNoAlias, Never };
-
-/// This does one-way checks to see if Use could theoretically be hoisted above
-/// MayClobber. This will not check the other way around.
-///
-/// This assumes that, for the purposes of MemorySSA, Use comes directly after
-/// MayClobber, with no potentially clobbering operations in between them.
-/// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
-static Reorderability getLoadReorderability(const LoadInst *Use,
- const LoadInst *MayClobber) {
- bool VolatileUse = Use->isVolatile();
- bool VolatileClobber = MayClobber->isVolatile();
- // Volatile operations may never be reordered with other volatile operations.
- if (VolatileUse && VolatileClobber)
- return Reorderability::Never;
-
- // The lang ref allows reordering of volatile and non-volatile operations.
- // Whether an aliasing nonvolatile load and volatile load can be reordered,
- // though, is ambiguous. Because it may not be best to exploit this ambiguity,
- // we only allow volatile/non-volatile reordering if the volatile and
- // non-volatile operations don't alias.
- Reorderability Result = VolatileUse || VolatileClobber
- ? Reorderability::IfNoAlias
- : Reorderability::Always;
-
- // If a load is seq_cst, it cannot be moved above other loads. If its ordering
- // is weaker, it can be moved above other loads. We just need to be sure that
- // MayClobber isn't an acquire load, because loads can't be moved above
- // acquire loads.
- //
- // Note that this explicitly *does* allow the free reordering of monotonic (or
- // weaker) loads of the same address.
- bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
- bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
- AtomicOrdering::Acquire);
- if (SeqCstUse || MayClobberIsAcquire)
- return Reorderability::Never;
- return Result;
-}
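-
-// Illustrative outcomes of the rules above, in the order they are checked,
-// assuming Use and MayClobber are loads of the same address:
-//   both volatile                              -> Never
-//   Use seq_cst, or MayClobber acquire or
-//   stronger                                   -> Never
-//   exactly one volatile (and not the above)   -> IfNoAlias
-//   otherwise (e.g. two monotonic loads)       -> Always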
-
-static bool instructionClobbersQuery(MemoryDef *MD,
- const MemoryLocation &UseLoc,
- const Instruction *UseInst,
- AliasAnalysis &AA) {
- Instruction *DefInst = MD->getMemoryInst();
- assert(DefInst && "Defining instruction not actually an instruction");
- ImmutableCallSite UseCS(UseInst);
-
- if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
- // These intrinsics will show up as affecting memory, but they are just
- // markers.
- switch (II->getIntrinsicID()) {
- case Intrinsic::lifetime_start:
- if (UseCS)
- return false;
- return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), UseLoc);
- case Intrinsic::lifetime_end:
- case Intrinsic::invariant_start:
- case Intrinsic::invariant_end:
- case Intrinsic::assume:
- return false;
- default:
- break;
- }
- }
-
- if (UseCS) {
- ModRefInfo I = AA.getModRefInfo(DefInst, UseCS);
- return I != MRI_NoModRef;
- }
-
- if (auto *DefLoad = dyn_cast<LoadInst>(DefInst)) {
- if (auto *UseLoad = dyn_cast<LoadInst>(UseInst)) {
- switch (getLoadReorderability(UseLoad, DefLoad)) {
- case Reorderability::Always:
- return false;
- case Reorderability::Never:
- return true;
- case Reorderability::IfNoAlias:
- return !AA.isNoAlias(UseLoc, MemoryLocation::get(DefLoad));
- }
- }
- }
-
- return AA.getModRefInfo(DefInst, UseLoc) & MRI_Mod;
-}
-
-static bool instructionClobbersQuery(MemoryDef *MD, const MemoryUseOrDef *MU,
- const MemoryLocOrCall &UseMLOC,
- AliasAnalysis &AA) {
- // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
- // to exist while MemoryLocOrCall is pushed through places.
- if (UseMLOC.IsCall)
- return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
- AA);
- return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
- AA);
-}
-
-// Return true when MD may alias MU, and false otherwise.
-bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
- AliasAnalysis &AA) {
- return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA);
-}
-}
-
-namespace {
-struct UpwardsMemoryQuery {
- // True if our original query started off as a call
- bool IsCall;
- // The pointer location we started the query with. This will be empty if
- // IsCall is true.
- MemoryLocation StartingLoc;
- // This is the instruction we were querying about.
- const Instruction *Inst;
- // The MemoryAccess we actually got called with, used to test local domination
- const MemoryAccess *OriginalAccess;
-
- UpwardsMemoryQuery()
- : IsCall(false), Inst(nullptr), OriginalAccess(nullptr) {}
-
- UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
- : IsCall(ImmutableCallSite(Inst)), Inst(Inst), OriginalAccess(Access) {
- if (!IsCall)
- StartingLoc = MemoryLocation::get(Inst);
- }
-};
-
-static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
- AliasAnalysis &AA) {
- Instruction *Inst = MD->getMemoryInst();
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
- switch (II->getIntrinsicID()) {
- case Intrinsic::lifetime_end:
- return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), Loc);
- default:
- return false;
- }
- }
- return false;
-}
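-
-// For example (a sketch), a use of a location that must-alias %p is
-// treated as killed at
-//   call void @llvm.lifetime.end(i64 4, i8* %p)
-// and the use optimizer below resets such a use to liveOnEntry.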
-
-static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysis &AA,
- const Instruction *I) {
- // If the memory can't be changed, then loads of the memory can't be
- // clobbered.
- //
- // FIXME: We should handle invariant groups, as well. It's a bit harder,
- // because we need to pay close attention to invariant group barriers.
- return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
- AA.pointsToConstantMemory(cast<LoadInst>(I)->
- getPointerOperand()));
-}
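-
-// e.g. both of these loads are trivially live on entry (a sketch; @G is
-// assumed to be a constant global):
-//   %a = load i32, i32* %p, !invariant.load !0
-//   %b = load i32, i32* @G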
-
-/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
-/// in between `Start` and `ClobberAt` can clobber `Start`.
-///
-/// This is meant to be as simple and self-contained as possible. Because it
-/// uses no cache, etc., it can be relatively expensive.
-///
-/// \param Start The MemoryAccess that we want to walk from.
-/// \param ClobberAt A clobber for Start.
-/// \param StartLoc The MemoryLocation for Start.
-/// \param MSSA The MemorySSA instance that Start and ClobberAt belong to.
-/// \param Query The UpwardsMemoryQuery we used for our search.
-/// \param AA The AliasAnalysis we used for our search.
-static void LLVM_ATTRIBUTE_UNUSED
-checkClobberSanity(MemoryAccess *Start, MemoryAccess *ClobberAt,
- const MemoryLocation &StartLoc, const MemorySSA &MSSA,
- const UpwardsMemoryQuery &Query, AliasAnalysis &AA) {
- assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");
-
- if (MSSA.isLiveOnEntryDef(Start)) {
- assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
- "liveOnEntry must clobber itself");
- return;
- }
-
- bool FoundClobber = false;
- DenseSet<MemoryAccessPair> VisitedPhis;
- SmallVector<MemoryAccessPair, 8> Worklist;
- Worklist.emplace_back(Start, StartLoc);
- // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
- // is found, complain.
- while (!Worklist.empty()) {
- MemoryAccessPair MAP = Worklist.pop_back_val();
- // All we care about is that nothing from Start to ClobberAt clobbers Start.
- // We learn nothing from revisiting nodes.
- if (!VisitedPhis.insert(MAP).second)
- continue;
-
- for (MemoryAccess *MA : def_chain(MAP.first)) {
- if (MA == ClobberAt) {
- if (auto *MD = dyn_cast<MemoryDef>(MA)) {
- // instructionClobbersQuery isn't free, so don't use `|=`,
- // since it won't let us short-circuit.
- //
- // Also, note that this can't be hoisted out of the `Worklist` loop,
- // since MD may only act as a clobber for 1 of N MemoryLocations.
- FoundClobber =
- FoundClobber || MSSA.isLiveOnEntryDef(MD) ||
- instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
- }
- break;
- }
-
- // We should never hit liveOnEntry, unless it's the clobber.
- assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");
-
- if (auto *MD = dyn_cast<MemoryDef>(MA)) {
- (void)MD;
- assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) &&
- "Found clobber before reaching ClobberAt!");
- continue;
- }
-
- assert(isa<MemoryPhi>(MA));
- Worklist.append(upward_defs_begin({MA, MAP.second}), upward_defs_end());
- }
- }
-
- // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
- // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
- assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
- "ClobberAt never acted as a clobber");
-}
-
-/// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
-/// in one class.
-class ClobberWalker {
- /// Save a few bytes by using unsigned instead of size_t.
- using ListIndex = unsigned;
-
- /// Represents a span of contiguous MemoryDefs, potentially ending in a
- /// MemoryPhi.
- struct DefPath {
- MemoryLocation Loc;
- // Note that, because we always walk in reverse, Last will always dominate
- // First. Also note that First and Last are inclusive.
- MemoryAccess *First;
- MemoryAccess *Last;
- Optional<ListIndex> Previous;
-
- DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
- Optional<ListIndex> Previous)
- : Loc(Loc), First(First), Last(Last), Previous(Previous) {}
-
- DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
- Optional<ListIndex> Previous)
- : DefPath(Loc, Init, Init, Previous) {}
- };
-
- const MemorySSA &MSSA;
- AliasAnalysis &AA;
- DominatorTree &DT;
- UpwardsMemoryQuery *Query;
-
- // Phi optimization bookkeeping
- SmallVector<DefPath, 32> Paths;
- DenseSet<ConstMemoryAccessPair> VisitedPhis;
-
- /// Find the nearest def or phi that `From` can legally be optimized to.
- const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
- assert(From->getNumOperands() && "Phi with no operands?");
-
- BasicBlock *BB = From->getBlock();
- MemoryAccess *Result = MSSA.getLiveOnEntryDef();
- DomTreeNode *Node = DT.getNode(BB);
- while ((Node = Node->getIDom())) {
- auto *Defs = MSSA.getBlockDefs(Node->getBlock());
- if (Defs)
- return &*Defs->rbegin();
- }
- return Result;
- }
-
- /// Result of calling walkToPhiOrClobber.
- struct UpwardsWalkResult {
- /// The "Result" of the walk. Either a clobber, the last thing we walked, or
- /// both.
- MemoryAccess *Result;
- bool IsKnownClobber;
- };
-
- /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
- /// This will update Desc.Last as it walks. It will (optionally) also stop at
- /// StopAt.
- ///
- /// This does not test whether StopAt is a clobber.
- UpwardsWalkResult
- walkToPhiOrClobber(DefPath &Desc,
- const MemoryAccess *StopAt = nullptr) const {
- assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
-
- for (MemoryAccess *Current : def_chain(Desc.Last)) {
- Desc.Last = Current;
- if (Current == StopAt)
- return {Current, false};
-
- if (auto *MD = dyn_cast<MemoryDef>(Current))
- if (MSSA.isLiveOnEntryDef(MD) ||
- instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA))
- return {MD, true};
- }
-
- assert(isa<MemoryPhi>(Desc.Last) &&
- "Ended at a non-clobber that's not a phi?");
- return {Desc.Last, false};
- }
-
- void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
- ListIndex PriorNode) {
- auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}),
- upward_defs_end());
- for (const MemoryAccessPair &P : UpwardDefs) {
- PausedSearches.push_back(Paths.size());
- Paths.emplace_back(P.second, P.first, PriorNode);
- }
- }
-
- /// Represents a search that terminated after finding a clobber. This clobber
- /// may or may not be present in the path of defs from LastNode..SearchStart,
- /// since it may have been retrieved from cache.
- struct TerminatedPath {
- MemoryAccess *Clobber;
- ListIndex LastNode;
- };
-
- /// Get an access that keeps us from optimizing to the given phi.
- ///
- /// PausedSearches is an array of indices into the Paths array. Its incoming
- /// value is the indices of searches that stopped at the last phi optimization
- /// target. It's left in an unspecified state.
- ///
- /// If this returns None, NewPaused is a vector of searches that terminated
- /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
- Optional<TerminatedPath>
- getBlockingAccess(const MemoryAccess *StopWhere,
- SmallVectorImpl<ListIndex> &PausedSearches,
- SmallVectorImpl<ListIndex> &NewPaused,
- SmallVectorImpl<TerminatedPath> &Terminated) {
- assert(!PausedSearches.empty() && "No searches to continue?");
-
- // BFS vs DFS really doesn't make a difference here, so just do a DFS with
- // PausedSearches as our stack.
- while (!PausedSearches.empty()) {
- ListIndex PathIndex = PausedSearches.pop_back_val();
- DefPath &Node = Paths[PathIndex];
-
- // If we've already visited this path with this MemoryLocation, we don't
- // need to do so again.
- //
- // NOTE: Because we just drop these paths on the ground, caching behavior
- // is sporadic. e.g. given a diamond:
- // A
- // B C
- // D
- //
- // ...If we walk D, B, A, C, we'll only cache the result of phi
- // optimization for A, B, and D; C will be skipped because it dies here.
- // This arguably isn't the worst thing ever, since:
- // - We generally query things in a top-down order, so if we got below D
- // without needing cache entries for {C, MemLoc}, then chances are
- // that those cache entries would end up ultimately unused.
- // - We still cache things for A, so C only needs to walk up a bit.
- // If this behavior becomes problematic, we can fix without a ton of extra
- // work.
- if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
- continue;
-
- UpwardsWalkResult Res = walkToPhiOrClobber(Node, /*StopAt=*/StopWhere);
- if (Res.IsKnownClobber) {
- assert(Res.Result != StopWhere);
- // If this wasn't a cache hit, we hit a clobber when walking. That's a
- // failure.
- TerminatedPath Term{Res.Result, PathIndex};
- if (!MSSA.dominates(Res.Result, StopWhere))
- return Term;
-
- // Otherwise, it's a valid thing to potentially optimize to.
- Terminated.push_back(Term);
- continue;
- }
-
- if (Res.Result == StopWhere) {
- // We've hit our target. Save this path off for if we want to continue
- // walking.
- NewPaused.push_back(PathIndex);
- continue;
- }
-
- assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
- addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
- }
-
- return None;
- }
-
- template <typename T, typename Walker>
- struct generic_def_path_iterator
- : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
- std::forward_iterator_tag, T *> {
- generic_def_path_iterator() : W(nullptr), N(None) {}
- generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}
-
- T &operator*() const { return curNode(); }
-
- generic_def_path_iterator &operator++() {
- N = curNode().Previous;
- return *this;
- }
-
- bool operator==(const generic_def_path_iterator &O) const {
- if (N.hasValue() != O.N.hasValue())
- return false;
- return !N.hasValue() || *N == *O.N;
- }
-
- private:
- T &curNode() const { return W->Paths[*N]; }
-
- Walker *W;
- Optional<ListIndex> N;
- };
-
- using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
- using const_def_path_iterator =
- generic_def_path_iterator<const DefPath, const ClobberWalker>;
-
- iterator_range<def_path_iterator> def_path(ListIndex From) {
- return make_range(def_path_iterator(this, From), def_path_iterator());
- }
-
- iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
- return make_range(const_def_path_iterator(this, From),
- const_def_path_iterator());
- }
-
- struct OptznResult {
- /// The path that contains our result.
- TerminatedPath PrimaryClobber;
- /// The paths that we can legally cache back from, but that aren't
- /// necessarily the result of the Phi optimization.
- SmallVector<TerminatedPath, 4> OtherClobbers;
- };
-
- ListIndex defPathIndex(const DefPath &N) const {
- // The assert looks nicer if we don't need to do &N
- const DefPath *NP = &N;
- assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
- "Out of bounds DefPath!");
- return NP - &Paths.front();
- }
-
- /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
- /// that act as legal clobbers. Note that this won't return *all* clobbers.
- ///
- /// Phi optimization algorithm tl;dr:
- /// - Find the earliest def/phi, A, we can optimize to
- /// - Find if all paths from the starting memory access ultimately reach A
- /// - If not, optimization isn't possible.
- /// - Otherwise, walk from A to another clobber or phi, A'.
- /// - If A' is a def, we're done.
- /// - If A' is a phi, try to optimize it.
- ///
- /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
- /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
- OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
- const MemoryLocation &Loc) {
- assert(Paths.empty() && VisitedPhis.empty() &&
- "Reset the optimization state.");
-
- Paths.emplace_back(Loc, Start, Phi, None);
- // Stores how many "valid" optimization nodes we had prior to calling
- // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
- auto PriorPathsSize = Paths.size();
-
- SmallVector<ListIndex, 16> PausedSearches;
- SmallVector<ListIndex, 8> NewPaused;
- SmallVector<TerminatedPath, 4> TerminatedPaths;
-
- addSearches(Phi, PausedSearches, 0);
-
- // Moves the TerminatedPath with the "most dominated" Clobber to the end of
- // Paths.
- auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
- assert(!Paths.empty() && "Need a path to move");
- auto Dom = Paths.begin();
- for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
- if (!MSSA.dominates(I->Clobber, Dom->Clobber))
- Dom = I;
- auto Last = Paths.end() - 1;
- if (Last != Dom)
- std::iter_swap(Last, Dom);
- };
-
- MemoryPhi *Current = Phi;
- while (true) {
- assert(!MSSA.isLiveOnEntryDef(Current) &&
- "liveOnEntry wasn't treated as a clobber?");
-
- const auto *Target = getWalkTarget(Current);
- // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
- // optimization for the prior phi.
- assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
- return MSSA.dominates(P.Clobber, Target);
- }));
-
- // FIXME: This is broken, because the Blocker may be reported to be
- // liveOnEntry, and we'll happily wait for that to disappear (read: never)
- // For the moment, this is fine, since we do nothing with blocker info.
- if (Optional<TerminatedPath> Blocker = getBlockingAccess(
- Target, PausedSearches, NewPaused, TerminatedPaths)) {
-
- // Find the node we started at. We can't search based on N->Last, since
- // we may have gone around a loop with a different MemoryLocation.
- auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
- return defPathIndex(N) < PriorPathsSize;
- });
- assert(Iter != def_path_iterator());
-
- DefPath &CurNode = *Iter;
- assert(CurNode.Last == Current);
-
- // Two things:
- // A. We can't reliably cache all of NewPaused back. Consider a case
- // where we have two paths in NewPaused; one of which can't optimize
- // above this phi, whereas the other can. If we cache the second path
- // back, we'll end up with suboptimal cache entries. We can handle
- // cases like this a bit better when we either try to find all
- // clobbers that block phi optimization, or when our cache starts
- // supporting unfinished searches.
- // B. We can't reliably cache TerminatedPaths back here without doing
- // extra checks; consider a case like:
- // T
- // / \
- // D C
- // \ /
- // S
- // Where T is our target, C is a node with a clobber on it, D is a
- // diamond (with a clobber *only* on the left or right node, N), and
- // S is our start. Say we walk to D, through the node opposite N
- // (read: ignoring the clobber), and see a cache entry in the top
- // node of D. That cache entry gets put into TerminatedPaths. We then
- // walk up to C (N is later in our worklist), find the clobber, and
- // quit. If we append TerminatedPaths to OtherClobbers, we'll cache
- // the bottom part of D to the cached clobber, ignoring the clobber
- // in N. Again, this problem goes away if we start tracking all
- // blockers for a given phi optimization.
- TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
- return {Result, {}};
- }
-
- // If there's nothing left to search, then all paths led to valid clobbers
- // that we got from our cache; pick the nearest to the start, and allow
- // the rest to be cached back.
- if (NewPaused.empty()) {
- MoveDominatedPathToEnd(TerminatedPaths);
- TerminatedPath Result = TerminatedPaths.pop_back_val();
- return {Result, std::move(TerminatedPaths)};
- }
-
- MemoryAccess *DefChainEnd = nullptr;
- SmallVector<TerminatedPath, 4> Clobbers;
- for (ListIndex Paused : NewPaused) {
- UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
- if (WR.IsKnownClobber)
- Clobbers.push_back({WR.Result, Paused});
- else
- // Micro-opt: If we hit the end of the chain, save it.
- DefChainEnd = WR.Result;
- }
-
- if (!TerminatedPaths.empty()) {
- // If we couldn't find the dominating phi/liveOnEntry in the above loop,
- // do it now.
- if (!DefChainEnd)
- for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
- DefChainEnd = MA;
-
- // If any of the terminated paths don't dominate the phi we'll try to
- // optimize, we need to figure out what they are and quit.
- const BasicBlock *ChainBB = DefChainEnd->getBlock();
- for (const TerminatedPath &TP : TerminatedPaths) {
- // Because we know that DefChainEnd is as "high" as we can go, we
- // don't need local dominance checks; BB dominance is sufficient.
- if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
- Clobbers.push_back(TP);
- }
- }
-
- // If we have clobbers in the def chain, find the one closest to Current
- // and quit.
- if (!Clobbers.empty()) {
- MoveDominatedPathToEnd(Clobbers);
- TerminatedPath Result = Clobbers.pop_back_val();
- return {Result, std::move(Clobbers)};
- }
-
- assert(all_of(NewPaused,
- [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));
-
- // Because liveOnEntry is a clobber, this must be a phi.
- auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);
-
- PriorPathsSize = Paths.size();
- PausedSearches.clear();
- for (ListIndex I : NewPaused)
- addSearches(DefChainPhi, PausedSearches, I);
- NewPaused.clear();
-
- Current = DefChainPhi;
- }
- }
-
- void verifyOptResult(const OptznResult &R) const {
- assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
- return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
- }));
- }
-
- void resetPhiOptznState() {
- Paths.clear();
- VisitedPhis.clear();
- }
-
-public:
- ClobberWalker(const MemorySSA &MSSA, AliasAnalysis &AA, DominatorTree &DT)
- : MSSA(MSSA), AA(AA), DT(DT) {}
-
- void reset() {}
-
- /// Finds the nearest clobber for the given query, optimizing phis if
- /// possible.
- MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q) {
- Query = &Q;
-
- MemoryAccess *Current = Start;
- // This walker pretends uses don't exist. If we're handed one, silently grab
- // its def. (This has the nice side-effect of ensuring we never cache uses)
- if (auto *MU = dyn_cast<MemoryUse>(Start))
- Current = MU->getDefiningAccess();
-
- DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
- // Fast path for the overly-common case (no crazy phi optimization
- // necessary)
- UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
- MemoryAccess *Result;
- if (WalkResult.IsKnownClobber) {
- Result = WalkResult.Result;
- } else {
- OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
- Current, Q.StartingLoc);
- verifyOptResult(OptRes);
- resetPhiOptznState();
- Result = OptRes.PrimaryClobber.Clobber;
- }
-
-#ifdef EXPENSIVE_CHECKS
- checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
-#endif
- return Result;
- }
-
- void verify(const MemorySSA *MSSA) { assert(MSSA == &this->MSSA); }
-};
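-
-// A minimal sketch of driving the walker directly (CachingWalker below is
-// the real client; I is assumed to be a load or store that already has a
-// MemoryAccess in MSSA):
-//   UpwardsMemoryQuery Q(I, MSSA.getMemoryAccess(I));
-//   MemoryAccess *Clobber = Walker.findClobber(MSSA.getMemoryAccess(I), Q);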
-
-struct RenamePassData {
- DomTreeNode *DTN;
- DomTreeNode::const_iterator ChildIt;
- MemoryAccess *IncomingVal;
-
- RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
- MemoryAccess *M)
- : DTN(D), ChildIt(It), IncomingVal(M) {}
- void swap(RenamePassData &RHS) {
- std::swap(DTN, RHS.DTN);
- std::swap(ChildIt, RHS.ChildIt);
- std::swap(IncomingVal, RHS.IncomingVal);
- }
-};
-} // anonymous namespace
-
-namespace llvm {
-/// \brief A MemorySSAWalker that does AA walks to disambiguate accesses. It no
-/// longer does caching on its own, but the name has been retained for the
-/// moment.
-class MemorySSA::CachingWalker final : public MemorySSAWalker {
- ClobberWalker Walker;
- bool AutoResetWalker;
-
- MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, UpwardsMemoryQuery &);
- void verifyRemoved(MemoryAccess *);
-
-public:
- CachingWalker(MemorySSA *, AliasAnalysis *, DominatorTree *);
- ~CachingWalker() override;
-
- using MemorySSAWalker::getClobberingMemoryAccess;
- MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
- MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
- const MemoryLocation &) override;
- void invalidateInfo(MemoryAccess *) override;
-
- /// Whether we call resetClobberWalker() after each time we *actually* walk to
- /// answer a clobber query.
- void setAutoResetWalker(bool AutoReset) { AutoResetWalker = AutoReset; }
-
- /// Drop the walker's persistent data structures.
- void resetClobberWalker() { Walker.reset(); }
-
- void verify(const MemorySSA *MSSA) override {
- MemorySSAWalker::verify(MSSA);
- Walker.verify(MSSA);
- }
-};
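-
-// From outside MemorySSA, the same machinery is reached through the public
-// walker API (a sketch; Load is a LoadInst in the analyzed function):
-//   MemorySSAWalker *W = MSSA.getWalker();
-//   MemoryAccess *Clobber =
-//       W->getClobberingMemoryAccess(MSSA.getMemoryAccess(Load));
-//   if (MSSA.isLiveOnEntryDef(Clobber))
-//     ; // no store in the function clobbers Load's location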
-
-void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
- bool RenameAllUses) {
- // Pass through values to our successors
- for (const BasicBlock *S : successors(BB)) {
- auto It = PerBlockAccesses.find(S);
- // Rename the phi nodes in our successor block
- if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
- continue;
- AccessList *Accesses = It->second.get();
- auto *Phi = cast<MemoryPhi>(&Accesses->front());
- if (RenameAllUses) {
- int PhiIndex = Phi->getBasicBlockIndex(BB);
- assert(PhiIndex != -1 && "Incomplete phi during partial rename");
- Phi->setIncomingValue(PhiIndex, IncomingVal);
- } else
- Phi->addIncoming(IncomingVal, BB);
- }
-}
-
-/// \brief Rename a single basic block into MemorySSA form.
-/// Uses the standard SSA renaming algorithm.
-/// \returns The new incoming value.
-MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
- bool RenameAllUses) {
- auto It = PerBlockAccesses.find(BB);
- // Skip most processing if the list is empty.
- if (It != PerBlockAccesses.end()) {
- AccessList *Accesses = It->second.get();
- for (MemoryAccess &L : *Accesses) {
- if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
- if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
- MUD->setDefiningAccess(IncomingVal);
- if (isa<MemoryDef>(&L))
- IncomingVal = &L;
- } else {
- IncomingVal = &L;
- }
- }
- }
- return IncomingVal;
-}
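-
-// For example (a sketch), renaming a block containing
-//   store i8 0, i8* %p    ; 1 = MemoryDef(<IncomingVal>)
-//   %v = load i8, i8* %p  ; MemoryUse(1)
-// sets the store's defining access to the incoming value, then uses the
-// store's MemoryDef as the incoming value for everything below it.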
-
-/// \brief This is the standard SSA renaming algorithm.
-///
-/// We walk the dominator tree in preorder, renaming accesses, and then filling
-/// in phi nodes in our successors.
-void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
- SmallPtrSetImpl<BasicBlock *> &Visited,
- bool SkipVisited, bool RenameAllUses) {
- SmallVector<RenamePassData, 32> WorkStack;
- // Skip everything if we already renamed this block and we are skipping.
- // Note: You can't sink this into the if, because we need it to occur
- // regardless of whether we skip blocks or not.
- bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
- if (SkipVisited && AlreadyVisited)
- return;
-
- IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
- renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
- WorkStack.push_back({Root, Root->begin(), IncomingVal});
-
- while (!WorkStack.empty()) {
- DomTreeNode *Node = WorkStack.back().DTN;
- DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
- IncomingVal = WorkStack.back().IncomingVal;
-
- if (ChildIt == Node->end()) {
- WorkStack.pop_back();
- } else {
- DomTreeNode *Child = *ChildIt;
- ++WorkStack.back().ChildIt;
- BasicBlock *BB = Child->getBlock();
- // Note: You can't sink this into the if, because we need it to occur
- // regardless of whether we skip blocks or not.
- AlreadyVisited = !Visited.insert(BB).second;
- if (SkipVisited && AlreadyVisited) {
- // We already visited this during our renaming, which can happen when
- // being asked to rename multiple blocks. Figure out the incoming val,
- // which is the last def.
- // Incoming value can only change if there is a block def, and in that
- // case, it's the last block def in the list.
- if (auto *BlockDefs = getWritableBlockDefs(BB))
- IncomingVal = &*BlockDefs->rbegin();
- } else
- IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
- renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
- WorkStack.push_back({Child, Child->begin(), IncomingVal});
- }
- }
-}
-
-/// \brief This handles unreachable block accesses by deleting phi nodes in
-/// unreachable blocks, and marking all other unreachable MemoryAccesses as
-/// being uses of the live on entry definition.
-void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
- assert(!DT->isReachableFromEntry(BB) &&
- "Reachable block found while handling unreachable blocks");
-
- // Make sure phi nodes in our reachable successors end up with a
- // LiveOnEntryDef for our incoming edge, even though our block is forward
- // unreachable. We could just disconnect these blocks from the CFG fully,
- // but we do not right now.
- for (const BasicBlock *S : successors(BB)) {
- if (!DT->isReachableFromEntry(S))
- continue;
- auto It = PerBlockAccesses.find(S);
- // Rename the phi nodes in our successor block
- if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
- continue;
- AccessList *Accesses = It->second.get();
- auto *Phi = cast<MemoryPhi>(&Accesses->front());
- Phi->addIncoming(LiveOnEntryDef.get(), BB);
- }
-
- auto It = PerBlockAccesses.find(BB);
- if (It == PerBlockAccesses.end())
- return;
-
- auto &Accesses = It->second;
- for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
- auto Next = std::next(AI);
- // If we have a phi, just remove it. We are going to replace all
- // users with live on entry.
- if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
- UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
- else
- Accesses->erase(AI);
- AI = Next;
- }
-}
-
-MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
- : AA(AA), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
- NextID(INVALID_MEMORYACCESS_ID) {
- buildMemorySSA();
-}
-
-MemorySSA::~MemorySSA() {
- // Drop all our references
- for (const auto &Pair : PerBlockAccesses)
- for (MemoryAccess &MA : *Pair.second)
- MA.dropAllReferences();
-}
-
-MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
- auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));
-
- if (Res.second)
- Res.first->second = make_unique<AccessList>();
- return Res.first->second.get();
-}
-MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
- auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));
-
- if (Res.second)
- Res.first->second = make_unique<DefsList>();
- return Res.first->second.get();
-}
-
-/// This class is a batch walker of all MemoryUse's in the program, and points
-/// their defining access at the thing that actually clobbers them. Because it
-/// is a batch walker that touches everything, it does not operate like the
-/// other walkers. This walker is basically performing a top-down SSA renaming
-/// pass, where the version stack is used as the cache. This enables it to be
-/// significantly more time and memory efficient than using the regular walker,
-/// which is walking bottom-up.
-class MemorySSA::OptimizeUses {
-public:
- OptimizeUses(MemorySSA *MSSA, MemorySSAWalker *Walker, AliasAnalysis *AA,
- DominatorTree *DT)
- : MSSA(MSSA), Walker(Walker), AA(AA), DT(DT) {
- Walker = MSSA->getWalker();
- }
-
- void optimizeUses();
-
-private:
- /// This represents where a given MemoryLocation is in the stack.
- struct MemlocStackInfo {
- // This is essentially keeping track of versions of the stack. Whenever
- // the stack changes due to pushes or pops, these versions increase.
- unsigned long StackEpoch;
- unsigned long PopEpoch;
- // This is the lower bound of places on the stack to check. It is equal to
- // the place the last stack walk ended.
- // Note: Correctness depends on this being initialized to 0, which DenseMap
- // does.
- unsigned long LowerBound;
- const BasicBlock *LowerBoundBlock;
- // This is where the last walk for this memory location ended.
- unsigned long LastKill;
- bool LastKillValid;
- };
- void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
- SmallVectorImpl<MemoryAccess *> &,
- DenseMap<MemoryLocOrCall, MemlocStackInfo> &);
- MemorySSA *MSSA;
- MemorySSAWalker *Walker;
- AliasAnalysis *AA;
- DominatorTree *DT;
-};
-
-/// Optimize the uses in a given block. This is basically the SSA renaming
-/// algorithm, with one caveat: We are able to use a single stack for all
-/// MemoryUses. This is because the set of *possible* reaching MemoryDefs is
-/// the same for every MemoryUse. The *actual* clobbering MemoryDef is just
-/// going to be some position in that stack of possible ones.
-///
-/// We track, for each MemoryLocation, the stack positions it still needs
-/// to check and where its last walk ended, because we only want to check
-/// the things that have changed since last time. The same MemoryLocation
-/// should get clobbered by the same store (getModRefInfo does not use
-/// invariantness or things like this, and if it starts to, we can modify
-/// MemoryLocOrCall to include the relevant data).
-void MemorySSA::OptimizeUses::optimizeUsesInBlock(
- const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
- SmallVectorImpl<MemoryAccess *> &VersionStack,
- DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {
-
- // If no accesses, nothing to do.
- MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
- if (Accesses == nullptr)
- return;
-
- // Pop everything that doesn't dominate the current block off the stack,
- // and increment the PopEpoch to account for this.
- while (true) {
- assert(
- !VersionStack.empty() &&
- "Version stack should have liveOnEntry sentinel dominating everything");
- BasicBlock *BackBlock = VersionStack.back()->getBlock();
- if (DT->dominates(BackBlock, BB))
- break;
- while (VersionStack.back()->getBlock() == BackBlock)
- VersionStack.pop_back();
- ++PopEpoch;
- }
-
- for (MemoryAccess &MA : *Accesses) {
- auto *MU = dyn_cast<MemoryUse>(&MA);
- if (!MU) {
- VersionStack.push_back(&MA);
- ++StackEpoch;
- continue;
- }
-
- if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
- MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true);
- continue;
- }
-
- MemoryLocOrCall UseMLOC(MU);
- auto &LocInfo = LocStackInfo[UseMLOC];
- // If the pop epoch changed, it means we've removed stuff from the top of
- // the stack due to changing blocks. We may have to reset the lower bound or
- // last kill info.
- if (LocInfo.PopEpoch != PopEpoch) {
- LocInfo.PopEpoch = PopEpoch;
- LocInfo.StackEpoch = StackEpoch;
- // If the lower bound was in something that no longer dominates us, we
- // have to reset it.
- // We can't simply track stack size, because the stack may have had
- // pushes/pops in the meantime.
- // XXX: This is non-optimal, but is only slower in cases with heavily
- // branching dominator trees. Getting the optimal number of queries would
- // mean making lowerbound and lastkill per-loc stacks, and popping them
- // until the top of the stack dominates us. This does not seem worth it ATM.
- // A much cheaper optimization would be to always explore the deepest
- // branch of the dominator tree first. This will guarantee this resets on
- // the smallest set of blocks.
- if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
- !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
- // Reset the lower bound of things to check.
- // TODO: Some day we should be able to reset to last kill, rather than
- // 0.
- LocInfo.LowerBound = 0;
- LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
- LocInfo.LastKillValid = false;
- }
- } else if (LocInfo.StackEpoch != StackEpoch) {
- // If all that has changed is the StackEpoch, we only have to check the
- // new things on the stack, because we've checked everything before. In
- // this case, the lower bound of things to check remains the same.
- LocInfo.PopEpoch = PopEpoch;
- LocInfo.StackEpoch = StackEpoch;
- }
- if (!LocInfo.LastKillValid) {
- LocInfo.LastKill = VersionStack.size() - 1;
- LocInfo.LastKillValid = true;
- }
-
- // At this point, we should have corrected last kill and LowerBound to be
- // in bounds.
- assert(LocInfo.LowerBound < VersionStack.size() &&
- "Lower bound out of range");
- assert(LocInfo.LastKill < VersionStack.size() &&
- "Last kill info out of range");
- // In any case, the new upper bound is the top of the stack.
- unsigned long UpperBound = VersionStack.size() - 1;
-
- if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
- DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
- << *(MU->getMemoryInst()) << ")"
- << " because there are " << UpperBound - LocInfo.LowerBound
- << " stores to disambiguate\n");
- // Because we did not walk, LastKill is no longer valid, as this may
- // have been a kill.
- LocInfo.LastKillValid = false;
- continue;
- }
- bool FoundClobberResult = false;
- while (UpperBound > LocInfo.LowerBound) {
- if (isa<MemoryPhi>(VersionStack[UpperBound])) {
- // For phis, use the walker, see where we ended up, go there
- Instruction *UseInst = MU->getMemoryInst();
- MemoryAccess *Result = Walker->getClobberingMemoryAccess(UseInst);
- // We are guaranteed to find it or something is wrong
- while (VersionStack[UpperBound] != Result) {
- assert(UpperBound != 0);
- --UpperBound;
- }
- FoundClobberResult = true;
- break;
- }
-
- MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
- // If the lifetime of the pointer ends at this instruction, it's live on
- // entry.
- if (!UseMLOC.IsCall && lifetimeEndsAt(MD, UseMLOC.getLoc(), *AA)) {
- // Reset UpperBound to liveOnEntryDef's place in the stack
- UpperBound = 0;
- FoundClobberResult = true;
- break;
- }
- if (instructionClobbersQuery(MD, MU, UseMLOC, *AA)) {
- FoundClobberResult = true;
- break;
- }
- --UpperBound;
- }
- // At the end of this loop, UpperBound is either a clobber, or lower bound.
- // PHI walking may cause it to be < LowerBound, and in fact, < LastKill.
- if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
- MU->setDefiningAccess(VersionStack[UpperBound], true);
- // Our last kill is now wherever the walk got to.
- LocInfo.LastKill = UpperBound;
- } else {
- // Otherwise, we checked all the new ones, and now we know we can get to
- // LastKill.
- MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true);
- }
- LocInfo.LowerBound = VersionStack.size() - 1;
- LocInfo.LowerBoundBlock = BB;
- }
-}
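-
-// A worked example of the walk above (a straight-line block, as a sketch):
-//   1 = MemoryDef(liveOnEntry)   ; store i8 0, i8* %q
-//   2 = MemoryDef(1)             ; store i8 0, i8* %p
-//   MemoryUse(2)                 ; load i8, i8* %p
-// Both defs get pushed on VersionStack. The load's walk starts at the top
-// (UpperBound = 2), immediately finds that the store to %p clobbers it,
-// optimizes the use to 2, and records that position as LastKill.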
-
-/// Optimize uses to point to their actual clobbering definitions.
-void MemorySSA::OptimizeUses::optimizeUses() {
- SmallVector<MemoryAccess *, 16> VersionStack;
- DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
- VersionStack.push_back(MSSA->getLiveOnEntryDef());
-
- unsigned long StackEpoch = 1;
- unsigned long PopEpoch = 1;
- // We perform a non-recursive top-down dominator tree walk.
- for (const auto *DomNode : depth_first(DT->getRootNode()))
- optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
- LocStackInfo);
-}
-
-void MemorySSA::placePHINodes(
- const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks,
- const DenseMap<const BasicBlock *, unsigned int> &BBNumbers) {
- // Determine where our MemoryPhi's should go
- ForwardIDFCalculator IDFs(*DT);
- IDFs.setDefiningBlocks(DefiningBlocks);
- SmallVector<BasicBlock *, 32> IDFBlocks;
- IDFs.calculate(IDFBlocks);
-
- std::sort(IDFBlocks.begin(), IDFBlocks.end(),
- [&BBNumbers](const BasicBlock *A, const BasicBlock *B) {
- return BBNumbers.lookup(A) < BBNumbers.lookup(B);
- });
-
- // Now place MemoryPhi nodes.
- for (auto &BB : IDFBlocks)
- createMemoryPhi(BB);
-}
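-
-// For example (a sketch), stores to %p in both arms of a diamond put the
-// merge block in the IDF of the defining blocks, so it gets a MemoryPhi:
-//   then:  1 = MemoryDef(liveOnEntry)
-//   else:  2 = MemoryDef(liveOnEntry)
-//   merge: 3 = MemoryPhi({then,1},{else,2})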
-
-void MemorySSA::buildMemorySSA() {
- // We create an access to represent "live on entry", for things like
- // arguments or users of globals, where the memory they use is defined before
- // the beginning of the function. We do not actually insert it into the IR.
- // We do not define a live on exit for the immediate uses, and thus our
- // semantics do *not* imply that something with no immediate uses can simply
- // be removed.
- BasicBlock &StartingPoint = F.getEntryBlock();
- LiveOnEntryDef = make_unique<MemoryDef>(F.getContext(), nullptr, nullptr,
- &StartingPoint, NextID++);
- DenseMap<const BasicBlock *, unsigned int> BBNumbers;
- unsigned NextBBNum = 0;
-
- // We maintain lists of memory accesses per block, trading memory for time.
- // The alternative would be to look up the memory access for every possible
- // instruction in the stream.
- SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
- SmallPtrSet<BasicBlock *, 32> DefUseBlocks;
- // Go through each block, figure out where defs occur, and chain together all
- // the accesses.
- for (BasicBlock &B : F) {
- BBNumbers[&B] = NextBBNum++;
- bool InsertIntoDef = false;
- AccessList *Accesses = nullptr;
- DefsList *Defs = nullptr;
- for (Instruction &I : B) {
- MemoryUseOrDef *MUD = createNewAccess(&I);
- if (!MUD)
- continue;
-
- if (!Accesses)
- Accesses = getOrCreateAccessList(&B);
- Accesses->push_back(MUD);
- if (isa<MemoryDef>(MUD)) {
- InsertIntoDef = true;
- if (!Defs)
- Defs = getOrCreateDefsList(&B);
- Defs->push_back(*MUD);
- }
- }
- if (InsertIntoDef)
- DefiningBlocks.insert(&B);
- if (Accesses)
- DefUseBlocks.insert(&B);
- }
- placePHINodes(DefiningBlocks, BBNumbers);
-
- // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
- // filled in with all blocks.
- SmallPtrSet<BasicBlock *, 16> Visited;
- renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);
-
- CachingWalker *Walker = getWalkerImpl();
-
- // We're doing a batch of updates; don't drop useful caches between them.
- Walker->setAutoResetWalker(false);
- OptimizeUses(this, Walker, AA, DT).optimizeUses();
- Walker->setAutoResetWalker(true);
- Walker->resetClobberWalker();
-
- // Mark the uses in unreachable blocks as live on entry, so that they go
- // somewhere.
- for (auto &BB : F)
- if (!Visited.count(&BB))
- markUnreachableAsLiveOnEntry(&BB);
-}
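
For orientation, here is a hedged usage sketch of driving this construction from client code, mirroring MemorySSAWrapperPass::runOnFunction further below. It assumes, as in this revision, that the constructor runs buildMemorySSA() as part of initialization; printMemorySSAFor is an invented helper name.

void printMemorySSAFor(Function &F, AliasAnalysis &AA, DominatorTree &DT) {
  MemorySSA MSSA(F, &AA, &DT); // creates accesses, places phis, renames
  MSSA.print(dbgs());          // annotated dump of F, one access per line
  MSSA.verifyMemorySSA();      // def-use, domination, and ordering checks
}
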
-
-MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }
-
-MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() {
- if (Walker)
- return Walker.get();
-
- Walker = make_unique<CachingWalker>(this, AA, DT);
- return Walker.get();
-}
-
-// This is a helper function used by the creation routines. It places NewAccess
-// into the access and defs lists for a given basic block, at the given
-// insertion point.
-void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
- const BasicBlock *BB,
- InsertionPlace Point) {
- auto *Accesses = getOrCreateAccessList(BB);
- if (Point == Beginning) {
- // If it's a phi node, it goes first; otherwise, it goes after any phi
- // nodes.
- if (isa<MemoryPhi>(NewAccess)) {
- Accesses->push_front(NewAccess);
- auto *Defs = getOrCreateDefsList(BB);
- Defs->push_front(*NewAccess);
- } else {
- auto AI = find_if_not(
- *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
- Accesses->insert(AI, NewAccess);
- if (!isa<MemoryUse>(NewAccess)) {
- auto *Defs = getOrCreateDefsList(BB);
- auto DI = find_if_not(
- *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
- Defs->insert(DI, *NewAccess);
- }
- }
- } else {
- Accesses->push_back(NewAccess);
- if (!isa<MemoryUse>(NewAccess)) {
- auto *Defs = getOrCreateDefsList(BB);
- Defs->push_back(*NewAccess);
- }
- }
- BlockNumberingValid.erase(BB);
-}
-
-void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
- AccessList::iterator InsertPt) {
- auto *Accesses = getWritableBlockAccesses(BB);
- bool WasEnd = InsertPt == Accesses->end();
- Accesses->insert(AccessList::iterator(InsertPt), What);
- if (!isa<MemoryUse>(What)) {
- auto *Defs = getOrCreateDefsList(BB);
- // If we got asked to insert at the end, we have an easy job: just shove it
- // at the end. If we got asked to insert before an existing def, we also get
- // an iterator. If we got asked to insert before a use, we have to hunt for
- // the next def.
- if (WasEnd) {
- Defs->push_back(*What);
- } else if (isa<MemoryDef>(InsertPt)) {
- Defs->insert(InsertPt->getDefsIterator(), *What);
- } else {
- while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
- ++InsertPt;
- // Either we found a def, or we are inserting at the end
- if (InsertPt == Accesses->end())
- Defs->push_back(*What);
- else
- Defs->insert(InsertPt->getDefsIterator(), *What);
- }
- }
- BlockNumberingValid.erase(BB);
-}
-
-// Move What before Where in the IR. The end result is that What will belong to
-// the right lists and have the right Block set, but will not otherwise be
-// correct. It will not have the right defining access, and if it is a def,
-// things below it will not properly be updated.
-void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
- AccessList::iterator Where) {
- // Keep it in the lookup tables, remove from the lists
- removeFromLists(What, false);
- What->setBlock(BB);
- insertIntoListsBefore(What, BB, Where);
-}
-
-void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
- InsertionPlace Point) {
- removeFromLists(What, false);
- What->setBlock(BB);
- insertIntoListsForBlock(What, BB, Point);
-}
-
-MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
- assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
- MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
- // Phis are always placed at the front of the block.
- insertIntoListsForBlock(Phi, BB, Beginning);
- ValueToMemoryAccess[BB] = Phi;
- return Phi;
-}
-
-MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
- MemoryAccess *Definition) {
- assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
- MemoryUseOrDef *NewAccess = createNewAccess(I);
- assert(
- NewAccess != nullptr &&
- "Tried to create a memory access for a non-memory touching instruction");
- NewAccess->setDefiningAccess(Definition);
- return NewAccess;
-}
-
-// Return true if the instruction has ordering constraints.
-// Note specifically that this only considers stores and loads
-// because others are still considered ModRef by getModRefInfo.
-static inline bool isOrdered(const Instruction *I) {
- if (auto *SI = dyn_cast<StoreInst>(I)) {
- if (!SI->isUnordered())
- return true;
- } else if (auto *LI = dyn_cast<LoadInst>(I)) {
- if (!LI->isUnordered())
- return true;
- }
- return false;
-}
-/// \brief Helper function to create new memory accesses
-MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I) {
- // The assume intrinsic has a control dependency which we model by claiming
- // that it writes arbitrarily. Ignore that fake memory dependency here.
- // FIXME: Replace this special casing with a more accurate modelling of
- // assume's control dependency.
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
- if (II->getIntrinsicID() == Intrinsic::assume)
- return nullptr;
-
- // Find out what effect this instruction has on memory.
- ModRefInfo ModRef = AA->getModRefInfo(I);
- // The isOrdered check is used to ensure that volatiles end up as defs
- // (atomics end up as ModRef right now anyway). Until we separate the
- // ordering chain from the memory chain, this enables people to see at least
- // some relative ordering to volatiles. Note that getClobberingMemoryAccess
- // will still give an answer that bypasses other volatile loads. TODO:
- // Separate memory aliasing and ordering into two different chains so that we
- // can precisely represent both "what memory this will read/write/be
- // clobbered by" and "what instructions can I move this past".
- bool Def = bool(ModRef & MRI_Mod) || isOrdered(I);
- bool Use = bool(ModRef & MRI_Ref);
-
- // It's possible for an instruction not to touch memory at all; during
- // construction, we ignore such instructions.
- if (!Def && !Use)
- return nullptr;
-
- assert((Def || Use) &&
- "Trying to create a memory access with a non-memory instruction");
-
- MemoryUseOrDef *MUD;
- if (Def)
- MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
- else
- MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
- ValueToMemoryAccess[I] = MUD;
- return MUD;
-}
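
The classification rule above is worth isolating. The following is a standalone sketch (plain C++, not LLVM code; the enum values stand in for getModRefInfo's answers): anything that may write memory, or that carries ordering (a volatile or non-unordered atomic load/store), becomes a MemoryDef; anything that may only read becomes a MemoryUse; anything else gets no access at all.

#include <cassert>

enum ModRef { NoModRef = 0, Ref = 1, Mod = 2 };

struct InstInfo {
  int MR;       // combination of Ref/Mod bits
  bool Ordered; // volatile, or a non-unordered atomic load/store
};

enum class AccessKind { None, Use, Def };

AccessKind classify(const InstInfo &I) {
  bool Def = (I.MR & Mod) || I.Ordered; // mirrors the check above
  bool Use = (I.MR & Ref);
  if (!Def && !Use)
    return AccessKind::None; // no memory access is created
  return Def ? AccessKind::Def : AccessKind::Use;
}

int main() {
  assert(classify({Ref, false}) == AccessKind::Use); // plain load
  assert(classify({Ref, true}) == AccessKind::Def);  // volatile load
  assert(classify({Mod, false}) == AccessKind::Def); // store
  assert(classify({NoModRef, false}) == AccessKind::None);
  return 0;
}
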
-
-/// \brief Returns true if \p Replacer dominates \p Replacee.
-bool MemorySSA::dominatesUse(const MemoryAccess *Replacer,
- const MemoryAccess *Replacee) const {
- if (isa<MemoryUseOrDef>(Replacee))
- return DT->dominates(Replacer->getBlock(), Replacee->getBlock());
- const auto *MP = cast<MemoryPhi>(Replacee);
- // For a phi node, the use occurs in the predecessor block of the phi node.
- // Since Replacee may occur multiple times in the phi node, we have to check
- // each operand to ensure Replacer dominates every block where Replacee occurs.
- for (const Use &Arg : MP->operands()) {
- if (Arg.get() != Replacee &&
- !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg)))
- return false;
- }
- return true;
-}
-
-/// \brief Properly remove \p MA from all of MemorySSA's lookup tables.
-void MemorySSA::removeFromLookups(MemoryAccess *MA) {
- assert(MA->use_empty() &&
- "Trying to remove memory access that still has uses");
- BlockNumbering.erase(MA);
- if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(MA))
- MUD->setDefiningAccess(nullptr);
- // Invalidate our walker's cache if necessary
- if (!isa<MemoryUse>(MA))
- Walker->invalidateInfo(MA);
- // The call below to erase will destroy MA, so we can't change the order in
- // which we do things here
- Value *MemoryInst;
- if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
- MemoryInst = MUD->getMemoryInst();
- } else {
- MemoryInst = MA->getBlock();
- }
- auto VMA = ValueToMemoryAccess.find(MemoryInst);
- if (VMA->second == MA)
- ValueToMemoryAccess.erase(VMA);
-}
-
-/// \brief Properly remove \p MA from all of MemorySSA's lists.
-///
-/// Because of the way the intrusive list and use lists work, it is important to
-/// do removal in the right order.
-/// ShouldDelete defaults to true, and will cause the memory access to also be
-/// deleted, not just removed.
-void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
- // The access list owns the reference, so we erase it from the non-owning list
- // first.
- if (!isa<MemoryUse>(MA)) {
- auto DefsIt = PerBlockDefs.find(MA->getBlock());
- std::unique_ptr<DefsList> &Defs = DefsIt->second;
- Defs->remove(*MA);
- if (Defs->empty())
- PerBlockDefs.erase(DefsIt);
- }
-
- // The erase call here will delete it. If we don't want it deleted, we call
- // remove instead.
- auto AccessIt = PerBlockAccesses.find(MA->getBlock());
- std::unique_ptr<AccessList> &Accesses = AccessIt->second;
- if (ShouldDelete)
- Accesses->erase(MA);
- else
- Accesses->remove(MA);
-
- if (Accesses->empty())
- PerBlockAccesses.erase(AccessIt);
-}
-
-void MemorySSA::print(raw_ostream &OS) const {
- MemorySSAAnnotatedWriter Writer(this);
- F.print(OS, &Writer);
-}
-
-#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
-LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
-#endif
-
-void MemorySSA::verifyMemorySSA() const {
- verifyDefUses(F);
- verifyDomination(F);
- verifyOrdering(F);
- Walker->verify(this);
-}
-
-/// \brief Verify that the order and existence of MemoryAccesses matches the
-/// order and existence of memory affecting instructions.
-void MemorySSA::verifyOrdering(Function &F) const {
- // Walk all the blocks, comparing what the lookups think and what the access
- // lists think, as well as the order in the blocks vs the order in the access
- // lists.
- SmallVector<MemoryAccess *, 32> ActualAccesses;
- SmallVector<MemoryAccess *, 32> ActualDefs;
- for (BasicBlock &B : F) {
- const AccessList *AL = getBlockAccesses(&B);
- const auto *DL = getBlockDefs(&B);
- MemoryAccess *Phi = getMemoryAccess(&B);
- if (Phi) {
- ActualAccesses.push_back(Phi);
- ActualDefs.push_back(Phi);
- }
-
- for (Instruction &I : B) {
- MemoryAccess *MA = getMemoryAccess(&I);
- assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
- "We have memory affecting instructions "
- "in this block but they are not in the "
- "access list or defs list");
- if (MA) {
- ActualAccesses.push_back(MA);
- if (isa<MemoryDef>(MA))
- ActualDefs.push_back(MA);
- }
- }
- // Either we hit the assert, we really have no accesses, or we have both
- // accesses and an access list. The same goes for defs.
- if (!AL && !DL)
- continue;
- assert(AL->size() == ActualAccesses.size() &&
- "We don't have the same number of accesses in the block as on the "
- "access list");
- assert((DL || ActualDefs.size() == 0) &&
- "Either we should have a defs list, or we should have no defs");
- assert((!DL || DL->size() == ActualDefs.size()) &&
- "We don't have the same number of defs in the block as on the "
- "def list");
- auto ALI = AL->begin();
- auto AAI = ActualAccesses.begin();
- while (ALI != AL->end() && AAI != ActualAccesses.end()) {
- assert(&*ALI == *AAI && "Not the same accesses in the same order");
- ++ALI;
- ++AAI;
- }
- ActualAccesses.clear();
- if (DL) {
- auto DLI = DL->begin();
- auto ADI = ActualDefs.begin();
- while (DLI != DL->end() && ADI != ActualDefs.end()) {
- assert(&*DLI == *ADI && "Not the same defs in the same order");
- ++DLI;
- ++ADI;
- }
- }
- ActualDefs.clear();
- }
-}
-
-/// \brief Verify the domination properties of MemorySSA by checking that each
-/// definition dominates all of its uses.
-void MemorySSA::verifyDomination(Function &F) const {
-#ifndef NDEBUG
- for (BasicBlock &B : F) {
- // Phi nodes are attached to basic blocks
- if (MemoryPhi *MP = getMemoryAccess(&B))
- for (const Use &U : MP->uses())
- assert(dominates(MP, U) && "Memory PHI does not dominate its uses");
-
- for (Instruction &I : B) {
- MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I));
- if (!MD)
- continue;
-
- for (const Use &U : MD->uses())
- assert(dominates(MD, U) && "Memory Def does not dominate its uses");
- }
- }
-#endif
-}
-
-/// \brief Verify the def-use lists in MemorySSA, by verifying that \p Use
-/// appears in the use list of \p Def.
-void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
-#ifndef NDEBUG
- // The live on entry use may cause us to get a NULL def here
- if (!Def)
- assert(isLiveOnEntryDef(Use) &&
- "Null def but use not point to live on entry def");
- else
- assert(is_contained(Def->users(), Use) &&
- "Did not find use in def's use list");
-#endif
-}
-
-/// \brief Verify the immediate use information, by walking all the memory
-/// accesses and verifying that, for each use, it appears in the
-/// appropriate def's use list
-void MemorySSA::verifyDefUses(Function &F) const {
- for (BasicBlock &B : F) {
- // Phi nodes are attached to basic blocks
- if (MemoryPhi *Phi = getMemoryAccess(&B)) {
- assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
- pred_begin(&B), pred_end(&B))) &&
- "Incomplete MemoryPhi Node");
- for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
- verifyUseInDefs(Phi->getIncomingValue(I), Phi);
- }
-
- for (Instruction &I : B) {
- if (MemoryUseOrDef *MA = getMemoryAccess(&I)) {
- verifyUseInDefs(MA->getDefiningAccess(), MA);
- }
- }
- }
-}
-
-MemoryUseOrDef *MemorySSA::getMemoryAccess(const Instruction *I) const {
- return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I));
-}
-
-MemoryPhi *MemorySSA::getMemoryAccess(const BasicBlock *BB) const {
- return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB)));
-}
-
-/// Perform a local numbering on blocks so that instruction ordering can be
-/// determined in constant time.
-/// TODO: We currently just number in order. If we numbered by N, we could
-/// allow at least N-1 sequences of insertBefore or insertAfter (and at least
-/// log2(N) sequences of mixed before and after) without needing to invalidate
-/// the numbering.
-void MemorySSA::renumberBlock(const BasicBlock *B) const {
- // The pre-increment ensures the numbers really start at 1.
- unsigned long CurrentNumber = 0;
- const AccessList *AL = getBlockAccesses(B);
- assert(AL != nullptr && "Asking to renumber an empty block");
- for (const auto &I : *AL)
- BlockNumbering[&I] = ++CurrentNumber;
- BlockNumberingValid.insert(B);
-}
-
-/// \brief Determine, for two memory accesses in the same block,
-/// whether \p Dominator dominates \p Dominatee.
-/// \returns True if \p Dominator dominates \p Dominatee.
-bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
- const MemoryAccess *Dominatee) const {
-
- const BasicBlock *DominatorBlock = Dominator->getBlock();
-
- assert((DominatorBlock == Dominatee->getBlock()) &&
- "Asking for local domination when accesses are in different blocks!");
- // A node dominates itself.
- if (Dominatee == Dominator)
- return true;
-
- // When Dominatee is defined on function entry, it is not dominated by another
- // memory access.
- if (isLiveOnEntryDef(Dominatee))
- return false;
-
- // When Dominator is defined on function entry, it dominates the other memory
- // access.
- if (isLiveOnEntryDef(Dominator))
- return true;
-
- if (!BlockNumberingValid.count(DominatorBlock))
- renumberBlock(DominatorBlock);
-
- unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
- // All numbers start with 1
- assert(DominatorNum != 0 && "Block was not numbered properly");
- unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
- assert(DominateeNum != 0 && "Block was not numbered properly");
- return DominatorNum < DominateeNum;
-}
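
The renumbering scheme is simple enough to state as a standalone sketch (plain C++, not LLVM code): number the accesses 1..N on the first query after a mutation, compare numbers afterwards, and invalidate whenever the list changes.

#include <cassert>
#include <list>
#include <unordered_map>

struct Access {};

struct BlockOrdering {
  std::list<Access *> Accesses;               // the per-block access list
  std::unordered_map<Access *, unsigned> Num; // lazily built numbering
  bool Valid = false;

  void renumber() {
    unsigned N = 0; // pre-increment so the numbers really start at 1
    for (Access *A : Accesses)
      Num[A] = ++N;
    Valid = true;
  }

  bool locallyDominates(Access *Dominator, Access *Dominatee) {
    if (Dominator == Dominatee)
      return true; // a node dominates itself
    if (!Valid)
      renumber(); // the first query after a mutation pays O(n) once
    return Num[Dominator] < Num[Dominatee];
  }

  void insertBack(Access *A) {
    Accesses.push_back(A);
    Valid = false; // mirrors BlockNumberingValid.erase(BB) above
  }
};

int main() {
  Access A, B;
  BlockOrdering Ord;
  Ord.insertBack(&A);
  Ord.insertBack(&B);
  assert(Ord.locallyDominates(&A, &B));
  assert(!Ord.locallyDominates(&B, &A));
  return 0;
}
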
-
-bool MemorySSA::dominates(const MemoryAccess *Dominator,
- const MemoryAccess *Dominatee) const {
- if (Dominator == Dominatee)
- return true;
-
- if (isLiveOnEntryDef(Dominatee))
- return false;
-
- if (Dominator->getBlock() != Dominatee->getBlock())
- return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
- return locallyDominates(Dominator, Dominatee);
-}
-
-bool MemorySSA::dominates(const MemoryAccess *Dominator,
- const Use &Dominatee) const {
- if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
- BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
- // The def must dominate the incoming block of the phi.
- if (UseBB != Dominator->getBlock())
- return DT->dominates(Dominator->getBlock(), UseBB);
- // If the UseBB and the DefBB are the same, compare locally.
- return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
- }
- // If it's not a PHI node use, the plain dominates() above can already handle it.
- return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
-}
-
-const static char LiveOnEntryStr[] = "liveOnEntry";
-
-void MemoryDef::print(raw_ostream &OS) const {
- MemoryAccess *UO = getDefiningAccess();
-
- OS << getID() << " = MemoryDef(";
- if (UO && UO->getID())
- OS << UO->getID();
- else
- OS << LiveOnEntryStr;
- OS << ')';
-}
-
-void MemoryPhi::print(raw_ostream &OS) const {
- bool First = true;
- OS << getID() << " = MemoryPhi(";
- for (const auto &Op : operands()) {
- BasicBlock *BB = getIncomingBlock(Op);
- MemoryAccess *MA = cast<MemoryAccess>(Op);
- if (!First)
- OS << ',';
- else
- First = false;
-
- OS << '{';
- if (BB->hasName())
- OS << BB->getName();
- else
- BB->printAsOperand(OS, false);
- OS << ',';
- if (unsigned ID = MA->getID())
- OS << ID;
- else
- OS << LiveOnEntryStr;
- OS << '}';
- }
- OS << ')';
-}
-
-MemoryAccess::~MemoryAccess() {}
-
-void MemoryUse::print(raw_ostream &OS) const {
- MemoryAccess *UO = getDefiningAccess();
- OS << "MemoryUse(";
- if (UO && UO->getID())
- OS << UO->getID();
- else
- OS << LiveOnEntryStr;
- OS << ')';
-}
-
-void MemoryAccess::dump() const {
-// Cannot completely remove virtual function even in release mode.
-#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
- print(dbgs());
- dbgs() << "\n";
-#endif
-}
-
-char MemorySSAPrinterLegacyPass::ID = 0;
-
-MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
- initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
-}
-
-void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesAll();
- AU.addRequired<MemorySSAWrapperPass>();
- AU.addPreserved<MemorySSAWrapperPass>();
-}
-
-bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
- auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
- MSSA.print(dbgs());
- if (VerifyMemorySSA)
- MSSA.verifyMemorySSA();
- return false;
-}
-
-AnalysisKey MemorySSAAnalysis::Key;
-
-MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
- FunctionAnalysisManager &AM) {
- auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
- auto &AA = AM.getResult<AAManager>(F);
- return MemorySSAAnalysis::Result(make_unique<MemorySSA>(F, &AA, &DT));
-}
-
-PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
- FunctionAnalysisManager &AM) {
- OS << "MemorySSA for function: " << F.getName() << "\n";
- AM.getResult<MemorySSAAnalysis>(F).getMSSA().print(OS);
-
- return PreservedAnalyses::all();
-}
-
-PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
- FunctionAnalysisManager &AM) {
- AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();
-
- return PreservedAnalyses::all();
-}
-
-char MemorySSAWrapperPass::ID = 0;
-
-MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
- initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
-}
-
-void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }
-
-void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesAll();
- AU.addRequiredTransitive<DominatorTreeWrapperPass>();
- AU.addRequiredTransitive<AAResultsWrapperPass>();
-}
-
-bool MemorySSAWrapperPass::runOnFunction(Function &F) {
- auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
- MSSA.reset(new MemorySSA(F, &AA, &DT));
- return false;
-}
-
-void MemorySSAWrapperPass::verifyAnalysis() const { MSSA->verifyMemorySSA(); }
-
-void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
- MSSA->print(OS);
-}
-
-MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
-
-MemorySSA::CachingWalker::CachingWalker(MemorySSA *M, AliasAnalysis *A,
- DominatorTree *D)
- : MemorySSAWalker(M), Walker(*M, *A, *D), AutoResetWalker(true) {}
-
-MemorySSA::CachingWalker::~CachingWalker() {}
-
-void MemorySSA::CachingWalker::invalidateInfo(MemoryAccess *MA) {
- if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
- MUD->resetOptimized();
-}
-
-/// \brief Walk the use-def chains starting at \p MA and find
-/// the MemoryAccess that actually clobbers Loc.
-///
-/// \returns our clobbering memory access
-MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
- MemoryAccess *StartingAccess, UpwardsMemoryQuery &Q) {
- MemoryAccess *New = Walker.findClobber(StartingAccess, Q);
-#ifdef EXPENSIVE_CHECKS
- MemoryAccess *NewNoCache = Walker.findClobber(StartingAccess, Q);
- assert(NewNoCache == New && "Cache made us hand back a different result?");
-#endif
- if (AutoResetWalker)
- resetClobberWalker();
- return New;
-}
-
-MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
- MemoryAccess *StartingAccess, const MemoryLocation &Loc) {
- if (isa<MemoryPhi>(StartingAccess))
- return StartingAccess;
-
- auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess);
- if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
- return StartingUseOrDef;
-
- Instruction *I = StartingUseOrDef->getMemoryInst();
-
- // Conservatively, fences are always clobbers, so don't perform the walk if we
- // hit a fence.
- if (!ImmutableCallSite(I) && I->isFenceLike())
- return StartingUseOrDef;
-
- UpwardsMemoryQuery Q;
- Q.OriginalAccess = StartingUseOrDef;
- Q.StartingLoc = Loc;
- Q.Inst = I;
- Q.IsCall = false;
-
- // Unlike the other function, do not walk to the def of a def, because we are
- // handed something we already believe is the clobbering access.
- MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
- ? StartingUseOrDef->getDefiningAccess()
- : StartingUseOrDef;
-
- MemoryAccess *Clobber = getClobberingMemoryAccess(DefiningAccess, Q);
- DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
- DEBUG(dbgs() << *StartingUseOrDef << "\n");
- DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
- DEBUG(dbgs() << *Clobber << "\n");
- return Clobber;
-}
-
-MemoryAccess *
-MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
- auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
- // If this is a MemoryPhi, we can't do anything.
- if (!StartingAccess)
- return MA;
-
- // If this is an already optimized use or def, return the optimized result.
- // Note: Currently, we do not store the optimized def result because we'd need
- // a separate field, since we can't use it as the defining access.
- if (auto *MUD = dyn_cast<MemoryUseOrDef>(StartingAccess))
- if (MUD->isOptimized())
- return MUD->getOptimized();
-
- const Instruction *I = StartingAccess->getMemoryInst();
- UpwardsMemoryQuery Q(I, StartingAccess);
- // We can't sanely do anything with fences; they conservatively clobber
- // all memory, and have no locations to get pointers from to try to
- // disambiguate.
- if (!Q.IsCall && I->isFenceLike())
- return StartingAccess;
-
- if (isUseTriviallyOptimizableToLiveOnEntry(*MSSA->AA, I)) {
- MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
- if (auto *MUD = dyn_cast<MemoryUseOrDef>(StartingAccess))
- MUD->setOptimized(LiveOnEntry);
- return LiveOnEntry;
- }
-
- // Start with the thing we already think clobbers this location
- MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
-
- // At this point, DefiningAccess may be the live on entry def.
- // If it is, we will not get a better result.
- if (MSSA->isLiveOnEntryDef(DefiningAccess))
- return DefiningAccess;
-
- MemoryAccess *Result = getClobberingMemoryAccess(DefiningAccess, Q);
- DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
- DEBUG(dbgs() << *DefiningAccess << "\n");
- DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
- DEBUG(dbgs() << *Result << "\n");
- if (auto *MUD = dyn_cast<MemoryUseOrDef>(StartingAccess))
- MUD->setOptimized(Result);
-
- return Result;
-}
-
-MemoryAccess *
-DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
- if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
- return Use->getDefiningAccess();
- return MA;
-}
-
-MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
- MemoryAccess *StartingAccess, const MemoryLocation &) {
- if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
- return Use->getDefiningAccess();
- return StartingAccess;
-}
-} // namespace llvm
Removed: llvm/trunk/lib/Transforms/Utils/MemorySSAUpdater.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Utils/MemorySSAUpdater.cpp?rev=299979&view=auto
==============================================================================
--- llvm/trunk/lib/Transforms/Utils/MemorySSAUpdater.cpp (original)
+++ llvm/trunk/lib/Transforms/Utils/MemorySSAUpdater.cpp (removed)
@@ -1,494 +0,0 @@
-//===-- MemorySSAUpdater.cpp - Memory SSA Updater -------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------===//
-//
-// This file implements the MemorySSAUpdater class.
-//
-//===----------------------------------------------------------------===//
-#include "llvm/Transforms/Utils/MemorySSAUpdater.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/SmallSet.h"
-#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/Dominators.h"
-#include "llvm/IR/GlobalVariable.h"
-#include "llvm/IR/IRBuilder.h"
-#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/IR/LLVMContext.h"
-#include "llvm/IR/Metadata.h"
-#include "llvm/IR/Module.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/FormattedStream.h"
-#include "llvm/Transforms/Utils/MemorySSA.h"
-#include <algorithm>
-
-#define DEBUG_TYPE "memoryssa"
-using namespace llvm;
-namespace llvm {
-// This is the marker algorithm from "Simple and Efficient Construction of
-// Static Single Assignment Form".
-// The simple, non-marker algorithm places phi nodes at any join point.
-// Here, we place markers, and only place phi nodes if they end up necessary.
-// They are only necessary if they break a cycle (i.e., we recursively visit
-// ourselves again), or we discover, while getting the value of the operands,
-// that there are two or more definitions needing to be merged.
-// This will still leave a non-minimal form in the case of irreducible control
-// flow, where phi nodes may be in cycles with themselves, but are unnecessary.
-MemoryAccess *MemorySSAUpdater::getPreviousDefRecursive(BasicBlock *BB) {
- // Single predecessor case, just recurse, we can only have one definition.
- if (BasicBlock *Pred = BB->getSinglePredecessor()) {
- return getPreviousDefFromEnd(Pred);
- } else if (VisitedBlocks.count(BB)) {
- // We hit our node again, meaning we had a cycle; we must insert a phi
- // node to break it so we have an operand. The only case in which this
- // inserts useless phis is irreducible control flow.
- return MSSA->createMemoryPhi(BB);
- } else if (VisitedBlocks.insert(BB).second) {
- // Mark us visited so we can detect a cycle
- SmallVector<MemoryAccess *, 8> PhiOps;
-
- // Recurse to get the values in our predecessors for placement of a
- // potential phi node. This will insert phi nodes if we cycle in order to
- // break the cycle and have an operand.
- for (auto *Pred : predecessors(BB))
- PhiOps.push_back(getPreviousDefFromEnd(Pred));
-
- // Now try to simplify the ops to avoid placing a phi.
- // This may return null if we haven't created a phi yet; that's okay.
- MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MSSA->getMemoryAccess(BB));
- bool PHIExistsButNeedsUpdate = false;
- // See if the existing phi operands match what we need.
- // Unlike normal SSA, we only allow one phi node per block, so we can't just
- // create a new one.
- if (Phi && Phi->getNumOperands() != 0)
- if (!std::equal(Phi->op_begin(), Phi->op_end(), PhiOps.begin())) {
- PHIExistsButNeedsUpdate = true;
- }
-
- // See if we can avoid the phi by simplifying it.
- auto *Result = tryRemoveTrivialPhi(Phi, PhiOps);
- // If we couldn't simplify, we may have to create a phi
- if (Result == Phi) {
- if (!Phi)
- Phi = MSSA->createMemoryPhi(BB);
-
- // These will have been filled in by the recursive read we did above.
- if (PHIExistsButNeedsUpdate) {
- std::copy(PhiOps.begin(), PhiOps.end(), Phi->op_begin());
- std::copy(pred_begin(BB), pred_end(BB), Phi->block_begin());
- } else {
- unsigned i = 0;
- for (auto *Pred : predecessors(BB))
- Phi->addIncoming(PhiOps[i++], Pred);
- }
-
- Result = Phi;
- }
- if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Result))
- InsertedPHIs.push_back(MP);
- // Set ourselves up for the next variable by resetting visited state.
- VisitedBlocks.erase(BB);
- return Result;
- }
- llvm_unreachable("Should have hit one of the three cases above");
-}
-
-// This starts at the memory access, and goes backwards in the block to find
-// the previous definition. If a definition is not found in the block of the
-// access, it continues globally, creating phi nodes to ensure we have a
-// single definition.
-MemoryAccess *MemorySSAUpdater::getPreviousDef(MemoryAccess *MA) {
- auto *LocalResult = getPreviousDefInBlock(MA);
-
- return LocalResult ? LocalResult : getPreviousDefRecursive(MA->getBlock());
-}
-
-// This starts at the memory access, and goes backwards in the block to find
-// the previous definition. If the definition is not found in the block of
-// the access, it returns nullptr.
-MemoryAccess *MemorySSAUpdater::getPreviousDefInBlock(MemoryAccess *MA) {
- auto *Defs = MSSA->getWritableBlockDefs(MA->getBlock());
-
- // It's possible there are no defs, or we got handed the first def to start.
- if (Defs) {
- // If this is a def, we can just use the def iterators.
- if (!isa<MemoryUse>(MA)) {
- auto Iter = MA->getReverseDefsIterator();
- ++Iter;
- if (Iter != Defs->rend())
- return &*Iter;
- } else {
- // Otherwise, we have to walk the all-access iterator.
- auto Iter = MA->getReverseIterator();
- ++Iter;
- while (&*Iter != &*Defs->begin()) {
- if (!isa<MemoryUse>(*Iter))
- return &*Iter;
- --Iter;
- }
- // At this point it must be pointing at the first def
- assert(&*Iter == &*Defs->begin() &&
- "Should have hit first def walking backwards");
- return &*Iter;
- }
- }
- return nullptr;
-}
-
-// This starts at the end of the block
-MemoryAccess *MemorySSAUpdater::getPreviousDefFromEnd(BasicBlock *BB) {
- auto *Defs = MSSA->getWritableBlockDefs(BB);
-
- if (Defs)
- return &*Defs->rbegin();
-
- return getPreviousDefRecursive(BB);
-}
-// Recurse over a set of phi uses to eliminate the trivial ones
-MemoryAccess *MemorySSAUpdater::recursePhi(MemoryAccess *Phi) {
- if (!Phi)
- return nullptr;
- TrackingVH<MemoryAccess> Res(Phi);
- SmallVector<TrackingVH<Value>, 8> Uses;
- std::copy(Phi->user_begin(), Phi->user_end(), std::back_inserter(Uses));
- for (auto &U : Uses) {
- if (MemoryPhi *UsePhi = dyn_cast<MemoryPhi>(&*U)) {
- auto OperRange = UsePhi->operands();
- tryRemoveTrivialPhi(UsePhi, OperRange);
- }
- }
- return Res;
-}
-
-// Eliminate trivial phis.
-// Phis are trivial if they are defined either by themselves or by all the
-// same argument, i.e., phi(a, a), b = phi(a, b), or c = phi(a, a, c).
-// We recursively try to remove them.
-template <class RangeType>
-MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi,
- RangeType &Operands) {
- // Detect equal or self arguments
- MemoryAccess *Same = nullptr;
- for (auto &Op : Operands) {
- // If the same or self, good so far
- if (Op == Phi || Op == Same)
- continue;
- // Not the same: return the phi, since we can't eliminate it.
- if (Same)
- return Phi;
- Same = cast<MemoryAccess>(Op);
- }
- // We never found a non-self reference; the phi is undef.
- if (Same == nullptr)
- return MSSA->getLiveOnEntryDef();
- if (Phi) {
- Phi->replaceAllUsesWith(Same);
- removeMemoryAccess(Phi);
- }
-
- // We should only end up recursing if we replaced something, in which case
- // we may have made other phis trivial.
- return recursePhi(Same);
-}
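
The triviality test above is self-contained enough to demonstrate standalone (plain C++, not LLVM code): a phi is trivial exactly when every operand is either the phi itself or one common value, so phi(a, a) and b = phi(a, b) both reduce to a, and a phi that only ever references itself reduces to undef (liveOnEntry, in MemorySSA terms).

#include <cassert>
#include <vector>

struct Value {};

// Returns the unique non-self operand if the phi is trivial, or nullptr if
// the phi only referenced itself (the undef case). Trivial is set to false
// only when two distinct non-self values are found, i.e. a real merge.
const Value *trySimplifyPhi(const Value *Self,
                            const std::vector<const Value *> &Ops,
                            bool &Trivial) {
  const Value *Same = nullptr;
  for (const Value *Op : Ops) {
    if (Op == Self || Op == Same)
      continue; // self or a repeated value: still potentially trivial
    if (Same) {
      Trivial = false; // two distinct non-self values: keep the phi
      return nullptr;
    }
    Same = Op;
  }
  Trivial = true;
  return Same;
}

int main() {
  Value A, B, Phi;
  bool Trivial;
  assert(trySimplifyPhi(&Phi, {&A, &A}, Trivial) == &A && Trivial);
  assert(trySimplifyPhi(&Phi, {&A, &Phi}, Trivial) == &A && Trivial);
  trySimplifyPhi(&Phi, {&A, &B}, Trivial);
  assert(!Trivial);
  return 0;
}
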
-
-void MemorySSAUpdater::insertUse(MemoryUse *MU) {
- InsertedPHIs.clear();
- MU->setDefiningAccess(getPreviousDef(MU));
- // Unlike for defs, there is no extra work to do. Because uses do not create
- // new may-defs, there are only two cases:
- //
- // 1. There was a def already below us, and therefore, we should not have
- // created a phi node because it was already needed for the def.
- //
- // 2. There is no def below us, and therefore, there is no extra renaming work
- // to do.
-}
-
-// Set every incoming edge {BB, MP->getBlock()} of MemoryPhi MP to NewDef.
-void setMemoryPhiValueForBlock(MemoryPhi *MP, const BasicBlock *BB,
- MemoryAccess *NewDef) {
- // Replace every operand whose incoming block is BB with the new defining
- // access.
- int i = MP->getBasicBlockIndex(BB);
- assert(i != -1 && "Should have found the basic block in the phi");
- // We can't just compare i against getNumOperands since one is signed and
- // the other is not, so use i to index into the block iterator instead.
- for (auto BBIter = MP->block_begin() + i; BBIter != MP->block_end();
- ++BBIter) {
- if (*BBIter != BB)
- break;
- MP->setIncomingValue(i, NewDef);
- ++i;
- }
-}
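
Since the contiguity assumption above is easy to miss, here is a standalone sketch (plain C++, not LLVM code) of the same operation stated without it: given the phi's parallel block/value arrays, rewrite every entry whose incoming block is BB. The real code starts from getBasicBlockIndex and relies on identical incoming blocks being stored contiguously, which lets it stop early.

#include <cassert>
#include <vector>

struct Block {};
struct Def {};

// Blocks[i] is the incoming block for Values[i], as in MemoryPhi.
void setPhiValueForBlock(const std::vector<const Block *> &Blocks,
                         std::vector<const Def *> &Values, const Block *BB,
                         const Def *NewDef) {
  for (size_t I = 0, E = Blocks.size(); I != E; ++I)
    if (Blocks[I] == BB)
      Values[I] = NewDef; // rewrite every edge coming from BB
}

int main() {
  Block B1, B2;
  Def Old, New;
  std::vector<const Block *> Blocks = {&B1, &B2, &B2}; // two edges from B2
  std::vector<const Def *> Values = {&Old, &Old, &Old};
  setPhiValueForBlock(Blocks, Values, &B2, &New);
  assert(Values[0] == &Old && Values[1] == &New && Values[2] == &New);
  return 0;
}
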
-
-// A brief description of the algorithm:
-// First, we compute what should define the new def, using the SSA
-// construction algorithm.
-// Then, we update the defs below us (and any new phi nodes) in the graph to
-// point to the correct new defs, to ensure we only have one variable, and no
-// disconnected stores.
-void MemorySSAUpdater::insertDef(MemoryDef *MD, bool RenameUses) {
- InsertedPHIs.clear();
-
- // See if we had a local def, and if not, go hunting.
- MemoryAccess *DefBefore = getPreviousDefInBlock(MD);
- bool DefBeforeSameBlock = DefBefore != nullptr;
- if (!DefBefore)
- DefBefore = getPreviousDefRecursive(MD->getBlock());
-
- // There is a def before us, which means we can replace any store/phi uses
- // of that thing with us, since we are in the way of whatever was there
- // before.
- // We now define that def's MemoryDefs and MemoryPhis.
- if (DefBeforeSameBlock) {
- for (auto UI = DefBefore->use_begin(), UE = DefBefore->use_end();
- UI != UE;) {
- Use &U = *UI++;
- // Leave the uses alone
- if (isa<MemoryUse>(U.getUser()))
- continue;
- U.set(MD);
- }
- }
-
- // and that def is now our defining access.
- // We change them in this order because otherwise we would appear in the
- // use list above and reset ourselves.
- MD->setDefiningAccess(DefBefore);
-
- SmallVector<MemoryAccess *, 8> FixupList(InsertedPHIs.begin(),
- InsertedPHIs.end());
- if (!DefBeforeSameBlock) {
- // If there was a local def before us, we must have the same effect it
- // did. Because every may-def is the same, any phis/etc. we would create,
- // it would also have created. If there was no local def before us, we
- // performed a global update, and have to search all successors to make
- // sure we update the first def in each of them (following all paths until
- // we hit the first def along each path). This may also insert phi nodes.
- // TODO: There are other cases where we can skip this work, such as when we
- // have a single successor and only used a straight line of single-pred
- // blocks backwards to find the def. To make that work, we'd have to track
- // whether getPreviousDefRecursive only ever used the single-predecessor
- // case. These types of paths also only exist in between CFG simplifications.
- FixupList.push_back(MD);
- }
-
- while (!FixupList.empty()) {
- unsigned StartingPHISize = InsertedPHIs.size();
- fixupDefs(FixupList);
- FixupList.clear();
- // Put any new phis on the fixup list, and process them
- FixupList.append(InsertedPHIs.end() - StartingPHISize, InsertedPHIs.end());
- }
- // Now that all fixups are done, rename all uses if we are asked.
- if (RenameUses) {
- SmallPtrSet<BasicBlock *, 16> Visited;
- BasicBlock *StartBlock = MD->getBlock();
- // We are guaranteed there is a def in the block, because we just got it
- // handed to us in this function.
- MemoryAccess *FirstDef = &*MSSA->getWritableBlockDefs(StartBlock)->begin();
- // Convert to an incoming value if it's a MemoryDef; a phi *is* already an
- // incoming value.
- if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
- FirstDef = MD->getDefiningAccess();
-
- MSSA->renamePass(MD->getBlock(), FirstDef, Visited);
- // We just inserted a phi into this block, so the incoming value will become
- // the phi anyway; it does not matter what we pass.
- for (auto *MP : InsertedPHIs)
- MSSA->renamePass(MP->getBlock(), nullptr, Visited);
- }
-}
-
-void MemorySSAUpdater::fixupDefs(const SmallVectorImpl<MemoryAccess *> &Vars) {
- SmallPtrSet<const BasicBlock *, 8> Seen;
- SmallVector<const BasicBlock *, 16> Worklist;
- for (auto *NewDef : Vars) {
- // First, see if there is a local def after the operand.
- auto *Defs = MSSA->getWritableBlockDefs(NewDef->getBlock());
- auto DefIter = NewDef->getDefsIterator();
-
- // If there is a local def after us, we only have to rename that.
- if (++DefIter != Defs->end()) {
- cast<MemoryDef>(DefIter)->setDefiningAccess(NewDef);
- continue;
- }
-
- // Otherwise, we need to search down through the CFG.
- // For each of our successors, handle it directly if there is a phi, or
- // place it on the fixup worklist.
- for (const auto *S : successors(NewDef->getBlock())) {
- if (auto *MP = MSSA->getMemoryAccess(S))
- setMemoryPhiValueForBlock(MP, NewDef->getBlock(), NewDef);
- else
- Worklist.push_back(S);
- }
-
- while (!Worklist.empty()) {
- const BasicBlock *FixupBlock = Worklist.back();
- Worklist.pop_back();
-
- // Get the first def in the block that isn't a phi node.
- if (auto *Defs = MSSA->getWritableBlockDefs(FixupBlock)) {
- auto *FirstDef = &*Defs->begin();
- // The loop above and below should have taken care of phi nodes
- assert(!isa<MemoryPhi>(FirstDef) &&
- "Should have already handled phi nodes!");
- // We are now this def's defining access; make sure we actually dominate
- // it.
- assert(MSSA->dominates(NewDef, FirstDef) &&
- "Should have dominated the new access");
-
- // This may insert new phi nodes, because we are not guaranteed the
- // block we are processing has a single pred, and depending where the
- // store was inserted, it may require phi nodes below it.
- cast<MemoryDef>(FirstDef)->setDefiningAccess(getPreviousDef(FirstDef));
- return;
- }
- // We didn't find a def, so we must continue.
- for (const auto *S : successors(FixupBlock)) {
- // If there is a phi node, handle it.
- // Otherwise, put the block on the worklist
- if (auto *MP = MSSA->getMemoryAccess(S))
- setMemoryPhiValueForBlock(MP, FixupBlock, NewDef);
- else {
- // If we cycle, we should have ended up at a phi node that we already
- // processed. FIXME: Double check this
- if (!Seen.insert(S).second)
- continue;
- Worklist.push_back(S);
- }
- }
- }
- }
-}
-
-// Move What before Where in the MemorySSA IR.
-template <class WhereType>
-void MemorySSAUpdater::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
- WhereType Where) {
- // Replace all our users with our defining access.
- What->replaceAllUsesWith(What->getDefiningAccess());
-
- // Let MemorySSA take care of moving it around in the lists.
- MSSA->moveTo(What, BB, Where);
-
- // Now reinsert it into the IR and do whatever fixups are needed.
- if (auto *MD = dyn_cast<MemoryDef>(What))
- insertDef(MD);
- else
- insertUse(cast<MemoryUse>(What));
-}
-
-// Move What before Where in the MemorySSA IR.
-void MemorySSAUpdater::moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
- moveTo(What, Where->getBlock(), Where->getIterator());
-}
-
-// Move What after Where in the MemorySSA IR.
-void MemorySSAUpdater::moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
- moveTo(What, Where->getBlock(), ++Where->getIterator());
-}
-
-void MemorySSAUpdater::moveToPlace(MemoryUseOrDef *What, BasicBlock *BB,
- MemorySSA::InsertionPlace Where) {
- return moveTo(What, BB, Where);
-}
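
A hedged usage sketch of the movers above, for a transform hoisting a load over a store it has proven independent; hoistLoad is an invented helper, and both instructions are assumed to already have MemorySSA accesses. The IR-level move is shown first, although the updater works off MemorySSA's own access lists; insertDef/insertUse then re-derive the defining access from the new position.

void hoistLoad(MemorySSA &MSSA, MemorySSAUpdater &Updater, Instruction *LoadI,
               Instruction *StoreI) {
  LoadI->moveBefore(StoreI); // move the instruction in the IR itself
  MemoryUseOrDef *LoadAcc = MSSA.getMemoryAccess(LoadI);
  MemoryUseOrDef *StoreAcc = MSSA.getMemoryAccess(StoreI);
  Updater.moveBefore(LoadAcc, StoreAcc); // fix up the MemorySSA side
}
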
-
-/// \brief If all arguments of a MemoryPHI are defined by the same incoming
-/// argument, return that argument.
-static MemoryAccess *onlySingleValue(MemoryPhi *MP) {
- MemoryAccess *MA = nullptr;
-
- for (auto &Arg : MP->operands()) {
- if (!MA)
- MA = cast<MemoryAccess>(Arg);
- else if (MA != Arg)
- return nullptr;
- }
- return MA;
-}
-void MemorySSAUpdater::removeMemoryAccess(MemoryAccess *MA) {
- assert(!MSSA->isLiveOnEntryDef(MA) &&
- "Trying to remove the live on entry def");
- // We can only delete phi nodes if they have no uses, or we can replace all
- // uses with a single definition.
- MemoryAccess *NewDefTarget = nullptr;
- if (MemoryPhi *MP = dyn_cast<MemoryPhi>(MA)) {
- // Note that it is sufficient to know that all edges of the phi node have
- // the same argument. If they do, by the definition of dominance frontiers
- // (which we used to place this phi), that argument must dominate this phi,
- // and thus, must dominate the phi's uses, and so we will not hit the assert
- // below.
- NewDefTarget = onlySingleValue(MP);
- assert((NewDefTarget || MP->use_empty()) &&
- "We can't delete this memory phi");
- } else {
- NewDefTarget = cast<MemoryUseOrDef>(MA)->getDefiningAccess();
- }
-
- // Re-point the uses at our defining access
- if (!isa<MemoryUse>(MA) && !MA->use_empty()) {
- // Reset optimized on users of this store, and reset the uses.
- // A few notes:
- // 1. This is a slightly modified version of RAUW to avoid walking the
- // uses twice here.
- // 2. If we wanted to be complete, we would have to reset the optimized
- // flags on users of phi nodes if doing the below makes a phi node have all
- // the same arguments. Instead, we prefer users to call removeMemoryAccess on
- // those phi nodes, because doing it here would be N^3.
- if (MA->hasValueHandle())
- ValueHandleBase::ValueIsRAUWd(MA, NewDefTarget);
- // Note: We assume MemorySSA is not used in metadata since it's not really
- // part of the IR.
-
- while (!MA->use_empty()) {
- Use &U = *MA->use_begin();
- if (auto *MUD = dyn_cast<MemoryUseOrDef>(U.getUser()))
- MUD->resetOptimized();
- U.set(NewDefTarget);
- }
- }
-
- // The call below to erase will destroy MA, so we can't change the order in
- // which we do things here
- MSSA->removeFromLookups(MA);
- MSSA->removeFromLists(MA);
-}
-
-MemoryAccess *MemorySSAUpdater::createMemoryAccessInBB(
- Instruction *I, MemoryAccess *Definition, const BasicBlock *BB,
- MemorySSA::InsertionPlace Point) {
- MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
- MSSA->insertIntoListsForBlock(NewAccess, BB, Point);
- return NewAccess;
-}
-
-MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessBefore(
- Instruction *I, MemoryAccess *Definition, MemoryUseOrDef *InsertPt) {
- assert(I->getParent() == InsertPt->getBlock() &&
- "New and old access must be in the same block");
- MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
- MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
- InsertPt->getIterator());
- return NewAccess;
-}
-
-MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessAfter(
- Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt) {
- assert(I->getParent() == InsertPt->getBlock() &&
- "New and old access must be in the same block");
- MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
- MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
- ++InsertPt->getIterator());
- return NewAccess;
-}
-
-} // namespace llvm
Modified: llvm/trunk/lib/Transforms/Utils/Utils.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Utils/Utils.cpp?rev=299980&r1=299979&r2=299980&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Utils/Utils.cpp (original)
+++ llvm/trunk/lib/Transforms/Utils/Utils.cpp Tue Apr 11 15:06:36 2017
@@ -35,8 +35,6 @@ void llvm::initializeTransformUtils(Pass
initializeUnifyFunctionExitNodesPass(Registry);
initializeInstSimplifierPass(Registry);
initializeMetaRenamerPass(Registry);
- initializeMemorySSAWrapperPassPass(Registry);
- initializeMemorySSAPrinterLegacyPassPass(Registry);
initializeStripGCRelocatesPass(Registry);
initializePredicateInfoPrinterLegacyPassPass(Registry);
}
Copied: llvm/trunk/test/Analysis/MemorySSA/assume.ll (from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/assume.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/MemorySSA/assume.ll?p2=llvm/trunk/test/Analysis/MemorySSA/assume.ll&p1=llvm/trunk/test/Transforms/Util/MemorySSA/assume.ll&r1=299975&r2=299980&rev=299980&view=diff
==============================================================================
(empty)
Copied: llvm/trunk/test/Analysis/MemorySSA/atomic-clobber.ll (from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/atomic-clobber.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/MemorySSA/atomic-clobber.ll?p2=llvm/trunk/test/Analysis/MemorySSA/atomic-clobber.ll&p1=llvm/trunk/test/Transforms/Util/MemorySSA/atomic-clobber.ll&r1=299975&r2=299980&rev=299980&view=diff
==============================================================================
(empty)
Copied: llvm/trunk/test/Analysis/MemorySSA/basicaa-memcpy.ll (from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/basicaa-memcpy.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/MemorySSA/basicaa-memcpy.ll?p2=llvm/trunk/test/Analysis/MemorySSA/basicaa-memcpy.ll&p1=llvm/trunk/test/Transforms/Util/MemorySSA/basicaa-memcpy.ll&r1=299975&r2=299980&rev=299980&view=diff
==============================================================================
(empty)
Copied: llvm/trunk/test/Analysis/MemorySSA/constant-memory.ll (from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/constant-memory.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/MemorySSA/constant-memory.ll?p2=llvm/trunk/test/Analysis/MemorySSA/constant-memory.ll&p1=llvm/trunk/test/Transforms/Util/MemorySSA/constant-memory.ll&r1=299975&r2=299980&rev=299980&view=diff
==============================================================================
(empty)
Copied: llvm/trunk/test/Analysis/MemorySSA/cyclicphi.ll (from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/cyclicphi.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/MemorySSA/cyclicphi.ll?p2=llvm/trunk/test/Analysis/MemorySSA/cyclicphi.ll&p1=llvm/trunk/test/Transforms/Util/MemorySSA/cyclicphi.ll&r1=299975&r2=299980&rev=299980&view=diff
==============================================================================
(empty)
Copied: llvm/trunk/test/Analysis/MemorySSA/forward-unreachable.ll (from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/forward-unreachable.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/MemorySSA/forward-unreachable.ll?p2=llvm/trunk/test/Analysis/MemorySSA/forward-unreachable.ll&p1=llvm/trunk/test/Transforms/Util/MemorySSA/forward-unreachable.ll&r1=299975&r2=299980&rev=299980&view=diff
==============================================================================
(empty)
Copied: llvm/trunk/test/Analysis/MemorySSA/function-clobber.ll (from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/function-clobber.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/MemorySSA/function-clobber.ll?p2=llvm/trunk/test/Analysis/MemorySSA/function-clobber.ll&p1=llvm/trunk/test/Transforms/Util/MemorySSA/function-clobber.ll&r1=299975&r2=299980&rev=299980&view=diff
==============================================================================
(empty)
Copied: llvm/trunk/test/Analysis/MemorySSA/function-mem-attrs.ll (from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/function-mem-attrs.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/MemorySSA/function-mem-attrs.ll?p2=llvm/trunk/test/Analysis/MemorySSA/function-mem-attrs.ll&p1=llvm/trunk/test/Transforms/Util/MemorySSA/function-mem-attrs.ll&r1=299975&r2=299980&rev=299980&view=diff
==============================================================================
(empty)
Copied: llvm/trunk/test/Analysis/MemorySSA/invariant-groups.ll (from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/invariant-groups.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/MemorySSA/invariant-groups.ll?p2=llvm/trunk/test/Analysis/MemorySSA/invariant-groups.ll&p1=llvm/trunk/test/Transforms/Util/MemorySSA/invariant-groups.ll&r1=299975&r2=299980&rev=299980&view=diff
==============================================================================
(empty)
Copied: llvm/trunk/test/Analysis/MemorySSA/lifetime-simple.ll (from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/lifetime-simple.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/MemorySSA/lifetime-simple.ll?p2=llvm/trunk/test/Analysis/MemorySSA/lifetime-simple.ll&p1=llvm/trunk/test/Transforms/Util/MemorySSA/lifetime-simple.ll&r1=299975&r2=299980&rev=299980&view=diff
==============================================================================
(empty)
Copied: llvm/trunk/test/Analysis/MemorySSA/load-invariant.ll (from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/load-invariant.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/MemorySSA/load-invariant.ll?p2=llvm/trunk/test/Analysis/MemorySSA/load-invariant.ll&p1=llvm/trunk/test/Transforms/Util/MemorySSA/load-invariant.ll&r1=299975&r2=299980&rev=299980&view=diff
==============================================================================
(empty)
Copied: llvm/trunk/test/Analysis/MemorySSA/many-dom-backedge.ll (from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/many-dom-backedge.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/MemorySSA/many-dom-backedge.ll?p2=llvm/trunk/test/Analysis/MemorySSA/many-dom-backedge.ll&p1=llvm/trunk/test/Transforms/Util/MemorySSA/many-dom-backedge.ll&r1=299975&r2=299980&rev=299980&view=diff
==============================================================================
(empty)
Copied: llvm/trunk/test/Analysis/MemorySSA/many-doms.ll (from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/many-doms.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/MemorySSA/many-doms.ll?p2=llvm/trunk/test/Analysis/MemorySSA/many-doms.ll&p1=llvm/trunk/test/Transforms/Util/MemorySSA/many-doms.ll&r1=299975&r2=299980&rev=299980&view=diff
==============================================================================
(empty)
Copied: llvm/trunk/test/Analysis/MemorySSA/multi-edges.ll (from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/multi-edges.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/MemorySSA/multi-edges.ll?p2=llvm/trunk/test/Analysis/MemorySSA/multi-edges.ll&p1=llvm/trunk/test/Transforms/Util/MemorySSA/multi-edges.ll&r1=299975&r2=299980&rev=299980&view=diff
==============================================================================
(empty)
Copied: llvm/trunk/test/Analysis/MemorySSA/multiple-backedges-hal.ll (from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/multiple-backedges-hal.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/MemorySSA/multiple-backedges-hal.ll?p2=llvm/trunk/test/Analysis/MemorySSA/multiple-backedges-hal.ll&p1=llvm/trunk/test/Transforms/Util/MemorySSA/multiple-backedges-hal.ll&r1=299975&r2=299980&rev=299980&view=diff
==============================================================================
(empty)
Copied: llvm/trunk/test/Analysis/MemorySSA/multiple-locations.ll (from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/multiple-locations.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/MemorySSA/multiple-locations.ll?p2=llvm/trunk/test/Analysis/MemorySSA/multiple-locations.ll&p1=llvm/trunk/test/Transforms/Util/MemorySSA/multiple-locations.ll&r1=299975&r2=299980&rev=299980&view=diff
==============================================================================
(empty)
Copied: llvm/trunk/test/Analysis/MemorySSA/no-disconnected.ll (from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/no-disconnected.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/MemorySSA/no-disconnected.ll?p2=llvm/trunk/test/Analysis/MemorySSA/no-disconnected.ll&p1=llvm/trunk/test/Transforms/Util/MemorySSA/no-disconnected.ll&r1=299975&r2=299980&rev=299980&view=diff
==============================================================================
(empty)
Copied: llvm/trunk/test/Analysis/MemorySSA/optimize-use.ll (from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/optimize-use.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/MemorySSA/optimize-use.ll?p2=llvm/trunk/test/Analysis/MemorySSA/optimize-use.ll&p1=llvm/trunk/test/Transforms/Util/MemorySSA/optimize-use.ll&r1=299975&r2=299980&rev=299980&view=diff
==============================================================================
(empty)
Copied: llvm/trunk/test/Analysis/MemorySSA/phi-translation.ll (from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/phi-translation.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/MemorySSA/phi-translation.ll?p2=llvm/trunk/test/Analysis/MemorySSA/phi-translation.ll&p1=llvm/trunk/test/Transforms/Util/MemorySSA/phi-translation.ll&r1=299975&r2=299980&rev=299980&view=diff
==============================================================================
(empty)
Copied: llvm/trunk/test/Analysis/MemorySSA/pr28880.ll (from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/pr28880.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/MemorySSA/pr28880.ll?p2=llvm/trunk/test/Analysis/MemorySSA/pr28880.ll&p1=llvm/trunk/test/Transforms/Util/MemorySSA/pr28880.ll&r1=299975&r2=299980&rev=299980&view=diff
==============================================================================
(empty)
Copied: llvm/trunk/test/Analysis/MemorySSA/ptr-const-mem.ll (from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/ptr-const-mem.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/MemorySSA/ptr-const-mem.ll?p2=llvm/trunk/test/Analysis/MemorySSA/ptr-const-mem.ll&p1=llvm/trunk/test/Transforms/Util/MemorySSA/ptr-const-mem.ll&r1=299975&r2=299980&rev=299980&view=diff
==============================================================================
(empty)
Copied: llvm/trunk/test/Analysis/MemorySSA/volatile-clobber.ll (from r299975, llvm/trunk/test/Transforms/Util/MemorySSA/volatile-clobber.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/MemorySSA/volatile-clobber.ll?p2=llvm/trunk/test/Analysis/MemorySSA/volatile-clobber.ll&p1=llvm/trunk/test/Transforms/Util/MemorySSA/volatile-clobber.ll&r1=299975&r2=299980&rev=299980&view=diff
==============================================================================
(empty)
Removed: llvm/trunk/test/Transforms/Util/MemorySSA/assume.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Util/MemorySSA/assume.ll?rev=299979&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/Util/MemorySSA/assume.ll (original)
+++ llvm/trunk/test/Transforms/Util/MemorySSA/assume.ll (removed)
@@ -1,19 +0,0 @@
-; RUN: opt -basicaa -memoryssa -analyze < %s 2>&1 | FileCheck %s
-; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
-;
-; Ensures that assumes are treated as not reading or writing memory.
-
-declare void @llvm.assume(i1)
-
-define i32 @foo(i32* %a, i32* %b, i1 %c) {
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: store i32 4
- store i32 4, i32* %a, align 4
-; CHECK-NOT: MemoryDef
-; CHECK: call void @llvm.assume
- call void @llvm.assume(i1 %c)
-; CHECK: MemoryUse(1)
-; CHECK-NEXT: %1 = load i32
- %1 = load i32, i32* %a, align 4
- ret i32 %1
-}
Removed: llvm/trunk/test/Transforms/Util/MemorySSA/atomic-clobber.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Util/MemorySSA/atomic-clobber.ll?rev=299979&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/Util/MemorySSA/atomic-clobber.ll (original)
+++ llvm/trunk/test/Transforms/Util/MemorySSA/atomic-clobber.ll (removed)
@@ -1,119 +0,0 @@
-; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
-; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
-;
-; Ensures that atomic loads count as MemoryDefs
-
-; CHECK-LABEL: define i32 @foo
-define i32 @foo(i32* %a, i32* %b) {
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: store i32 4
- store i32 4, i32* %a, align 4
-; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: %1 = load atomic i32
- %1 = load atomic i32, i32* %b acquire, align 4
-; CHECK: MemoryUse(2)
-; CHECK-NEXT: %2 = load i32
- %2 = load i32, i32* %a, align 4
- %3 = add i32 %1, %2
- ret i32 %3
-}
-
-; CHECK-LABEL: define void @bar
-define void @bar(i32* %a) {
-; CHECK: MemoryUse(liveOnEntry)
-; CHECK-NEXT: load atomic i32, i32* %a unordered, align 4
- load atomic i32, i32* %a unordered, align 4
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: load atomic i32, i32* %a monotonic, align 4
- load atomic i32, i32* %a monotonic, align 4
-; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: load atomic i32, i32* %a acquire, align 4
- load atomic i32, i32* %a acquire, align 4
-; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: load atomic i32, i32* %a seq_cst, align 4
- load atomic i32, i32* %a seq_cst, align 4
- ret void
-}
-
-; CHECK-LABEL: define void @baz
-define void @baz(i32* %a) {
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: %1 = load atomic i32
- %1 = load atomic i32, i32* %a acquire, align 4
-; CHECK: MemoryUse(1)
-; CHECK-NEXT: %2 = load atomic i32, i32* %a unordered, align 4
- %2 = load atomic i32, i32* %a unordered, align 4
-; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: %3 = load atomic i32, i32* %a monotonic, align 4
- %3 = load atomic i32, i32* %a monotonic, align 4
- ret void
-}
-
-; CHECK-LABEL: define void @fences
-define void @fences(i32* %a) {
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: fence acquire
- fence acquire
-; CHECK: MemoryUse(1)
-; CHECK-NEXT: %1 = load i32, i32* %a
- %1 = load i32, i32* %a
-
-; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: fence release
- fence release
-; CHECK: MemoryUse(2)
-; CHECK-NEXT: %2 = load i32, i32* %a
- %2 = load i32, i32* %a
-
-; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: fence acq_rel
- fence acq_rel
-; CHECK: MemoryUse(3)
-; CHECK-NEXT: %3 = load i32, i32* %a
- %3 = load i32, i32* %a
-
-; CHECK: 4 = MemoryDef(3)
-; CHECK-NEXT: fence seq_cst
- fence seq_cst
-; CHECK: MemoryUse(4)
-; CHECK-NEXT: %4 = load i32, i32* %a
- %4 = load i32, i32* %a
- ret void
-}
-
-; CHECK-LABEL: define void @seq_cst_clobber
-define void @seq_cst_clobber(i32* noalias %a, i32* noalias %b) {
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: %1 = load atomic i32, i32* %a monotonic, align 4
- load atomic i32, i32* %a monotonic, align 4
-
-; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: %2 = load atomic i32, i32* %a seq_cst, align 4
- load atomic i32, i32* %a seq_cst, align 4
-
-; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: load atomic i32, i32* %a monotonic, align 4
- load atomic i32, i32* %a monotonic, align 4
-
- ret void
-}
-
-; Ensure that AA hands us MRI_Mod on unreorderable atomic ops.
-;
-; This test is a bit implementation-specific. In particular, it depends on the
-; fact that we pass cmpxchg-load queries to AA without trying to reason about
-; them on our own.
-;
-; If AA gets more aggressive, we can find another way.
-;
-; CHECK-LABEL: define void @check_aa_is_sane
-define void @check_aa_is_sane(i32* noalias %a, i32* noalias %b) {
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: cmpxchg i32* %a, i32 0, i32 1 acquire acquire
- cmpxchg i32* %a, i32 0, i32 1 acquire acquire
-; CHECK: MemoryUse(1)
-; CHECK-NEXT: load i32, i32* %b, align 4
- load i32, i32* %b, align 4
-
- ret void
-}
Removed: llvm/trunk/test/Transforms/Util/MemorySSA/basicaa-memcpy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Util/MemorySSA/basicaa-memcpy.ll?rev=299979&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/Util/MemorySSA/basicaa-memcpy.ll (original)
+++ llvm/trunk/test/Transforms/Util/MemorySSA/basicaa-memcpy.ll (removed)
@@ -1,16 +0,0 @@
-; RUN: opt -disable-output -basicaa -print-memoryssa %s 2>&1 | FileCheck %s
-
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
-
-define void @source_clobber(i8* %a, i8* %b) {
-; CHECK-LABEL: @source_clobber(
-; CHECK-NEXT: ; 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 128, i32 1, i1 false)
-; CHECK-NEXT: ; MemoryUse(liveOnEntry)
-; CHECK-NEXT: [[X:%.*]] = load i8, i8* %b
-; CHECK-NEXT: ret void
-;
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 128, i32 1, i1 false)
- %x = load i8, i8* %b
- ret void
-}
Removed: llvm/trunk/test/Transforms/Util/MemorySSA/constant-memory.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Util/MemorySSA/constant-memory.ll?rev=299979&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/Util/MemorySSA/constant-memory.ll (original)
+++ llvm/trunk/test/Transforms/Util/MemorySSA/constant-memory.ll (removed)
@@ -1,41 +0,0 @@
-; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
-;
-; Things that BasicAA can prove point to constant memory should be
-; liveOnEntry as well.
-
-declare void @clobberAllTheThings()
-
-@str = private unnamed_addr constant [2 x i8] c"hi"
-
-define i8 @foo() {
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: call void @clobberAllTheThings()
- call void @clobberAllTheThings()
- %1 = getelementptr [2 x i8], [2 x i8]* @str, i64 0, i64 0
-; CHECK: MemoryUse(liveOnEntry)
-; CHECK-NEXT: %2 = load i8
- %2 = load i8, i8* %1, align 1
- %3 = getelementptr [2 x i8], [2 x i8]* @str, i64 0, i64 1
-; CHECK: MemoryUse(liveOnEntry)
-; CHECK-NEXT: %4 = load i8
- %4 = load i8, i8* %3, align 1
- %5 = add i8 %2, %4
- ret i8 %5
-}
-
-define i8 @select(i1 %b) {
- %1 = alloca i8, align 1
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: store i8 0
- store i8 0, i8* %1, align 1
-
-; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: call void @clobberAllTheThings()
- call void @clobberAllTheThings()
- %2 = getelementptr [2 x i8], [2 x i8]* @str, i64 0, i64 0
- %3 = select i1 %b, i8* %2, i8* %1
-; CHECK: MemoryUse(2)
-; CHECK-NEXT: %4 = load i8
- %4 = load i8, i8* %3, align 1
- ret i8 %4
-}
Removed: llvm/trunk/test/Transforms/Util/MemorySSA/cyclicphi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Util/MemorySSA/cyclicphi.ll?rev=299979&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/Util/MemorySSA/cyclicphi.ll (original)
+++ llvm/trunk/test/Transforms/Util/MemorySSA/cyclicphi.ll (removed)
@@ -1,123 +0,0 @@
-; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
-; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
-
-%struct.hoge = type { i32, %struct.widget }
-%struct.widget = type { i64 }
-
-define hidden void @quux(%struct.hoge *%f) align 2 {
- %tmp = getelementptr inbounds %struct.hoge, %struct.hoge* %f, i64 0, i32 1, i32 0
- %tmp24 = getelementptr inbounds %struct.hoge, %struct.hoge* %f, i64 0, i32 1
- %tmp25 = bitcast %struct.widget* %tmp24 to i64**
- br label %bb26
-
-bb26: ; preds = %bb77, %0
-; CHECK: 2 = MemoryPhi({%0,liveOnEntry},{bb77,3})
-; CHECK-NEXT: br i1 undef, label %bb68, label %bb77
- br i1 undef, label %bb68, label %bb77
-
-bb68: ; preds = %bb26
-; CHECK: MemoryUse(liveOnEntry)
-; CHECK-NEXT: %tmp69 = load i64, i64* null, align 8
- %tmp69 = load i64, i64* null, align 8
-; CHECK: 1 = MemoryDef(2)
-; CHECK-NEXT: store i64 %tmp69, i64* %tmp, align 8
- store i64 %tmp69, i64* %tmp, align 8
- br label %bb77
-
-bb77: ; preds = %bb68, %bb26
-; CHECK: 3 = MemoryPhi({bb26,2},{bb68,1})
-; CHECK: MemoryUse(3)
-; CHECK-NEXT: %tmp78 = load i64*, i64** %tmp25, align 8
- %tmp78 = load i64*, i64** %tmp25, align 8
- %tmp79 = getelementptr inbounds i64, i64* %tmp78, i64 undef
- br label %bb26
-}
-
-; CHECK-LABEL: define void @quux_skip
-define void @quux_skip(%struct.hoge* noalias %f, i64* noalias %g) align 2 {
- %tmp = getelementptr inbounds %struct.hoge, %struct.hoge* %f, i64 0, i32 1, i32 0
- %tmp24 = getelementptr inbounds %struct.hoge, %struct.hoge* %f, i64 0, i32 1
- %tmp25 = bitcast %struct.widget* %tmp24 to i64**
- br label %bb26
-
-bb26: ; preds = %bb77, %0
-; CHECK: 2 = MemoryPhi({%0,liveOnEntry},{bb77,3})
-; CHECK-NEXT: br i1 undef, label %bb68, label %bb77
- br i1 undef, label %bb68, label %bb77
-
-bb68: ; preds = %bb26
-; CHECK: MemoryUse(2)
-; CHECK-NEXT: %tmp69 = load i64, i64* %g, align 8
- %tmp69 = load i64, i64* %g, align 8
-; CHECK: 1 = MemoryDef(2)
-; CHECK-NEXT: store i64 %tmp69, i64* %g, align 8
- store i64 %tmp69, i64* %g, align 8
- br label %bb77
-
-bb77: ; preds = %bb68, %bb26
-; CHECK: 3 = MemoryPhi({bb26,2},{bb68,1})
-; CHECK: MemoryUse(liveOnEntry)
-; CHECK-NEXT: %tmp78 = load i64*, i64** %tmp25, align 8
- %tmp78 = load i64*, i64** %tmp25, align 8
- br label %bb26
-}
-
-; CHECK-LABEL: define void @quux_dominated
-define void @quux_dominated(%struct.hoge* noalias %f, i64* noalias %g) align 2 {
- %tmp = getelementptr inbounds %struct.hoge, %struct.hoge* %f, i64 0, i32 1, i32 0
- %tmp24 = getelementptr inbounds %struct.hoge, %struct.hoge* %f, i64 0, i32 1
- %tmp25 = bitcast %struct.widget* %tmp24 to i64**
- br label %bb26
-
-bb26: ; preds = %bb77, %0
-; CHECK: 3 = MemoryPhi({%0,liveOnEntry},{bb77,2})
-; CHECK: MemoryUse(3)
-; CHECK-NEXT: load i64*, i64** %tmp25, align 8
- load i64*, i64** %tmp25, align 8
- br i1 undef, label %bb68, label %bb77
-
-bb68: ; preds = %bb26
-; CHECK: MemoryUse(3)
-; CHECK-NEXT: %tmp69 = load i64, i64* %g, align 8
- %tmp69 = load i64, i64* %g, align 8
-; CHECK: 1 = MemoryDef(3)
-; CHECK-NEXT: store i64 %tmp69, i64* %g, align 8
- store i64 %tmp69, i64* %g, align 8
- br label %bb77
-
-bb77: ; preds = %bb68, %bb26
-; CHECK: 4 = MemoryPhi({bb26,3},{bb68,1})
-; CHECK: 2 = MemoryDef(4)
-; CHECK-NEXT: store i64* null, i64** %tmp25, align 8
- store i64* null, i64** %tmp25, align 8
- br label %bb26
-}
-
-; CHECK-LABEL: define void @quux_nodominate
-define void @quux_nodominate(%struct.hoge* noalias %f, i64* noalias %g) align 2 {
- %tmp = getelementptr inbounds %struct.hoge, %struct.hoge* %f, i64 0, i32 1, i32 0
- %tmp24 = getelementptr inbounds %struct.hoge, %struct.hoge* %f, i64 0, i32 1
- %tmp25 = bitcast %struct.widget* %tmp24 to i64**
- br label %bb26
-
-bb26: ; preds = %bb77, %0
-; CHECK: 2 = MemoryPhi({%0,liveOnEntry},{bb77,3})
-; CHECK: MemoryUse(liveOnEntry)
-; CHECK-NEXT: load i64*, i64** %tmp25, align 8
- load i64*, i64** %tmp25, align 8
- br i1 undef, label %bb68, label %bb77
-
-bb68: ; preds = %bb26
-; CHECK: MemoryUse(2)
-; CHECK-NEXT: %tmp69 = load i64, i64* %g, align 8
- %tmp69 = load i64, i64* %g, align 8
-; CHECK: 1 = MemoryDef(2)
-; CHECK-NEXT: store i64 %tmp69, i64* %g, align 8
- store i64 %tmp69, i64* %g, align 8
- br label %bb77
-
-bb77: ; preds = %bb68, %bb26
-; CHECK: 3 = MemoryPhi({bb26,2},{bb68,1})
-; CHECK-NEXT: br label %bb26
- br label %bb26
-}
Removed: llvm/trunk/test/Transforms/Util/MemorySSA/forward-unreachable.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Util/MemorySSA/forward-unreachable.ll?rev=299979&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/Util/MemorySSA/forward-unreachable.ll (original)
+++ llvm/trunk/test/Transforms/Util/MemorySSA/forward-unreachable.ll (removed)
@@ -1,23 +0,0 @@
-; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
-target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
-
-define void @test() {
-entry:
- br i1 undef, label %split1, label %split2
-
-split1:
- store i16 undef, i16* undef, align 2
- br label %merge
-split2:
- br label %merge
-forwardunreachable:
- br label %merge
-merge:
-; The forwardunreachable block still needs an entry in the phi node,
-; because it is reverse reachable, so the CFG still has it as a
-; predecessor of the block.
-; CHECK: 3 = MemoryPhi({split1,1},{split2,liveOnEntry},{forwardunreachable,liveOnEntry})
- store i16 undef, i16* undef, align 2
- ret void
-}
-
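A hypothetical contrast, not part of the test above: if %forwardunreachable
branched to some block other than %merge, it would no longer be a CFG
predecessor of %merge, and I'd expect the MemoryPhi to carry only the two
reachable entries:

define void @test2() {
entry:
  br i1 undef, label %split1, label %split2
split1:
  store i16 undef, i16* undef, align 2
  br label %merge
split2:
  br label %merge
merge:
; 3 = MemoryPhi({split1,1},{split2,liveOnEntry})
  store i16 undef, i16* undef, align 2
  ret void
}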
Removed: llvm/trunk/test/Transforms/Util/MemorySSA/function-clobber.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Util/MemorySSA/function-clobber.ll?rev=299979&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/Util/MemorySSA/function-clobber.ll (original)
+++ llvm/trunk/test/Transforms/Util/MemorySSA/function-clobber.ll (removed)
@@ -1,54 +0,0 @@
-; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
-; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
-;
-; Ensuring that external functions without attributes are MemoryDefs
-
-@g = external global i32
-declare void @modifyG()
-
-define i32 @foo() {
-; CHECK: MemoryUse(liveOnEntry)
-; CHECK-NEXT: %1 = load i32
- %1 = load i32, i32* @g
-
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: store i32 4
- store i32 4, i32* @g, align 4
-
-; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: call void @modifyG()
- call void @modifyG()
-
-; CHECK: MemoryUse(2)
-; CHECK-NEXT: %2 = load i32
- %2 = load i32, i32* @g
- %3 = add i32 %2, %1
- ret i32 %3
-}
-
-declare void @readEverything() readonly
-declare void @clobberEverything()
-
-; CHECK-LABEL: define void @bar
-define void @bar() {
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: call void @clobberEverything()
- call void @clobberEverything()
- br i1 undef, label %if.end, label %if.then
-
-if.then:
-; CHECK: MemoryUse(1)
-; CHECK-NEXT: call void @readEverything()
- call void @readEverything()
-; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: call void @clobberEverything()
- call void @clobberEverything()
- br label %if.end
-
-if.end:
-; CHECK: 3 = MemoryPhi({%0,1},{if.then,2})
-; CHECK: MemoryUse(3)
-; CHECK-NEXT: call void @readEverything()
- call void @readEverything()
- ret void
-}
Removed: llvm/trunk/test/Transforms/Util/MemorySSA/function-mem-attrs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Util/MemorySSA/function-mem-attrs.ll?rev=299979&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/Util/MemorySSA/function-mem-attrs.ll (original)
+++ llvm/trunk/test/Transforms/Util/MemorySSA/function-mem-attrs.ll (removed)
@@ -1,59 +0,0 @@
-; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
-; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
-;
-; Test that various function attributes give us sane results.
-
-@g = external global i32
-
-declare void @readonlyFunction() readonly
-declare void @noattrsFunction()
-
-define void @readonlyAttr() {
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: store i32 0
- store i32 0, i32* @g, align 4
-
- %1 = alloca i32, align 4
-; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: store i32 0
- store i32 0, i32* %1, align 4
-
-; CHECK: MemoryUse(1)
-; CHECK-NEXT: call void @readonlyFunction()
- call void @readonlyFunction()
-
-; CHECK: MemoryUse(1)
-; CHECK-NEXT: call void @noattrsFunction() #
-; Assume that #N is readonly
- call void @noattrsFunction() readonly
-
- ; Sanity check that noattrsFunction is otherwise a MemoryDef
-; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: call void @noattrsFunction()
- call void @noattrsFunction()
- ret void
-}
-
-declare void @argMemOnly(i32*) argmemonly
-
-define void @inaccessableOnlyAttr() {
- %1 = alloca i32, align 4
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: store i32 0
- store i32 0, i32* %1, align 4
-
-; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: store i32 0
- store i32 0, i32* @g, align 4
-
-; CHECK: MemoryUse(1)
-; CHECK-NEXT: call void @argMemOnly(i32* %1) #
-; Assume that #N is readonly
- call void @argMemOnly(i32* %1) readonly
-
-; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: call void @argMemOnly(i32* %1)
- call void @argMemOnly(i32* %1)
-
- ret void
-}
Removed: llvm/trunk/test/Transforms/Util/MemorySSA/invariant-groups.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Util/MemorySSA/invariant-groups.ll?rev=299979&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/Util/MemorySSA/invariant-groups.ll (original)
+++ llvm/trunk/test/Transforms/Util/MemorySSA/invariant-groups.ll (removed)
@@ -1,285 +0,0 @@
-; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
-;
-; Currently, MemorySSA doesn't support invariant groups. So, we should ignore
-; invariant.group.barrier intrinsics entirely. We'll need to pay attention to
-; them when/if we decide to support invariant groups.
-
-@g = external global i32
-
-define i32 @foo(i32* %a) {
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: store i32 0
- store i32 0, i32* %a, align 4, !invariant.group !0
-
-; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: store i32 1
- store i32 1, i32* @g, align 4
-
- %1 = bitcast i32* %a to i8*
- %a8 = call i8* @llvm.invariant.group.barrier(i8* %1)
- %a32 = bitcast i8* %a8 to i32*
-
-; This has to be MemoryUse(2), because we can't skip the barrier based on
-; invariant.group.
-; CHECK: MemoryUse(2)
-; CHECK-NEXT: %2 = load i32
- %2 = load i32, i32* %a32, align 4, !invariant.group !0
- ret i32 %2
-}
-
-define i32 @skipBarrier(i32* %a) {
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: store i32 0
- store i32 0, i32* %a, align 4, !invariant.group !0
-
- %1 = bitcast i32* %a to i8*
- %a8 = call i8* @llvm.invariant.group.barrier(i8* %1)
- %a32 = bitcast i8* %a8 to i32*
-
-; We can skip the barrier only if the "skip" is not based on !invariant.group.
-; CHECK: MemoryUse(1)
-; CHECK-NEXT: %2 = load i32
- %2 = load i32, i32* %a32, align 4, !invariant.group !0
- ret i32 %2
-}
-
-define i32 @skipBarrier2(i32* %a) {
-
-; CHECK: MemoryUse(liveOnEntry)
-; CHECK-NEXT: %v = load i32
- %v = load i32, i32* %a, align 4, !invariant.group !0
-
- %1 = bitcast i32* %a to i8*
- %a8 = call i8* @llvm.invariant.group.barrier(i8* %1)
- %a32 = bitcast i8* %a8 to i32*
-
-; We can skip the barrier only if the "skip" is not based on !invariant.group.
-; CHECK: MemoryUse(liveOnEntry)
-; CHECK-NEXT: %v2 = load i32
- %v2 = load i32, i32* %a32, align 4, !invariant.group !0
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: store i32 1
- store i32 1, i32* @g, align 4
-
-; FIXME: based on invariant.group it should be MemoryUse(liveOnEntry)
-; CHECK: MemoryUse(1)
-; CHECK-NEXT: %v3 = load i32
- %v3 = load i32, i32* %a32, align 4, !invariant.group !0
- %add = add nsw i32 %v2, %v3
- %add2 = add nsw i32 %add, %v
- ret i32 %add2
-}
-
-define i32 @handleInvariantGroups(i32* %a) {
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: store i32 0
- store i32 0, i32* %a, align 4, !invariant.group !0
-
-; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: store i32 1
- store i32 1, i32* @g, align 4
- %1 = bitcast i32* %a to i8*
- %a8 = call i8* @llvm.invariant.group.barrier(i8* %1)
- %a32 = bitcast i8* %a8 to i32*
-
-; CHECK: MemoryUse(2)
-; CHECK-NEXT: %2 = load i32
- %2 = load i32, i32* %a32, align 4, !invariant.group !0
-
-; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: store i32 2
- store i32 2, i32* @g, align 4
-
-; FIXME: This can be changed to MemoryUse(2)
-; CHECK: MemoryUse(3)
-; CHECK-NEXT: %3 = load i32
- %3 = load i32, i32* %a32, align 4, !invariant.group !0
- %add = add nsw i32 %2, %3
- ret i32 %add
-}
-
-define i32 @loop(i1 %a) {
-entry:
- %0 = alloca i32, align 4
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: store i32 4
- store i32 4, i32* %0, !invariant.group !0
-; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: call void @clobber
- call void @clobber(i32* %0)
- br i1 %a, label %Loop.Body, label %Loop.End
-
-Loop.Body:
-; FIXME: MemoryUse(1)
-; CHECK: MemoryUse(2)
-; CHECK-NEXT: %1 = load i32
- %1 = load i32, i32* %0, !invariant.group !0
- br i1 %a, label %Loop.End, label %Loop.Body
-
-Loop.End:
-; FIXME: MemoryUse(1)
-; CHECK: MemoryUse(2)
-; CHECK-NEXT: %2 = load
- %2 = load i32, i32* %0, align 4, !invariant.group !0
- br i1 %a, label %Ret, label %Loop.Body
-
-Ret:
- ret i32 %2
-}
-
-define i8 @loop2(i8* %p) {
-entry:
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: store i8
- store i8 4, i8* %p, !invariant.group !0
-; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: call void @clobber
- call void @clobber8(i8* %p)
- %after = call i8* @llvm.invariant.group.barrier(i8* %p)
- br i1 undef, label %Loop.Body, label %Loop.End
-
-Loop.Body:
-; 4 = MemoryPhi({entry,2},{Loop.Body,3},{Loop.End,5})
-; CHECK: MemoryUse(4)
-; CHECK-NEXT: %0 = load i8
- %0 = load i8, i8* %after, !invariant.group !0
-
-; FIXME: MemoryUse(1)
-; CHECK: MemoryUse(4)
-; CHECK-NEXT: %1 = load i8
- %1 = load i8, i8* %p, !invariant.group !0
-
-; CHECK: 3 = MemoryDef(4)
- store i8 4, i8* %after, !invariant.group !0
-
- br i1 undef, label %Loop.End, label %Loop.Body
-
-Loop.End:
-; 5 = MemoryPhi({entry,2},{Loop.Body,3})
-; CHECK: MemoryUse(5)
-; CHECK-NEXT: %2 = load
- %2 = load i8, i8* %after, align 4, !invariant.group !0
-
-; FIXME: MemoryUse(1)
-; CHECK: MemoryUse(5)
-; CHECK-NEXT: %3 = load
- %3 = load i8, i8* %p, align 4, !invariant.group !0
- br i1 undef, label %Ret, label %Loop.Body
-
-Ret:
- ret i8 %3
-}
-
-
-define i8 @loop3(i8* %p) {
-entry:
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: store i8
- store i8 4, i8* %p, !invariant.group !0
-; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: call void @clobber
- call void @clobber8(i8* %p)
- %after = call i8* @llvm.invariant.group.barrier(i8* %p)
- br i1 undef, label %Loop.Body, label %Loop.End
-
-Loop.Body:
-; CHECK: 6 = MemoryPhi({entry,2},{Loop.Body,3},{Loop.next,4},{Loop.End,5})
-; CHECK: MemoryUse(6)
-; CHECK-NEXT: %0 = load i8
- %0 = load i8, i8* %after, !invariant.group !0
-
-; CHECK: 3 = MemoryDef(6)
-; CHECK-NEXT: call void @clobber8
- call void @clobber8(i8* %after)
-
-; FIXME: MemoryUse(6)
-; CHECK: MemoryUse(3)
-; CHECK-NEXT: %1 = load i8
- %1 = load i8, i8* %after, !invariant.group !0
-
- br i1 undef, label %Loop.next, label %Loop.Body
-Loop.next:
-; CHECK: 4 = MemoryDef(3)
-; CHECK-NEXT: call void @clobber8
- call void @clobber8(i8* %after)
-
-; FIXME: MemoryUse(6)
-; CHECK: MemoryUse(4)
-; CHECK-NEXT: %2 = load i8
- %2 = load i8, i8* %after, !invariant.group !0
-
- br i1 undef, label %Loop.End, label %Loop.Body
-
-Loop.End:
-; CHECK: 7 = MemoryPhi({entry,2},{Loop.next,4})
-; CHECK: MemoryUse(7)
-; CHECK-NEXT: %3 = load
- %3 = load i8, i8* %after, align 4, !invariant.group !0
-
-; CHECK: 5 = MemoryDef(7)
-; CHECK-NEXT: call void @clobber8
- call void @clobber8(i8* %after)
-
-; FIXME: MemoryUse(7)
-; CHECK: MemoryUse(5)
-; CHECK-NEXT: %4 = load
- %4 = load i8, i8* %after, align 4, !invariant.group !0
- br i1 undef, label %Ret, label %Loop.Body
-
-Ret:
- ret i8 %3
-}
-
-define i8 @loop4(i8* %p) {
-entry:
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: store i8
- store i8 4, i8* %p, !invariant.group !0
-; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: call void @clobber
- call void @clobber8(i8* %p)
- %after = call i8* @llvm.invariant.group.barrier(i8* %p)
- br i1 undef, label %Loop.Pre, label %Loop.End
-
-Loop.Pre:
-; CHECK: MemoryUse(2)
-; CHECK-NEXT: %0 = load i8
- %0 = load i8, i8* %after, !invariant.group !0
- br label %Loop.Body
-Loop.Body:
-; CHECK: 4 = MemoryPhi({Loop.Pre,2},{Loop.Body,3},{Loop.End,5})
-; CHECK-NEXT: MemoryUse(4)
-; CHECK-NEXT: %1 = load i8
- %1 = load i8, i8* %after, !invariant.group !0
-
-; FIXME: MemoryUse(2)
-; CHECK: MemoryUse(4)
-; CHECK-NEXT: %2 = load i8
- %2 = load i8, i8* %p, !invariant.group !0
-
-; CHECK: 3 = MemoryDef(4)
- store i8 4, i8* %after, !invariant.group !0
- br i1 undef, label %Loop.End, label %Loop.Body
-
-Loop.End:
-; CHECK: 5 = MemoryPhi({entry,2},{Loop.Body,3})
-; CHECK-NEXT: MemoryUse(5)
-; CHECK-NEXT: %3 = load
- %3 = load i8, i8* %after, align 4, !invariant.group !0
-
-; FIXME: MemoryUse(2)
-; CHECK: MemoryUse(5)
-; CHECK-NEXT: %4 = load
- %4 = load i8, i8* %p, align 4, !invariant.group !0
- br i1 undef, label %Ret, label %Loop.Body
-
-Ret:
- ret i8 %3
-}
-
-declare i8* @llvm.invariant.group.barrier(i8*)
-declare void @clobber(i32*)
-declare void @clobber8(i8*)
-
-
-!0 = !{!"group1"}
Removed: llvm/trunk/test/Transforms/Util/MemorySSA/lifetime-simple.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Util/MemorySSA/lifetime-simple.ll?rev=299979&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/Util/MemorySSA/lifetime-simple.ll (original)
+++ llvm/trunk/test/Transforms/Util/MemorySSA/lifetime-simple.ll (removed)
@@ -1,30 +0,0 @@
-; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
-; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
-; This test checks a number of things:
-; First, the lifetime markers should not clobber any uses of Q or P.
-; Second, the loads of P are MemoryUse(LiveOnEntry) due to the placement of the markers vs the loads.
-
-define i8 @test(i8* %P, i8* %Q) {
-entry:
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 32, i8* %P)
- call void @llvm.lifetime.start.p0i8(i64 32, i8* %P)
-; CHECK: MemoryUse(1)
-; CHECK-NEXT: %0 = load i8, i8* %P
- %0 = load i8, i8* %P
-; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: store i8 1, i8* %P
- store i8 1, i8* %P
-; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 32, i8* %P)
- call void @llvm.lifetime.end.p0i8(i64 32, i8* %P)
-; CHECK: MemoryUse(liveOnEntry)
-; CHECK-NEXT: %1 = load i8, i8* %P
- %1 = load i8, i8* %P
-; CHECK: MemoryUse(2)
-; CHECK-NEXT: %2 = load i8, i8* %Q
- %2 = load i8, i8* %Q
- ret i8 %1
-}
-declare void @llvm.lifetime.start.p0i8(i64 %S, i8* nocapture %P) readonly
-declare void @llvm.lifetime.end.p0i8(i64 %S, i8* nocapture %P)
Removed: llvm/trunk/test/Transforms/Util/MemorySSA/load-invariant.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Util/MemorySSA/load-invariant.ll?rev=299979&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/Util/MemorySSA/load-invariant.ll (original)
+++ llvm/trunk/test/Transforms/Util/MemorySSA/load-invariant.ll (removed)
@@ -1,38 +0,0 @@
-; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
-; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>' -verify-memoryssa -disable-output < %s 2>&1 | FileCheck %s
-;
-; Invariant loads should be considered live on entry, because, once the
-; location is known to be dereferenceable, the value can never change.
-
-@g = external global i32
-
-declare void @clobberAllTheThings()
-
-; CHECK-LABEL: define i32 @foo
-define i32 @foo() {
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: call void @clobberAllTheThings()
- call void @clobberAllTheThings()
-; CHECK: MemoryUse(liveOnEntry)
-; CHECK-NEXT: %1 = load i32
- %1 = load i32, i32* @g, align 4, !invariant.load !0
- ret i32 %1
-}
-
-; CHECK-LABEL: define i32 @bar
-define i32 @bar(i32* %a) {
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: call void @clobberAllTheThings()
- call void @clobberAllTheThings()
-
-; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: %1 = load atomic i32
- %1 = load atomic i32, i32* %a acquire, align 4, !invariant.load !0
-
-; CHECK: MemoryUse(2)
-; CHECK-NEXT: %2 = load i32
- %2 = load i32, i32* %a, align 4
- ret i32 %2
-}
-
-!0 = !{}
Removed: llvm/trunk/test/Transforms/Util/MemorySSA/many-dom-backedge.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Util/MemorySSA/many-dom-backedge.ll?rev=299979&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/Util/MemorySSA/many-dom-backedge.ll (original)
+++ llvm/trunk/test/Transforms/Util/MemorySSA/many-dom-backedge.ll (removed)
@@ -1,77 +0,0 @@
-; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
-; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
-;
-; many-dom.ll, with an added back-edge back into the switch.
-; Because people love their gotos.
-
-declare i1 @getBool() readnone
-
-define i32 @foo(i32* %p) {
-entry:
- br label %loopbegin
-
-loopbegin:
-; CHECK: 8 = MemoryPhi({entry,liveOnEntry},{sw.epilog,6})
-; CHECK-NEXT: %n =
- %n = phi i32 [ 0, %entry ], [ %1, %sw.epilog ]
- %m = alloca i32, align 4
- switch i32 %n, label %sw.default [
- i32 0, label %sw.bb
- i32 1, label %sw.bb1
- i32 2, label %sw.bb2
- i32 3, label %sw.bb3
- ]
-
-sw.bb:
-; CHECK: 1 = MemoryDef(8)
-; CHECK-NEXT: store i32 1
- store i32 1, i32* %m, align 4
- br label %sw.epilog
-
-sw.bb1:
-; CHECK: 2 = MemoryDef(8)
-; CHECK-NEXT: store i32 2
- store i32 2, i32* %m, align 4
- br label %sw.epilog
-
-sw.bb2:
-; CHECK: 3 = MemoryDef(8)
-; CHECK-NEXT: store i32 3
- store i32 3, i32* %m, align 4
- br label %sw.epilog
-
-sw.bb3:
-; CHECK: 9 = MemoryPhi({loopbegin,8},{sw.almostexit,6})
-; CHECK: 4 = MemoryDef(9)
-; CHECK-NEXT: store i32 4
- store i32 4, i32* %m, align 4
- br label %sw.epilog
-
-sw.default:
-; CHECK: 5 = MemoryDef(8)
-; CHECK-NEXT: store i32 5
- store i32 5, i32* %m, align 4
- br label %sw.epilog
-
-sw.epilog:
-; CHECK: 10 = MemoryPhi({sw.default,5},{sw.bb3,4},{sw.bb,1},{sw.bb1,2},{sw.bb2,3})
-; CHECK-NEXT: MemoryUse(10)
-; CHECK-NEXT: %0 =
- %0 = load i32, i32* %m, align 4
-; CHECK: 6 = MemoryDef(10)
-; CHECK-NEXT: %1 =
- %1 = load volatile i32, i32* %p, align 4
- %2 = icmp eq i32 %0, %1
- br i1 %2, label %sw.almostexit, label %loopbegin
-
-sw.almostexit:
- %3 = icmp eq i32 0, %1
- br i1 %3, label %exit, label %sw.bb3
-
-exit:
-; CHECK: 7 = MemoryDef(6)
-; CHECK-NEXT: %4 = load volatile i32
- %4 = load volatile i32, i32* %p, align 4
- %5 = add i32 %4, %1
- ret i32 %5
-}
Removed: llvm/trunk/test/Transforms/Util/MemorySSA/many-doms.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Util/MemorySSA/many-doms.ll?rev=299979&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/Util/MemorySSA/many-doms.ll (original)
+++ llvm/trunk/test/Transforms/Util/MemorySSA/many-doms.ll (removed)
@@ -1,67 +0,0 @@
-; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
-; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
-;
-; Testing many dominators, specifically from a switch statement in C.
-
-declare i1 @getBool() readnone
-
-define i32 @foo(i32* %p) {
-entry:
- br label %loopbegin
-
-loopbegin:
-; CHECK: 7 = MemoryPhi({entry,liveOnEntry},{sw.epilog,6})
-; CHECK-NEXT: %n =
- %n = phi i32 [ 0, %entry ], [ %1, %sw.epilog ]
- %m = alloca i32, align 4
- switch i32 %n, label %sw.default [
- i32 0, label %sw.bb
- i32 1, label %sw.bb1
- i32 2, label %sw.bb2
- i32 3, label %sw.bb3
- ]
-
-sw.bb:
-; CHECK: 1 = MemoryDef(7)
-; CHECK-NEXT: store i32 1
- store i32 1, i32* %m, align 4
- br label %sw.epilog
-
-sw.bb1:
-; CHECK: 2 = MemoryDef(7)
-; CHECK-NEXT: store i32 2
- store i32 2, i32* %m, align 4
- br label %sw.epilog
-
-sw.bb2:
-; CHECK: 3 = MemoryDef(7)
-; CHECK-NEXT: store i32 3
- store i32 3, i32* %m, align 4
- br label %sw.epilog
-
-sw.bb3:
-; CHECK: 4 = MemoryDef(7)
-; CHECK-NEXT: store i32 4
- store i32 4, i32* %m, align 4
- br label %sw.epilog
-
-sw.default:
-; CHECK: 5 = MemoryDef(7)
-; CHECK-NEXT: store i32 5
- store i32 5, i32* %m, align 4
- br label %sw.epilog
-
-sw.epilog:
-; CHECK: 8 = MemoryPhi({sw.default,5},{sw.bb,1},{sw.bb1,2},{sw.bb2,3},{sw.bb3,4})
-; CHECK-NEXT: MemoryUse(8)
-; CHECK-NEXT: %0 =
- %0 = load i32, i32* %m, align 4
-; CHECK: 6 = MemoryDef(8)
-; CHECK-NEXT: %1 =
- %1 = load volatile i32, i32* %p, align 4
- %2 = icmp eq i32 %0, %1
- br i1 %2, label %exit, label %loopbegin
-
-exit:
- ret i32 %1
-}
Removed: llvm/trunk/test/Transforms/Util/MemorySSA/multi-edges.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Util/MemorySSA/multi-edges.ll?rev=299979&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/Util/MemorySSA/multi-edges.ll (original)
+++ llvm/trunk/test/Transforms/Util/MemorySSA/multi-edges.ll (removed)
@@ -1,32 +0,0 @@
-; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
-; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
-;
-; Makes sure we have a sane model if both successors of some block are the
-; same block.
-
-define i32 @foo(i1 %a) {
-entry:
- %0 = alloca i32, align 4
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: store i32 4
- store i32 4, i32* %0
- br i1 %a, label %Loop.Body, label %Loop.End
-
-Loop.Body:
-; CHECK: 3 = MemoryPhi({entry,1},{Loop.End,4})
-; CHECK-NEXT: 2 = MemoryDef(3)
-; CHECK-NEXT: store i32 5
- store i32 5, i32* %0, align 4
- br i1 %a, label %Loop.End, label %Loop.End ; WhyDoWeEvenHaveThatLever.gif
-
-Loop.End:
-; CHECK: 4 = MemoryPhi({entry,1},{Loop.Body,2},{Loop.Body,2})
-; CHECK-NEXT: MemoryUse(4)
-; CHECK-NEXT: %1 = load
- %1 = load i32, i32* %0, align 4
- %2 = icmp eq i32 5, %1
- br i1 %2, label %Ret, label %Loop.Body
-
-Ret:
- ret i32 %1
-}
Removed: llvm/trunk/test/Transforms/Util/MemorySSA/multiple-backedges-hal.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Util/MemorySSA/multiple-backedges-hal.ll?rev=299979&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/Util/MemorySSA/multiple-backedges-hal.ll (original)
+++ llvm/trunk/test/Transforms/Util/MemorySSA/multiple-backedges-hal.ll (removed)
@@ -1,73 +0,0 @@
-; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
-; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
-
-; hfinkel's case
-; [entry]
-; |
-; .....
-; (clobbering access - b)
-; |
-; .... ________________________________
-; \ / |
-; (x) |
-; ...... |
-; | |
-; | ______________________ |
-; \ / | |
-; (starting access) | |
-; ... | |
-; (clobbering access - a) | |
-; ... | |
-; | | | |
-; | |_______________________| |
-; | |
-; |_________________________________|
-;
-; More specifically, one access, with multiple clobbering accesses. One of
-; which strictly dominates the access, the other of which has a backedge
-
-; readnone so we don't have a 1:1 mapping of MemorySSA edges to Instructions.
-declare void @doThingWithoutReading() readnone
-declare i8 @getValue() readnone
-declare i1 @getBool() readnone
-
-define hidden void @testcase(i8* %Arg) {
-Entry:
- call void @doThingWithoutReading()
- %Val.Entry = call i8 @getValue()
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: store i8 %Val.Entry
- store i8 %Val.Entry, i8* %Arg
- call void @doThingWithoutReading()
- br label %OuterLoop
-
-OuterLoop:
-; CHECK: 4 = MemoryPhi({Entry,1},{InnerLoop.Tail,3})
-; CHECK-NEXT: %Val.Outer =
- %Val.Outer = call i8 @getValue()
-; CHECK: 2 = MemoryDef(4)
-; CHECK-NEXT: store i8 %Val.Outer
- store i8 %Val.Outer, i8* %Arg
- call void @doThingWithoutReading()
- br label %InnerLoop
-
-InnerLoop:
-; CHECK: 5 = MemoryPhi({OuterLoop,2},{InnerLoop,3})
-; CHECK-NEXT: ; MemoryUse(5)
-; CHECK-NEXT: %StartingAccess = load
- %StartingAccess = load i8, i8* %Arg, align 4
- %Val.Inner = call i8 @getValue()
-; CHECK: 3 = MemoryDef(5)
-; CHECK-NEXT: store i8 %Val.Inner
- store i8 %Val.Inner, i8* %Arg
- call void @doThingWithoutReading()
- %KeepGoing = call i1 @getBool()
- br i1 %KeepGoing, label %InnerLoop.Tail, label %InnerLoop
-
-InnerLoop.Tail:
- %KeepGoing.Tail = call i1 @getBool()
- br i1 %KeepGoing.Tail, label %End, label %OuterLoop
-
-End:
- ret void
-}
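As the readnone note above implies, a call that neither reads nor writes
memory gets no MemoryAccess at all, which is exactly what breaks the 1:1
mapping of MemorySSA edges to Instructions. A minimal sketch with hypothetical
names:

declare void @pure() readnone

define void @sketch(i32* %p) {
  call void @pure()   ; no MemorySSA annotation is printed for this call
; 1 = MemoryDef(liveOnEntry)
  store i32 0, i32* %p, align 4
  ret void
}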
Removed: llvm/trunk/test/Transforms/Util/MemorySSA/multiple-locations.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Util/MemorySSA/multiple-locations.ll?rev=299979&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/Util/MemorySSA/multiple-locations.ll (original)
+++ llvm/trunk/test/Transforms/Util/MemorySSA/multiple-locations.ll (removed)
@@ -1,25 +0,0 @@
-; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
-; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
-;
-; Checks that basicAA is doing some amount of disambiguation for us
-
-define i32 @foo(i1 %cond) {
- %a = alloca i32, align 4
- %b = alloca i32, align 4
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: store i32 0
- store i32 0, i32* %a, align 4
-; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: store i32 1
- store i32 1, i32* %b, align 4
-
-; CHECK: MemoryUse(1)
-; CHECK-NEXT: %1 = load i32
- %1 = load i32, i32* %a, align 4
-; CHECK: MemoryUse(2)
-; CHECK-NEXT: %2 = load i32
- %2 = load i32, i32* %b, align 4
-
- %3 = add i32 %1, %2
- ret i32 %3
-}
Removed: llvm/trunk/test/Transforms/Util/MemorySSA/no-disconnected.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Util/MemorySSA/no-disconnected.ll?rev=299979&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/Util/MemorySSA/no-disconnected.ll (original)
+++ llvm/trunk/test/Transforms/Util/MemorySSA/no-disconnected.ll (removed)
@@ -1,43 +0,0 @@
-; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
-; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
-;
-; This test ensures we don't end up with multiple reaching defs for a single
-; use/phi edge. If we were to optimize defs, we would end up with
-; 2 = MemoryDef(liveOnEntry) and 4 = MemoryDef(liveOnEntry). Both would mean
-; that both 1,2 and 3,4 reach the phi node. Because the phi node can only
-; have one entry on each edge, it would choose 2, 4 and disconnect 1 and 3
-; completely from the SSA graph, even though they are not dead.
-
-define void @sink_store(i32 %index, i32* %foo, i32* %bar) {
-entry:
- %cmp = trunc i32 %index to i1
- br i1 %cmp, label %if.then, label %if.else
-
-if.then: ; preds = %entry
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: store i32 %index, i32* %foo, align 4
- store i32 %index, i32* %foo, align 4
-; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: store i32 %index, i32* %bar, align 4
- store i32 %index, i32* %bar, align 4
- br label %if.end
-
-if.else: ; preds = %entry
-; CHECK: 3 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: store i32 %index, i32* %foo, align 4
- store i32 %index, i32* %foo, align 4
-; CHECK: 4 = MemoryDef(3)
-; CHECK-NEXT: store i32 %index, i32* %bar, align 4
- store i32 %index, i32* %bar, align 4
- br label %if.end
-
-if.end: ; preds = %if.else, %if.then
-; CHECK: 5 = MemoryPhi({if.then,2},{if.else,4})
-; CHECK: MemoryUse(5)
-; CHECK-NEXT: %c = load i32, i32* %foo
- %c = load i32, i32* %foo
-; CHECK: MemoryUse(5)
-; CHECK-NEXT: %d = load i32, i32* %bar
- %d = load i32, i32* %bar
- ret void
-}
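To make the comment above concrete, this is roughly the disconnected shape we
would get if the stores' defining accesses were optimized (hypothetical
output, not anything MemorySSA actually emits):

if.then:
; 1 = MemoryDef(liveOnEntry)
  store i32 %index, i32* %foo, align 4
; 2 = MemoryDef(liveOnEntry)  <- would skip 1, since %foo and %bar don't alias
  store i32 %index, i32* %bar, align 4
...
if.end:
; 5 = MemoryPhi({if.then,2},{if.else,4})

With that shape, 1 and 3 are reachable from no use or phi, even though the
stores they annotate are still live; that is the disconnection we avoid by
not optimizing defs.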
Removed: llvm/trunk/test/Transforms/Util/MemorySSA/optimize-use.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Util/MemorySSA/optimize-use.ll?rev=299979&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/Util/MemorySSA/optimize-use.ll (original)
+++ llvm/trunk/test/Transforms/Util/MemorySSA/optimize-use.ll (removed)
@@ -1,37 +0,0 @@
-; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
-; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
-
-; Function Attrs: ssp uwtable
-define i32 @main() {
-entry:
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: %call = call noalias i8* @_Znwm(i64 4)
- %call = call noalias i8* @_Znwm(i64 4)
- %0 = bitcast i8* %call to i32*
-; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: %call1 = call noalias i8* @_Znwm(i64 4)
- %call1 = call noalias i8* @_Znwm(i64 4)
- %1 = bitcast i8* %call1 to i32*
-; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: store i32 5, i32* %0, align 4
- store i32 5, i32* %0, align 4
-; CHECK: 4 = MemoryDef(3)
-; CHECK-NEXT: store i32 7, i32* %1, align 4
- store i32 7, i32* %1, align 4
-; CHECK: MemoryUse(3)
-; CHECK-NEXT: %2 = load i32, i32* %0, align 4
- %2 = load i32, i32* %0, align 4
-; CHECK: MemoryUse(4)
-; CHECK-NEXT: %3 = load i32, i32* %1, align 4
- %3 = load i32, i32* %1, align 4
-; CHECK: MemoryUse(3)
-; CHECK-NEXT: %4 = load i32, i32* %0, align 4
- %4 = load i32, i32* %0, align 4
-; CHECK: MemoryUse(4)
-; CHECK-NEXT: %5 = load i32, i32* %1, align 4
- %5 = load i32, i32* %1, align 4
- %add = add nsw i32 %3, %5
- ret i32 %add
-}
-
-declare noalias i8* @_Znwm(i64)
Removed: llvm/trunk/test/Transforms/Util/MemorySSA/phi-translation.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Util/MemorySSA/phi-translation.ll?rev=299979&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/Util/MemorySSA/phi-translation.ll (original)
+++ llvm/trunk/test/Transforms/Util/MemorySSA/phi-translation.ll (removed)
@@ -1,181 +0,0 @@
-; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
-; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
-
-; %ptr can't alias %local, so we should be able to optimize the use of %local to
-; point to the store to %local.
-; CHECK-LABEL: define void @check
-define void @check(i8* %ptr, i1 %bool) {
-entry:
- %local = alloca i8, align 1
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: store i8 0, i8* %local, align 1
- store i8 0, i8* %local, align 1
- br i1 %bool, label %if.then, label %if.end
-
-if.then:
- %p2 = getelementptr inbounds i8, i8* %ptr, i32 1
-; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: store i8 0, i8* %p2, align 1
- store i8 0, i8* %p2, align 1
- br label %if.end
-
-if.end:
-; CHECK: 3 = MemoryPhi({entry,1},{if.then,2})
-; CHECK: MemoryUse(1)
-; CHECK-NEXT: load i8, i8* %local, align 1
- load i8, i8* %local, align 1
- ret void
-}
-
-; CHECK-LABEL: define void @check2
-define void @check2(i1 %val1, i1 %val2, i1 %val3) {
-entry:
- %local = alloca i8, align 1
- %local2 = alloca i8, align 1
-
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: store i8 0, i8* %local
- store i8 0, i8* %local
- br i1 %val1, label %if.then, label %phi.3
-
-if.then:
-; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: store i8 2, i8* %local2
- store i8 2, i8* %local2
- br i1 %val2, label %phi.2, label %phi.3
-
-phi.3:
-; CHECK: 5 = MemoryPhi({entry,1},{if.then,2})
-; CHECK: 3 = MemoryDef(5)
-; CHECK-NEXT: store i8 3, i8* %local2
- store i8 3, i8* %local2
- br i1 %val3, label %phi.2, label %phi.1
-
-phi.2:
-; CHECK: 6 = MemoryPhi({if.then,2},{phi.3,3})
-; CHECK: 4 = MemoryDef(6)
-; CHECK-NEXT: store i8 4, i8* %local2
- store i8 4, i8* %local2
- br label %phi.1
-
-phi.1:
-; Order matters here; phi.2 needs to come before phi.3, because that's the order
-; they're visited in.
-; CHECK: 7 = MemoryPhi({phi.2,4},{phi.3,3})
-; CHECK: MemoryUse(1)
-; CHECK-NEXT: load i8, i8* %local
- load i8, i8* %local
- ret void
-}
-
-; CHECK-LABEL: define void @cross_phi
-define void @cross_phi(i8* noalias %p1, i8* noalias %p2) {
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: store i8 0, i8* %p1
- store i8 0, i8* %p1
-; CHECK: MemoryUse(1)
-; CHECK-NEXT: load i8, i8* %p1
- load i8, i8* %p1
- br i1 undef, label %a, label %b
-
-a:
-; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: store i8 0, i8* %p2
- store i8 0, i8* %p2
- br i1 undef, label %c, label %d
-
-b:
-; CHECK: 3 = MemoryDef(1)
-; CHECK-NEXT: store i8 1, i8* %p2
- store i8 1, i8* %p2
- br i1 undef, label %c, label %d
-
-c:
-; CHECK: 6 = MemoryPhi({a,2},{b,3})
-; CHECK: 4 = MemoryDef(6)
-; CHECK-NEXT: store i8 2, i8* %p2
- store i8 2, i8* %p2
- br label %e
-
-d:
-; CHECK: 7 = MemoryPhi({a,2},{b,3})
-; CHECK: 5 = MemoryDef(7)
-; CHECK-NEXT: store i8 3, i8* %p2
- store i8 3, i8* %p2
- br label %e
-
-e:
-; 8 = MemoryPhi({c,4},{d,5})
-; CHECK: MemoryUse(1)
-; CHECK-NEXT: load i8, i8* %p1
- load i8, i8* %p1
- ret void
-}
-
-; CHECK-LABEL: define void @looped
-define void @looped(i8* noalias %p1, i8* noalias %p2) {
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: store i8 0, i8* %p1
- store i8 0, i8* %p1
- br label %loop.1
-
-loop.1:
-; CHECK: 5 = MemoryPhi({%0,1},{loop.3,4})
-; CHECK: 2 = MemoryDef(5)
-; CHECK-NEXT: store i8 0, i8* %p2
- store i8 0, i8* %p2
- br i1 undef, label %loop.2, label %loop.3
-
-loop.2:
-; CHECK: 6 = MemoryPhi({loop.1,2},{loop.3,4})
-; CHECK: 3 = MemoryDef(6)
-; CHECK-NEXT: store i8 1, i8* %p2
- store i8 1, i8* %p2
- br label %loop.3
-
-loop.3:
-; CHECK: 7 = MemoryPhi({loop.1,2},{loop.2,3})
-; CHECK: 4 = MemoryDef(7)
-; CHECK-NEXT: store i8 2, i8* %p2
- store i8 2, i8* %p2
-; CHECK: MemoryUse(1)
-; CHECK-NEXT: load i8, i8* %p1
- load i8, i8* %p1
- br i1 undef, label %loop.2, label %loop.1
-}
-
-; CHECK-LABEL: define void @looped_visitedonlyonce
-define void @looped_visitedonlyonce(i8* noalias %p1, i8* noalias %p2) {
- br label %while.cond
-
-while.cond:
-; CHECK: 4 = MemoryPhi({%0,liveOnEntry},{if.end,3})
-; CHECK-NEXT: br i1 undef, label %if.then, label %if.end
- br i1 undef, label %if.then, label %if.end
-
-if.then:
-; CHECK: 1 = MemoryDef(4)
-; CHECK-NEXT: store i8 0, i8* %p1
- store i8 0, i8* %p1
- br i1 undef, label %if.end, label %if.then2
-
-if.then2:
-; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: store i8 1, i8* %p2
- store i8 1, i8* %p2
- br label %if.end
-
-if.end:
-; CHECK: 5 = MemoryPhi({while.cond,4},{if.then,1},{if.then2,2})
-; CHECK: MemoryUse(5)
-; CHECK-NEXT: load i8, i8* %p1
- load i8, i8* %p1
-; CHECK: 3 = MemoryDef(5)
-; CHECK-NEXT: store i8 2, i8* %p2
- store i8 2, i8* %p2
-; CHECK: MemoryUse(5)
-; CHECK-NEXT: load i8, i8* %p1
- load i8, i8* %p1
- br label %while.cond
-}
-
Removed: llvm/trunk/test/Transforms/Util/MemorySSA/pr28880.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Util/MemorySSA/pr28880.ll?rev=299979&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/Util/MemorySSA/pr28880.ll (original)
+++ llvm/trunk/test/Transforms/Util/MemorySSA/pr28880.ll (removed)
@@ -1,51 +0,0 @@
-; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
-; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
-
-; This testcase is reduced from SingleSource/Benchmarks/Misc/fbench.c
-; It is testing to make sure that the MemorySSA use optimizer
-; comes up with the right answers when dealing with multiple MemoryLocations
-; over different blocks. See PR28880 for more details.
-@global = external hidden unnamed_addr global double, align 8
-@global.1 = external hidden unnamed_addr global double, align 8
-
-; Function Attrs: nounwind ssp uwtable
-define hidden fastcc void @hoge() unnamed_addr #0 {
-bb:
- br i1 undef, label %bb1, label %bb2
-
-bb1: ; preds = %bb
-; These accesses should not conflict.
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: store double undef, double* @global, align 8
- store double undef, double* @global, align 8
-; CHECK: MemoryUse(liveOnEntry)
-; MemoryUse(liveOnEntry)
-; CHECK-NEXT: %tmp = load double, double* @global.1, align 8
- %tmp = load double, double* @global.1, align 8
- unreachable
-
-bb2: ; preds = %bb
- br label %bb3
-
-bb3: ; preds = %bb2
- br i1 undef, label %bb4, label %bb6
-
-bb4: ; preds = %bb3
-; These accesses should conflict.
-; CHECK: 2 = MemoryDef(liveOnEntry)
-; 2 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: store double 0.000000e+00, double* @global.1, align 8
- store double 0.000000e+00, double* @global.1, align 8
-; CHECK: MemoryUse(2)
-; MemoryUse(2)
-; CHECK-NEXT: %tmp5 = load double, double* @global.1, align 8
- %tmp5 = load double, double* @global.1, align 8
- unreachable
-
-bb6: ; preds = %bb3
- unreachable
-}
-
-attributes #0 = { nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
Removed: llvm/trunk/test/Transforms/Util/MemorySSA/ptr-const-mem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Util/MemorySSA/ptr-const-mem.ll?rev=299979&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/Util/MemorySSA/ptr-const-mem.ll (original)
+++ llvm/trunk/test/Transforms/Util/MemorySSA/ptr-const-mem.ll (removed)
@@ -1,23 +0,0 @@
-; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze -memssa-check-limit=0 < %s 2>&1 | FileCheck %s
-; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>' -verify-memoryssa -disable-output -memssa-check-limit=0 < %s 2>&1 | FileCheck %s
-target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
-target triple = "amdgcn"
-
-@g4 = external unnamed_addr constant i8, align 1
-
-define signext i8 @cmp_constant(i8* %q, i8 %v) local_unnamed_addr {
-entry:
-
- store i8 %v, i8* %q, align 1
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: store i8 %v, i8* %q, align 1
-
- %0 = load i8, i8* @g4, align 1
-; Make sure that this load is liveOnEntry just based on the fact that @g4 is
-; constant memory.
-; CHECK: MemoryUse(liveOnEntry)
-; CHECK-NEXT: load i8, i8* @g4, align 1
-
- ret i8 %0
-}
-
Removed: llvm/trunk/test/Transforms/Util/MemorySSA/volatile-clobber.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Util/MemorySSA/volatile-clobber.ll?rev=299979&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/Util/MemorySSA/volatile-clobber.ll (original)
+++ llvm/trunk/test/Transforms/Util/MemorySSA/volatile-clobber.ll (removed)
@@ -1,94 +0,0 @@
-; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
-; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
-;
-; Ensures that volatile stores/loads count as MemoryDefs
-
-; CHECK-LABEL: define i32 @foo
-define i32 @foo() {
- %1 = alloca i32, align 4
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: store volatile i32 4
- store volatile i32 4, i32* %1, align 4
-; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: store volatile i32 8
- store volatile i32 8, i32* %1, align 4
-; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: %2 = load volatile i32
- %2 = load volatile i32, i32* %1, align 4
-; CHECK: 4 = MemoryDef(3)
-; CHECK-NEXT: %3 = load volatile i32
- %3 = load volatile i32, i32* %1, align 4
- %4 = add i32 %3, %2
- ret i32 %4
-}
-
-; Ensuring that we don't automatically hoist nonvolatile loads around volatile
-; loads
-; CHECK-LABEL: define void @volatile_only
-define void @volatile_only(i32* %arg1, i32* %arg2) {
- ; Trivially NoAlias/MustAlias
- %a = alloca i32
- %b = alloca i32
-
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: load volatile i32, i32* %a
- load volatile i32, i32* %a
-; CHECK: MemoryUse(liveOnEntry)
-; CHECK-NEXT: load i32, i32* %b
- load i32, i32* %b
-; CHECK: MemoryUse(1)
-; CHECK-NEXT: load i32, i32* %a
- load i32, i32* %a
-
- ; MayAlias
-; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: load volatile i32, i32* %arg1
- load volatile i32, i32* %arg1
-; CHECK: MemoryUse(2)
-; CHECK-NEXT: load i32, i32* %arg2
- load i32, i32* %arg2
-
- ret void
-}
-
-; Ensuring that volatile atomic operations work properly.
-; CHECK-LABEL: define void @volatile_atomics
-define void @volatile_atomics(i32* %arg1, i32* %arg2) {
- %a = alloca i32
- %b = alloca i32
-
- ; Trivially NoAlias/MustAlias
-
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: load atomic volatile i32, i32* %a acquire, align 4
- load atomic volatile i32, i32* %a acquire, align 4
-; CHECK: MemoryUse(1)
-; CHECK-NEXT: load i32, i32* %b
- load i32, i32* %b
-
-; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: load atomic volatile i32, i32* %a monotonic, align 4
- load atomic volatile i32, i32* %a monotonic, align 4
-; CHECK: MemoryUse(1)
-; CHECK-NEXT: load i32, i32* %b
- load i32, i32* %b
-; CHECK: MemoryUse(1)
-; CHECK-NEXT: load atomic i32, i32* %b unordered, align 4
- load atomic i32, i32* %b unordered, align 4
-; CHECK: MemoryUse(2)
-; CHECK-NEXT: load atomic i32, i32* %a unordered, align 4
- load atomic i32, i32* %a unordered, align 4
-; CHECK: MemoryUse(2)
-; CHECK-NEXT: load i32, i32* %a
- load i32, i32* %a
-
- ; MayAlias
-; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: load atomic volatile i32, i32* %arg1 monotonic, align 4
- load atomic volatile i32, i32* %arg1 monotonic, align 4
-; CHECK: MemoryUse(3)
-; CHECK-NEXT: load i32, i32* %arg2
- load i32, i32* %arg2
-
- ret void
-}
Modified: llvm/trunk/unittests/Analysis/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/unittests/Analysis/CMakeLists.txt?rev=299980&r1=299979&r2=299980&view=diff
==============================================================================
--- llvm/trunk/unittests/Analysis/CMakeLists.txt (original)
+++ llvm/trunk/unittests/Analysis/CMakeLists.txt Tue Apr 11 15:06:36 2017
@@ -15,6 +15,7 @@ add_llvm_unittest(AnalysisTests
LazyCallGraphTest.cpp
LoopInfoTest.cpp
MemoryBuiltinsTest.cpp
+ MemorySSA.cpp
ProfileSummaryInfoTest.cpp
ScalarEvolutionTest.cpp
TBAATest.cpp
Copied: llvm/trunk/unittests/Analysis/MemorySSA.cpp (from r299975, llvm/trunk/unittests/Transforms/Utils/MemorySSA.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/unittests/Analysis/MemorySSA.cpp?p2=llvm/trunk/unittests/Analysis/MemorySSA.cpp&p1=llvm/trunk/unittests/Transforms/Utils/MemorySSA.cpp&r1=299975&r2=299980&rev=299980&view=diff
==============================================================================
--- llvm/trunk/unittests/Transforms/Utils/MemorySSA.cpp (original)
+++ llvm/trunk/unittests/Analysis/MemorySSA.cpp Tue Apr 11 15:06:36 2017
@@ -6,16 +6,16 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-#include "llvm/Transforms/Utils/MemorySSA.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
+#include "llvm/Analysis/MemorySSA.h"
+#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
-#include "llvm/Transforms/Utils/MemorySSAUpdater.h"
#include "gtest/gtest.h"
using namespace llvm;
Modified: llvm/trunk/unittests/Transforms/Utils/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/unittests/Transforms/Utils/CMakeLists.txt?rev=299980&r1=299979&r2=299980&view=diff
==============================================================================
--- llvm/trunk/unittests/Transforms/Utils/CMakeLists.txt (original)
+++ llvm/trunk/unittests/Transforms/Utils/CMakeLists.txt Tue Apr 11 15:06:36 2017
@@ -11,6 +11,5 @@ add_llvm_unittest(UtilsTests
FunctionComparator.cpp
IntegerDivision.cpp
Local.cpp
- MemorySSA.cpp
ValueMapperTest.cpp
)
Removed: llvm/trunk/unittests/Transforms/Utils/MemorySSA.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/unittests/Transforms/Utils/MemorySSA.cpp?rev=299979&view=auto
==============================================================================
--- llvm/trunk/unittests/Transforms/Utils/MemorySSA.cpp (original)
+++ llvm/trunk/unittests/Transforms/Utils/MemorySSA.cpp (removed)
@@ -1,865 +0,0 @@
-//===- MemorySSA.cpp - Unit tests for MemorySSA ---------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-#include "llvm/Transforms/Utils/MemorySSA.h"
-#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/BasicAliasAnalysis.h"
-#include "llvm/IR/BasicBlock.h"
-#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/Dominators.h"
-#include "llvm/IR/IRBuilder.h"
-#include "llvm/IR/Instructions.h"
-#include "llvm/IR/LLVMContext.h"
-#include "llvm/Transforms/Utils/MemorySSAUpdater.h"
-#include "gtest/gtest.h"
-
-using namespace llvm;
-
-const static char DLString[] = "e-i64:64-f80:128-n8:16:32:64-S128";
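-// (An x86-64-like data layout: little endian, 64-bit aligned i64, 128-bit
-// aligned f80, native integer widths of 8/16/32/64 bits.)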
-
-/// There's a lot of common setup between these tests. This fixture helps reduce
-/// that. Tests should mock up a function, store it in F, and then call
-/// setupAnalyses().
-class MemorySSATest : public testing::Test {
-protected:
- // N.B. Many of these members depend on each other (e.g. the Module depends on
- // the Context, etc.). So, order matters here (and in TestAnalyses).
- LLVMContext C;
- Module M;
- IRBuilder<> B;
- DataLayout DL;
- TargetLibraryInfoImpl TLII;
- TargetLibraryInfo TLI;
- Function *F;
-
- // Things that we need to build after the function is created.
- struct TestAnalyses {
- DominatorTree DT;
- AssumptionCache AC;
- AAResults AA;
- BasicAAResult BAA;
- // We need to defer MSSA construction until AA is *entirely* set up, which
- // requires calling addAAResult. Hence, we just use a pointer here.
- std::unique_ptr<MemorySSA> MSSA;
- MemorySSAWalker *Walker;
-
- TestAnalyses(MemorySSATest &Test)
- : DT(*Test.F), AC(*Test.F), AA(Test.TLI),
- BAA(Test.DL, Test.TLI, AC, &DT) {
- AA.addAAResult(BAA);
- MSSA = make_unique<MemorySSA>(*Test.F, &AA, &DT);
- Walker = MSSA->getWalker();
- }
- };
-
- std::unique_ptr<TestAnalyses> Analyses;
-
- void setupAnalyses() {
- assert(F);
- Analyses.reset(new TestAnalyses(*this));
- }
-
-public:
- MemorySSATest()
- : M("MemorySSATest", C), B(C), DL(DLString), TLI(TLII), F(nullptr) {}
-};
-
-TEST_F(MemorySSATest, CreateALoad) {
- // We create a diamond where there is a store on one side, and then after
- // building MemorySSA, create a load after the merge point, and use it to test
- // updating by creating an access for the load.
- F = Function::Create(
- FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
- GlobalValue::ExternalLinkage, "F", &M);
- BasicBlock *Entry(BasicBlock::Create(C, "", F));
- BasicBlock *Left(BasicBlock::Create(C, "", F));
- BasicBlock *Right(BasicBlock::Create(C, "", F));
- BasicBlock *Merge(BasicBlock::Create(C, "", F));
- B.SetInsertPoint(Entry);
- B.CreateCondBr(B.getTrue(), Left, Right);
- B.SetInsertPoint(Left);
- Argument *PointerArg = &*F->arg_begin();
- B.CreateStore(B.getInt8(16), PointerArg);
- BranchInst::Create(Merge, Left);
- BranchInst::Create(Merge, Right);
-
- setupAnalyses();
- MemorySSA &MSSA = *Analyses->MSSA;
- MemorySSAUpdater Updater(&MSSA);
- // Add the load
- B.SetInsertPoint(Merge);
- LoadInst *LoadInst = B.CreateLoad(PointerArg);
-
- // MemoryPHI should already exist.
- MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
- EXPECT_NE(MP, nullptr);
-
- // Create the load memory access
- MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
- LoadInst, MP, Merge, MemorySSA::Beginning));
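- // Note the phi was handed in as the defining access above; the updater-based
- // tests below pass nullptr instead and let insertUse/insertDef find it.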
- MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
- EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
- MSSA.verifyMemorySSA();
-}
-
-TEST_F(MemorySSATest, CreateLoadsAndStoreUpdater) {
- // We create a diamond, then build MemorySSA with no memory accesses, and
- // incrementally update it by inserting a store in the entry, a load in the
- // merge point, then a store in the branch, another load in the merge point,
- // and then a store in the entry.
- F = Function::Create(
- FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
- GlobalValue::ExternalLinkage, "F", &M);
- BasicBlock *Entry(BasicBlock::Create(C, "", F));
- BasicBlock *Left(BasicBlock::Create(C, "", F));
- BasicBlock *Right(BasicBlock::Create(C, "", F));
- BasicBlock *Merge(BasicBlock::Create(C, "", F));
- B.SetInsertPoint(Entry);
- B.CreateCondBr(B.getTrue(), Left, Right);
- B.SetInsertPoint(Left, Left->begin());
- Argument *PointerArg = &*F->arg_begin();
- B.SetInsertPoint(Left);
- B.CreateBr(Merge);
- B.SetInsertPoint(Right);
- B.CreateBr(Merge);
-
- setupAnalyses();
- MemorySSA &MSSA = *Analyses->MSSA;
- MemorySSAUpdater Updater(&MSSA);
- // Add the store
- B.SetInsertPoint(Entry, Entry->begin());
- StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
- MemoryAccess *EntryStoreAccess = Updater.createMemoryAccessInBB(
- EntryStore, nullptr, Entry, MemorySSA::Beginning);
- Updater.insertDef(cast<MemoryDef>(EntryStoreAccess));
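- // (insertDef's optional second argument, RenameUses, defaults to false;
- // both settings are exercised later in this test.)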
-
- // Add the load
- B.SetInsertPoint(Merge, Merge->begin());
- LoadInst *FirstLoad = B.CreateLoad(PointerArg);
-
- // MemoryPHI should not already exist.
- MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
- EXPECT_EQ(MP, nullptr);
-
- // Create the load memory access
- MemoryUse *FirstLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
- FirstLoad, nullptr, Merge, MemorySSA::Beginning));
- Updater.insertUse(FirstLoadAccess);
- // Should just have a load using the entry access, because it should discover
- // the phi is trivial
- EXPECT_EQ(FirstLoadAccess->getDefiningAccess(), EntryStoreAccess);
-
- // Create a store on the left
- // Add the store
- B.SetInsertPoint(Left, Left->begin());
- StoreInst *LeftStore = B.CreateStore(B.getInt8(16), PointerArg);
- MemoryAccess *LeftStoreAccess = Updater.createMemoryAccessInBB(
- LeftStore, nullptr, Left, MemorySSA::Beginning);
- Updater.insertDef(cast<MemoryDef>(LeftStoreAccess), false);
- // We don't touch existing loads, so we need to create a new one to get a phi
- // Add the second load
- B.SetInsertPoint(Merge, Merge->begin());
- LoadInst *SecondLoad = B.CreateLoad(PointerArg);
-
- // MemoryPHI should not already exist.
- MP = MSSA.getMemoryAccess(Merge);
- EXPECT_EQ(MP, nullptr);
-
- // Create the load memory access
- MemoryUse *SecondLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
- SecondLoad, nullptr, Merge, MemorySSA::Beginning));
- Updater.insertUse(SecondLoadAccess);
- // Now the load should be a phi of the entry store and the left store
- MemoryPhi *MergePhi =
- dyn_cast<MemoryPhi>(SecondLoadAccess->getDefiningAccess());
- EXPECT_NE(MergePhi, nullptr);
- EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
- EXPECT_EQ(MergePhi->getIncomingValue(1), LeftStoreAccess);
- // Now create a store below the existing one in the entry
- B.SetInsertPoint(Entry, --Entry->end());
- StoreInst *SecondEntryStore = B.CreateStore(B.getInt8(16), PointerArg);
- MemoryAccess *SecondEntryStoreAccess = Updater.createMemoryAccessInBB(
- SecondEntryStore, nullptr, Entry, MemorySSA::End);
- // Insert it twice just to test renaming
- Updater.insertDef(cast<MemoryDef>(SecondEntryStoreAccess), false);
- EXPECT_NE(FirstLoadAccess->getDefiningAccess(), MergePhi);
- Updater.insertDef(cast<MemoryDef>(SecondEntryStoreAccess), true);
- EXPECT_EQ(FirstLoadAccess->getDefiningAccess(), MergePhi);
- // and make sure the phi below it got updated, despite being blocks away
- MergePhi = dyn_cast<MemoryPhi>(SecondLoadAccess->getDefiningAccess());
- EXPECT_NE(MergePhi, nullptr);
- EXPECT_EQ(MergePhi->getIncomingValue(0), SecondEntryStoreAccess);
- EXPECT_EQ(MergePhi->getIncomingValue(1), LeftStoreAccess);
- MSSA.verifyMemorySSA();
-}
-
-TEST_F(MemorySSATest, CreateALoadUpdater) {
- // We create a diamond, then build MemorySSA with no memory accesses, and
- // incrementally update it by inserting a store in one of the branches, and a
- // load in the merge point
- F = Function::Create(
- FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
- GlobalValue::ExternalLinkage, "F", &M);
- BasicBlock *Entry(BasicBlock::Create(C, "", F));
- BasicBlock *Left(BasicBlock::Create(C, "", F));
- BasicBlock *Right(BasicBlock::Create(C, "", F));
- BasicBlock *Merge(BasicBlock::Create(C, "", F));
- B.SetInsertPoint(Entry);
- B.CreateCondBr(B.getTrue(), Left, Right);
- B.SetInsertPoint(Left, Left->begin());
- Argument *PointerArg = &*F->arg_begin();
- B.SetInsertPoint(Left);
- B.CreateBr(Merge);
- B.SetInsertPoint(Right);
- B.CreateBr(Merge);
-
- setupAnalyses();
- MemorySSA &MSSA = *Analyses->MSSA;
- MemorySSAUpdater Updater(&MSSA);
- B.SetInsertPoint(Left, Left->begin());
- // Add the store
- StoreInst *SI = B.CreateStore(B.getInt8(16), PointerArg);
- MemoryAccess *StoreAccess =
- Updater.createMemoryAccessInBB(SI, nullptr, Left, MemorySSA::Beginning);
- Updater.insertDef(cast<MemoryDef>(StoreAccess));
-
- // Add the load
- B.SetInsertPoint(Merge, Merge->begin());
- LoadInst *LoadInst = B.CreateLoad(PointerArg);
-
- // MemoryPHI should not already exist.
- MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
- EXPECT_EQ(MP, nullptr);
-
- // Create the load memory access
- MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
- LoadInst, nullptr, Merge, MemorySSA::Beginning));
- Updater.insertUse(LoadAccess);
- MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
- EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
- MSSA.verifyMemorySSA();
-}
-
-TEST_F(MemorySSATest, MoveAStore) {
- // We create a diamond where there is a store in the entry, a store on one
- // side, and a load at the end. After building MemorySSA, we test updating by
- // moving the store from the side block to the entry block. This destroys the
- // old access.
- F = Function::Create(
- FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
- GlobalValue::ExternalLinkage, "F", &M);
- BasicBlock *Entry(BasicBlock::Create(C, "", F));
- BasicBlock *Left(BasicBlock::Create(C, "", F));
- BasicBlock *Right(BasicBlock::Create(C, "", F));
- BasicBlock *Merge(BasicBlock::Create(C, "", F));
- B.SetInsertPoint(Entry);
- Argument *PointerArg = &*F->arg_begin();
- StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
- B.CreateCondBr(B.getTrue(), Left, Right);
- B.SetInsertPoint(Left);
- StoreInst *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
- BranchInst::Create(Merge, Left);
- BranchInst::Create(Merge, Right);
- B.SetInsertPoint(Merge);
- B.CreateLoad(PointerArg);
- setupAnalyses();
- MemorySSA &MSSA = *Analyses->MSSA;
- MemorySSAUpdater Updater(&MSSA);
- // Move the store
- SideStore->moveBefore(Entry->getTerminator());
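- // Rebuild the MemorySSA state by hand: create a replacement access after
- // the entry store, redirect the entry store's uses to it, and drop the
- // stale side store access. The MoveAStoreUpdater* tests below do the same
- // through the updater's higher-level APIs.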
- MemoryAccess *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
- MemoryAccess *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
- MemoryAccess *NewStoreAccess = Updater.createMemoryAccessAfter(
- SideStore, EntryStoreAccess, EntryStoreAccess);
- EntryStoreAccess->replaceAllUsesWith(NewStoreAccess);
- Updater.removeMemoryAccess(SideStoreAccess);
- MSSA.verifyMemorySSA();
-}
-
-TEST_F(MemorySSATest, MoveAStoreUpdater) {
- // We create a diamond where there is a store in the entry, a store on one
- // side, and a load at the end. After building MemorySSA, we test updating by
- // moving the store from the side block to the entry block. This destroys the
- // old access.
- F = Function::Create(
- FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
- GlobalValue::ExternalLinkage, "F", &M);
- BasicBlock *Entry(BasicBlock::Create(C, "", F));
- BasicBlock *Left(BasicBlock::Create(C, "", F));
- BasicBlock *Right(BasicBlock::Create(C, "", F));
- BasicBlock *Merge(BasicBlock::Create(C, "", F));
- B.SetInsertPoint(Entry);
- Argument *PointerArg = &*F->arg_begin();
- StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
- B.CreateCondBr(B.getTrue(), Left, Right);
- B.SetInsertPoint(Left);
- auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
- BranchInst::Create(Merge, Left);
- BranchInst::Create(Merge, Right);
- B.SetInsertPoint(Merge);
- auto *MergeLoad = B.CreateLoad(PointerArg);
- setupAnalyses();
- MemorySSA &MSSA = *Analyses->MSSA;
- MemorySSAUpdater Updater(&MSSA);
-
- // Move the store
- SideStore->moveBefore(Entry->getTerminator());
- auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
- auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
- auto *NewStoreAccess = Updater.createMemoryAccessAfter(
- SideStore, EntryStoreAccess, EntryStoreAccess);
- // Before, the load will point to a phi of the EntryStore and SideStore.
- auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad));
- EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
- MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess());
- EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
- EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
- Updater.removeMemoryAccess(SideStoreAccess);
- Updater.insertDef(cast<MemoryDef>(NewStoreAccess));
- // After, it's a phi of the new side store access.
- EXPECT_EQ(MergePhi->getIncomingValue(0), NewStoreAccess);
- EXPECT_EQ(MergePhi->getIncomingValue(1), NewStoreAccess);
- MSSA.verifyMemorySSA();
-}
-
-TEST_F(MemorySSATest, MoveAStoreUpdaterMove) {
- // We create a diamond where there is a store in the entry, a store on one
- // side, and a load at the end. After building MemorySSA, we test updating by
- // moving the store from the side block to the entry block. This does not
- // destroy the old access.
- F = Function::Create(
- FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
- GlobalValue::ExternalLinkage, "F", &M);
- BasicBlock *Entry(BasicBlock::Create(C, "", F));
- BasicBlock *Left(BasicBlock::Create(C, "", F));
- BasicBlock *Right(BasicBlock::Create(C, "", F));
- BasicBlock *Merge(BasicBlock::Create(C, "", F));
- B.SetInsertPoint(Entry);
- Argument *PointerArg = &*F->arg_begin();
- StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
- B.CreateCondBr(B.getTrue(), Left, Right);
- B.SetInsertPoint(Left);
- auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
- BranchInst::Create(Merge, Left);
- BranchInst::Create(Merge, Right);
- B.SetInsertPoint(Merge);
- auto *MergeLoad = B.CreateLoad(PointerArg);
- setupAnalyses();
- MemorySSA &MSSA = *Analyses->MSSA;
- MemorySSAUpdater Updater(&MSSA);
-
- // Move the store
- auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
- auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
- // Before, the load will point to a phi of the EntryStore and SideStore.
- auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad));
- EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
- MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess());
- EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
- EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
- SideStore->moveBefore(*EntryStore->getParent(), ++EntryStore->getIterator());
- Updater.moveAfter(SideStoreAccess, EntryStoreAccess);
- // After, it's a phi of the side store.
- EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
- EXPECT_EQ(MergePhi->getIncomingValue(1), SideStoreAccess);
-
- MSSA.verifyMemorySSA();
-}
-
-TEST_F(MemorySSATest, MoveAStoreAllAround) {
- // We create a diamond where there is a store in the entry, a store on one
- // side, and a load at the end. After building MemorySSA, we test updating by
- // moving the store from the side block to the entry block, then to the other
- // side block, then to before the load. This does not destroy the old access.
- F = Function::Create(
- FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
- GlobalValue::ExternalLinkage, "F", &M);
- BasicBlock *Entry(BasicBlock::Create(C, "", F));
- BasicBlock *Left(BasicBlock::Create(C, "", F));
- BasicBlock *Right(BasicBlock::Create(C, "", F));
- BasicBlock *Merge(BasicBlock::Create(C, "", F));
- B.SetInsertPoint(Entry);
- Argument *PointerArg = &*F->arg_begin();
- StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
- B.CreateCondBr(B.getTrue(), Left, Right);
- B.SetInsertPoint(Left);
- auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
- BranchInst::Create(Merge, Left);
- BranchInst::Create(Merge, Right);
- B.SetInsertPoint(Merge);
- auto *MergeLoad = B.CreateLoad(PointerArg);
- setupAnalyses();
- MemorySSA &MSSA = *Analyses->MSSA;
- MemorySSAUpdater Updater(&MSSA);
-
- // Move the store
- auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
- auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
- // Before, the load will point to a phi of the EntryStore and SideStore.
- auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad));
- EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
- MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess());
- EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
- EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
- // Move the store before the entry store
- SideStore->moveBefore(*EntryStore->getParent(), EntryStore->getIterator());
- Updater.moveBefore(SideStoreAccess, EntryStoreAccess);
- // After, it's a phi of the entry store.
- EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
- EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
- MSSA.verifyMemorySSA();
- // Now move the store to the right branch
- SideStore->moveBefore(*Right, Right->begin());
- Updater.moveToPlace(SideStoreAccess, Right, MemorySSA::Beginning);
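- // (moveToPlace moves an access to the start or end of a possibly different
- // block, while moveBefore/moveAfter position it relative to another
- // access.)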
- MSSA.verifyMemorySSA();
- EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
- EXPECT_EQ(MergePhi->getIncomingValue(1), SideStoreAccess);
- // Now move it before the load
- SideStore->moveBefore(MergeLoad);
- Updater.moveBefore(SideStoreAccess, LoadAccess);
- EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
- EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
- MSSA.verifyMemorySSA();
-}
-
-TEST_F(MemorySSATest, RemoveAPhi) {
- // We create a diamond where there is a store on one side, and then a load
- // after the merge point. This enables us to test a bunch of different
- // removal cases.
- F = Function::Create(
- FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
- GlobalValue::ExternalLinkage, "F", &M);
- BasicBlock *Entry(BasicBlock::Create(C, "", F));
- BasicBlock *Left(BasicBlock::Create(C, "", F));
- BasicBlock *Right(BasicBlock::Create(C, "", F));
- BasicBlock *Merge(BasicBlock::Create(C, "", F));
- B.SetInsertPoint(Entry);
- B.CreateCondBr(B.getTrue(), Left, Right);
- B.SetInsertPoint(Left);
- Argument *PointerArg = &*F->arg_begin();
- StoreInst *StoreInst = B.CreateStore(B.getInt8(16), PointerArg);
- BranchInst::Create(Merge, Left);
- BranchInst::Create(Merge, Right);
- B.SetInsertPoint(Merge);
- LoadInst *LoadInst = B.CreateLoad(PointerArg);
-
- setupAnalyses();
- MemorySSA &MSSA = *Analyses->MSSA;
- MemorySSAUpdater Updater(&MSSA);
-
- // Before, the load will be a use of a phi<store, liveonentry>.
- MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LoadInst));
- MemoryDef *StoreAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreInst));
- MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
- EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
- // Kill the store
- Updater.removeMemoryAccess(StoreAccess);
- MemoryPhi *MP = cast<MemoryPhi>(DefiningAccess);
- // Verify the phi ended up as liveonentry, liveonentry
- for (auto &Op : MP->incoming_values())
- EXPECT_TRUE(MSSA.isLiveOnEntryDef(cast<MemoryAccess>(Op.get())));
- // Replace the phi uses with the live on entry def
- MP->replaceAllUsesWith(MSSA.getLiveOnEntryDef());
- // Verify the load is now defined by liveOnEntryDef
- EXPECT_TRUE(MSSA.isLiveOnEntryDef(LoadAccess->getDefiningAccess()));
- // Remove the PHI
- Updater.removeMemoryAccess(MP);
- MSSA.verifyMemorySSA();
-}
-
-TEST_F(MemorySSATest, RemoveMemoryAccess) {
- // We create a diamond where there is a store on one side, and then a load
- // after the merge point. This enables us to test a bunch of different
- // removal cases.
- F = Function::Create(
- FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
- GlobalValue::ExternalLinkage, "F", &M);
- BasicBlock *Entry(BasicBlock::Create(C, "", F));
- BasicBlock *Left(BasicBlock::Create(C, "", F));
- BasicBlock *Right(BasicBlock::Create(C, "", F));
- BasicBlock *Merge(BasicBlock::Create(C, "", F));
- B.SetInsertPoint(Entry);
- B.CreateCondBr(B.getTrue(), Left, Right);
- B.SetInsertPoint(Left);
- Argument *PointerArg = &*F->arg_begin();
- StoreInst *StoreInst = B.CreateStore(B.getInt8(16), PointerArg);
- BranchInst::Create(Merge, Left);
- BranchInst::Create(Merge, Right);
- B.SetInsertPoint(Merge);
- LoadInst *LoadInst = B.CreateLoad(PointerArg);
-
- setupAnalyses();
- MemorySSA &MSSA = *Analyses->MSSA;
- MemorySSAWalker *Walker = Analyses->Walker;
- MemorySSAUpdater Updater(&MSSA);
-
- // Before, the load will be a use of a phi<store, liveonentry>. It should be
- // the same after.
- MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LoadInst));
- MemoryDef *StoreAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreInst));
- MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
- EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
- // The load is currently clobbered by one of the phi arguments, so the walker
- // should determine the clobbering access as the phi.
- EXPECT_EQ(DefiningAccess, Walker->getClobberingMemoryAccess(LoadInst));
- Updater.removeMemoryAccess(StoreAccess);
- MSSA.verifyMemorySSA();
- // After the removal, let's see if we got the right accesses.
- // The load should still point to the phi ...
- EXPECT_EQ(DefiningAccess, LoadAccess->getDefiningAccess());
- // but we should now get live on entry for the clobbering definition of the
- // load, since the walker will look past the phi node, as every argument is
- // the same.
- // XXX: This currently requires either removing the phi or resetting optimized
- // on the load
-
- EXPECT_FALSE(
- MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(LoadInst)));
- // If we reset optimized, we get live on entry.
- LoadAccess->resetOptimized();
- EXPECT_TRUE(
- MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(LoadInst)));
- // The phi should now be a two entry phi with two live on entry defs.
- for (const auto &Op : DefiningAccess->operands()) {
- MemoryAccess *Operand = cast<MemoryAccess>(&*Op);
- EXPECT_TRUE(MSSA.isLiveOnEntryDef(Operand));
- }
-
- // Now we try to remove the single valued phi
- Updater.removeMemoryAccess(DefiningAccess);
- MSSA.verifyMemorySSA();
- // Now the load should be a load of live on entry.
- EXPECT_TRUE(MSSA.isLiveOnEntryDef(LoadAccess->getDefiningAccess()));
-}
-
-// We had a bug with caching where the walker would report that MemoryDef#3's
-// clobber (below) was MemoryDef#1.
-//
-// define void @F(i8*) {
-// %A = alloca i8, i8 1
-// ; 1 = MemoryDef(liveOnEntry)
-// store i8 0, i8* %A
-// ; 2 = MemoryDef(1)
-// store i8 1, i8* %A
-// ; 3 = MemoryDef(2)
-// store i8 2, i8* %A
-// }
-TEST_F(MemorySSATest, TestTripleStore) {
- F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
- GlobalValue::ExternalLinkage, "F", &M);
- B.SetInsertPoint(BasicBlock::Create(C, "", F));
- Type *Int8 = Type::getInt8Ty(C);
- Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
- StoreInst *S1 = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
- StoreInst *S2 = B.CreateStore(ConstantInt::get(Int8, 1), Alloca);
- StoreInst *S3 = B.CreateStore(ConstantInt::get(Int8, 2), Alloca);
-
- setupAnalyses();
- MemorySSA &MSSA = *Analyses->MSSA;
- MemorySSAWalker *Walker = Analyses->Walker;
-
- unsigned I = 0;
- for (StoreInst *V : {S1, S2, S3}) {
- // Everything should be clobbered by its defining access
- MemoryAccess *DefiningAccess = MSSA.getMemoryAccess(V)->getDefiningAccess();
- MemoryAccess *WalkerClobber = Walker->getClobberingMemoryAccess(V);
- EXPECT_EQ(DefiningAccess, WalkerClobber)
- << "Store " << I << " doesn't have the correct clobbering access";
- // If we incremented I inside the EXPECT_EQ above, it would only be
- // incremented when the error message is printed, since EXPECT_EQ only
- // evaluates its streamed operands on failure.
- ++I;
- }
-}
-
-// ...And fixing the above bug made it obvious that, when walking, MemorySSA's
-// walker was caching the initial node it walked. This was fine (albeit
-// mostly redundant) unless the initial node being walked is a clobber for the
-// query. In that case, we'd cache that the node clobbered itself.
-TEST_F(MemorySSATest, TestStoreAndLoad) {
- F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
- GlobalValue::ExternalLinkage, "F", &M);
- B.SetInsertPoint(BasicBlock::Create(C, "", F));
- Type *Int8 = Type::getInt8Ty(C);
- Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
- Instruction *SI = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
- Instruction *LI = B.CreateLoad(Alloca);
-
- setupAnalyses();
- MemorySSA &MSSA = *Analyses->MSSA;
- MemorySSAWalker *Walker = Analyses->Walker;
-
- MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LI);
- EXPECT_EQ(LoadClobber, MSSA.getMemoryAccess(SI));
- EXPECT_TRUE(MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(SI)));
-}
-
-// Another bug (related to the above two fixes): It was noted that, given the
-// following code:
-// ; 1 = MemoryDef(liveOnEntry)
-// store i8 0, i8* %1
-//
-// ...A query to getClobberingMemoryAccess(MemoryAccess*, MemoryLocation) would
-// hand back the store (correctly). A later call to
-// getClobberingMemoryAccess(const Instruction*) would also hand back the store
-// (incorrectly; it should return liveOnEntry).
-//
-// This test checks that repeated calls to either function return what they're
-// meant to.
-TEST_F(MemorySSATest, TestStoreDoubleQuery) {
- F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
- GlobalValue::ExternalLinkage, "F", &M);
- B.SetInsertPoint(BasicBlock::Create(C, "", F));
- Type *Int8 = Type::getInt8Ty(C);
- Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
- StoreInst *SI = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
-
- setupAnalyses();
- MemorySSA &MSSA = *Analyses->MSSA;
- MemorySSAWalker *Walker = Analyses->Walker;
-
- MemoryAccess *StoreAccess = MSSA.getMemoryAccess(SI);
- MemoryLocation StoreLoc = MemoryLocation::get(SI);
- MemoryAccess *Clobber =
- Walker->getClobberingMemoryAccess(StoreAccess, StoreLoc);
- MemoryAccess *LiveOnEntry = Walker->getClobberingMemoryAccess(SI);
-
- EXPECT_EQ(Clobber, StoreAccess);
- EXPECT_TRUE(MSSA.isLiveOnEntryDef(LiveOnEntry));
-
- // Try again (with entries in the cache already) for good measure...
- Clobber = Walker->getClobberingMemoryAccess(StoreAccess, StoreLoc);
- LiveOnEntry = Walker->getClobberingMemoryAccess(SI);
- EXPECT_EQ(Clobber, StoreAccess);
- EXPECT_TRUE(MSSA.isLiveOnEntryDef(LiveOnEntry));
-}
-
-// Bug: During phi optimization, the walker wouldn't cache to the proper result
-// in the farthest-walked BB.
-//
-// Specifically, it would assume that whatever we walked to was a clobber.
-// "Whatever we walked to" isn't a clobber if we hit a cache entry.
-//
-// ...So, we need a test case that looks like:
-// A
-// / \
-// B |
-// \ /
-// C
-//
-// Where, when we try to optimize a thing in 'C', a blocker is found in 'B'.
-// The walk must determine that the blocker exists by using cache entries *while
-// walking* 'B'.
-TEST_F(MemorySSATest, PartialWalkerCacheWithPhis) {
- F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
- GlobalValue::ExternalLinkage, "F", &M);
- B.SetInsertPoint(BasicBlock::Create(C, "A", F));
- Type *Int8 = Type::getInt8Ty(C);
- Constant *One = ConstantInt::get(Int8, 1);
- Constant *Zero = ConstantInt::get(Int8, 0);
- Value *AllocA = B.CreateAlloca(Int8, One, "a");
- Value *AllocB = B.CreateAlloca(Int8, One, "b");
- BasicBlock *IfThen = BasicBlock::Create(C, "B", F);
- BasicBlock *IfEnd = BasicBlock::Create(C, "C", F);
-
- B.CreateCondBr(UndefValue::get(Type::getInt1Ty(C)), IfThen, IfEnd);
-
- B.SetInsertPoint(IfThen);
- Instruction *FirstStore = B.CreateStore(Zero, AllocA);
- B.CreateStore(Zero, AllocB);
- Instruction *ALoad0 = B.CreateLoad(AllocA, "");
- Instruction *BStore = B.CreateStore(Zero, AllocB);
- // Due to use optimization/etc. we make a store to A, which is removed after
- // we build MSSA. This helps keep the test case simple-ish.
- Instruction *KillStore = B.CreateStore(Zero, AllocA);
- Instruction *ALoad = B.CreateLoad(AllocA, "");
- B.CreateBr(IfEnd);
-
- B.SetInsertPoint(IfEnd);
- Instruction *BelowPhi = B.CreateStore(Zero, AllocA);
-
- setupAnalyses();
- MemorySSA &MSSA = *Analyses->MSSA;
- MemorySSAWalker *Walker = Analyses->Walker;
- MemorySSAUpdater Updater(&MSSA);
-
- // Kill `KillStore`; it exists solely so that the load after it won't be
- // optimized to FirstStore.
- Updater.removeMemoryAccess(MSSA.getMemoryAccess(KillStore));
- KillStore->eraseFromParent();
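- // (The memory access has to be removed from MemorySSA before the
- // instruction itself is erased.)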
- auto *ALoadMA = cast<MemoryUse>(MSSA.getMemoryAccess(ALoad));
- EXPECT_EQ(ALoadMA->getDefiningAccess(), MSSA.getMemoryAccess(BStore));
-
- // Populate the cache for the store to AllocB directly after FirstStore. It
- // should point to something in block B (so something in D can't be optimized
- // to it).
- MemoryAccess *Load0Clobber = Walker->getClobberingMemoryAccess(ALoad0);
- EXPECT_EQ(MSSA.getMemoryAccess(FirstStore), Load0Clobber);
-
- // If the bug exists, this will introduce a bad cache entry for %a on BStore.
- // It will point to the store to %b after FirstStore. This only happens during
- // phi optimization.
- MemoryAccess *BottomClobber = Walker->getClobberingMemoryAccess(BelowPhi);
- MemoryAccess *Phi = MSSA.getMemoryAccess(IfEnd);
- EXPECT_EQ(BottomClobber, Phi);
-
- // This query will first check the cache for {%a, BStore}. It should point to
- // FirstStore, not to the store after FirstStore.
- MemoryAccess *UseClobber = Walker->getClobberingMemoryAccess(ALoad);
- EXPECT_EQ(UseClobber, MSSA.getMemoryAccess(FirstStore));
-}
-
-// Test that our walker properly handles loads with the invariant group
-// attribute. It's a bit hacky, since we add the invariant attribute *after*
-// building MSSA. Otherwise, the use optimizer will optimize it for us, which
-// isn't what we want.
-// FIXME: It may be easier/cleaner to just add an 'optimize uses?' flag to MSSA.
-TEST_F(MemorySSATest, WalkerInvariantLoadOpt) {
- F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
- GlobalValue::ExternalLinkage, "F", &M);
- B.SetInsertPoint(BasicBlock::Create(C, "", F));
- Type *Int8 = Type::getInt8Ty(C);
- Constant *One = ConstantInt::get(Int8, 1);
- Value *AllocA = B.CreateAlloca(Int8, One, "");
-
- Instruction *Store = B.CreateStore(One, AllocA);
- Instruction *Load = B.CreateLoad(AllocA);
-
- setupAnalyses();
- MemorySSA &MSSA = *Analyses->MSSA;
- MemorySSAWalker *Walker = Analyses->Walker;
-
- auto *LoadMA = cast<MemoryUse>(MSSA.getMemoryAccess(Load));
- auto *StoreMA = cast<MemoryDef>(MSSA.getMemoryAccess(Store));
- EXPECT_EQ(LoadMA->getDefiningAccess(), StoreMA);
-
- // ...At the time of writing, no cache should exist for LoadMA. Be a bit
- // flexible to future changes.
- Walker->invalidateInfo(LoadMA);
- Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(C, {}));
-
- MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LoadMA);
- EXPECT_EQ(LoadClobber, MSSA.getLiveOnEntryDef());
-}
-
-// Test loads get reoptimized properly by the walker.
-TEST_F(MemorySSATest, WalkerReopt) {
- F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
- GlobalValue::ExternalLinkage, "F", &M);
- B.SetInsertPoint(BasicBlock::Create(C, "", F));
- Type *Int8 = Type::getInt8Ty(C);
- Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
- Instruction *SIA = B.CreateStore(ConstantInt::get(Int8, 0), AllocaA);
- Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
- Instruction *SIB = B.CreateStore(ConstantInt::get(Int8, 0), AllocaB);
- Instruction *LIA = B.CreateLoad(AllocaA);
-
- setupAnalyses();
- MemorySSA &MSSA = *Analyses->MSSA;
- MemorySSAWalker *Walker = Analyses->Walker;
- MemorySSAUpdater Updater(&MSSA);
-
- MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LIA);
- MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LIA));
- EXPECT_EQ(LoadClobber, MSSA.getMemoryAccess(SIA));
- EXPECT_TRUE(MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(SIA)));
- Updater.removeMemoryAccess(LoadAccess);
-
- // Create the load memory access pointing to an unoptimized place.
- MemoryUse *NewLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
- LIA, MSSA.getMemoryAccess(SIB), LIA->getParent(), MemorySSA::End));
- // This should cause it to be optimized.
- EXPECT_EQ(Walker->getClobberingMemoryAccess(NewLoadAccess), LoadClobber);
- EXPECT_EQ(NewLoadAccess->getDefiningAccess(), LoadClobber);
-}
-
-// Test out MemorySSAUpdater::moveBefore
-TEST_F(MemorySSATest, MoveAboveMemoryDef) {
- F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
- GlobalValue::ExternalLinkage, "F", &M);
- B.SetInsertPoint(BasicBlock::Create(C, "", F));
-
- Type *Int8 = Type::getInt8Ty(C);
- Value *A = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
- Value *B_ = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
- Value *C = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "C");
-
- StoreInst *StoreA0 = B.CreateStore(ConstantInt::get(Int8, 0), A);
- StoreInst *StoreB = B.CreateStore(ConstantInt::get(Int8, 0), B_);
- LoadInst *LoadB = B.CreateLoad(B_);
- StoreInst *StoreA1 = B.CreateStore(ConstantInt::get(Int8, 4), A);
- StoreInst *StoreC = B.CreateStore(ConstantInt::get(Int8, 4), C);
- StoreInst *StoreA2 = B.CreateStore(ConstantInt::get(Int8, 4), A);
- LoadInst *LoadC = B.CreateLoad(C);
-
- setupAnalyses();
- MemorySSA &MSSA = *Analyses->MSSA;
- MemorySSAWalker &Walker = *Analyses->Walker;
-
- MemorySSAUpdater Updater(&MSSA);
- StoreC->moveBefore(StoreB);
- Updater.moveBefore(cast<MemoryDef>(MSSA.getMemoryAccess(StoreC)),
- cast<MemoryDef>(MSSA.getMemoryAccess(StoreB)));
-
- MSSA.verifyMemorySSA();
-
- EXPECT_EQ(MSSA.getMemoryAccess(StoreB)->getDefiningAccess(),
- MSSA.getMemoryAccess(StoreC));
- EXPECT_EQ(MSSA.getMemoryAccess(StoreC)->getDefiningAccess(),
- MSSA.getMemoryAccess(StoreA0));
- EXPECT_EQ(MSSA.getMemoryAccess(StoreA2)->getDefiningAccess(),
- MSSA.getMemoryAccess(StoreA1));
- EXPECT_EQ(Walker.getClobberingMemoryAccess(LoadB),
- MSSA.getMemoryAccess(StoreB));
- EXPECT_EQ(Walker.getClobberingMemoryAccess(LoadC),
- MSSA.getMemoryAccess(StoreC));
-
- // exercise block numbering
- EXPECT_TRUE(MSSA.locallyDominates(MSSA.getMemoryAccess(StoreC),
- MSSA.getMemoryAccess(StoreB)));
- EXPECT_TRUE(MSSA.locallyDominates(MSSA.getMemoryAccess(StoreA1),
- MSSA.getMemoryAccess(StoreA2)));
-}
-
-TEST_F(MemorySSATest, Irreducible) {
- // Create the equivalent of
- // x = something
- // if (...)
- // goto second_loop_entry
- // while (...) {
- // second_loop_entry:
- // }
- // use(x)
-
- SmallVector<PHINode *, 8> Inserted;
- IRBuilder<> B(C);
- F = Function::Create(
- FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
- GlobalValue::ExternalLinkage, "F", &M);
-
- // Make blocks
- BasicBlock *IfBB = BasicBlock::Create(C, "if", F);
- BasicBlock *LoopStartBB = BasicBlock::Create(C, "loopstart", F);
- BasicBlock *LoopMainBB = BasicBlock::Create(C, "loopmain", F);
- BasicBlock *AfterLoopBB = BasicBlock::Create(C, "afterloop", F);
- B.SetInsertPoint(IfBB);
- B.CreateCondBr(B.getTrue(), LoopMainBB, LoopStartBB);
- B.SetInsertPoint(LoopStartBB);
- B.CreateBr(LoopMainBB);
- B.SetInsertPoint(LoopMainBB);
- B.CreateCondBr(B.getTrue(), LoopStartBB, AfterLoopBB);
- B.SetInsertPoint(AfterLoopBB);
- Argument *FirstArg = &*F->arg_begin();
- setupAnalyses();
- MemorySSA &MSSA = *Analyses->MSSA;
- MemorySSAUpdater Updater(&MSSA);
- // Create the load memory access
- LoadInst *LoadInst = B.CreateLoad(FirstArg);
- MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
- LoadInst, nullptr, AfterLoopBB, MemorySSA::Beginning));
- Updater.insertUse(LoadAccess);
- MSSA.verifyMemorySSA();
-}