[llvm] r259963 - Add note of suboptimal behavior in MemorySSA. NFC.
George Burgess IV via llvm-commits
llvm-commits at lists.llvm.org
Fri Feb 5 16:42:52 PST 2016
Author: gbiv
Date: Fri Feb 5 18:42:52 2016
New Revision: 259963
URL: http://llvm.org/viewvc/llvm-project?rev=259963&view=rev
Log:
Add note of suboptimal behavior in MemorySSA. NFC.
Modified:
llvm/trunk/include/llvm/Transforms/Utils/MemorySSA.h
Modified: llvm/trunk/include/llvm/Transforms/Utils/MemorySSA.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Transforms/Utils/MemorySSA.h?rev=259963&r1=259962&r2=259963&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Transforms/Utils/MemorySSA.h (original)
+++ llvm/trunk/include/llvm/Transforms/Utils/MemorySSA.h Fri Feb 5 18:42:52 2016
@@ -678,6 +678,37 @@ using ConstMemoryAccessPair = std::pair<
/// \brief A MemorySSAWalker that does AA walks and caching of lookups to
/// disambiguate accesses.
+///
+/// FIXME: The current implementation of this can take quadratic space in rare
+/// cases. This can be fixed, but it is worth noting until then.
+///
+/// In order to trigger this behavior, you need to store to N distinct locations
+/// (that AA can prove don't alias), perform M stores to other memory
+/// locations that AA can prove don't alias any of the initial N locations, and
+/// then load from all of the N locations. In this case, we insert M cache
+/// entries for each of the N loads.
+///
+/// For example:
+/// define i32 @foo() {
+/// %a = alloca i32, align 4
+/// %b = alloca i32, align 4
+/// store i32 0, i32* %a, align 4
+/// store i32 0, i32* %b, align 4
+///
+/// ; Insert M stores to other memory that doesn't alias %a or %b here
+///
+/// %c = load i32, i32* %a, align 4 ; Caches M entries in
+/// ; CachedUpwardsClobberingAccess for the
+/// ; MemoryLocation %a
+/// %d = load i32, i32* %b, align 4 ; Caches M entries in
+/// ; CachedUpwardsClobberingAccess for the
+/// ; MemoryLocation %b
+///
+/// ; For completeness' sake, loading %a or %b again would not cache *another*
+/// ; M entries.
+/// %r = add i32 %c, %d
+/// ret i32 %r
+/// }
class CachingMemorySSAWalker final : public MemorySSAWalker {
public:
CachingMemorySSAWalker(MemorySSA *, AliasAnalysis *, DominatorTree *);
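To make the note above concrete, here is a hypothetical, filled-in instance of
the IR pattern the comment describes, with M = 2. The function name
@quadratic_cache_example and the extra allocas %x and %y are illustrative only
and are not part of the commit:

define i32 @quadratic_cache_example() {
  ; N = 2 locations that will be loaded later.
  %a = alloca i32, align 4
  %b = alloca i32, align 4
  store i32 0, i32* %a, align 4
  store i32 0, i32* %b, align 4

  ; M = 2 stores to memory that AA can prove does not alias %a or %b.
  %x = alloca i32, align 4
  %y = alloca i32, align 4
  store i32 1, i32* %x, align 4
  store i32 2, i32* %y, align 4

  ; Each load walks upwards past the M intervening stores, so the walker
  ; caches an entry per store for each load (roughly N * M entries total),
  ; which is the quadratic-space behavior the FIXME refers to.
  %c = load i32, i32* %a, align 4
  %d = load i32, i32* %b, align 4

  %r = add i32 %c, %d
  ret i32 %r
}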