-rw-r--r--  llvm/include/llvm/Transforms/Utils/MemorySSA.h | 31
1 file changed, 31 insertions(+), 0 deletions(-)
diff --git a/llvm/include/llvm/Transforms/Utils/MemorySSA.h b/llvm/include/llvm/Transforms/Utils/MemorySSA.h
index b87640cbe5b..abf42743df6 100644
--- a/llvm/include/llvm/Transforms/Utils/MemorySSA.h
+++ b/llvm/include/llvm/Transforms/Utils/MemorySSA.h
@@ -678,6 +678,37 @@ using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;
 /// \brief A MemorySSAWalker that does AA walks and caching of lookups to
 /// disambiguate accesses.
+///
+/// FIXME: The current implementation of this can take quadratic space in rare
+/// cases. This can be fixed, but is worth noting until it is.
+///
+/// To trigger this behavior, store to N distinct locations (that AA can prove
+/// don't alias), perform M stores to other memory locations that AA can prove
+/// don't alias any of the initial N locations, and then load from each of the
+/// N locations. Each of the N loads then inserts M cache entries, for N * M
+/// entries in total.
+///
+/// For example:
+/// define i32 @foo() {
+/// %a = alloca i32, align 4
+/// %b = alloca i32, align 4
+/// store i32 0, i32* %a, align 4
+/// store i32 0, i32* %b, align 4
+///
+/// ; Insert M stores to other memory that doesn't alias %a or %b here
+///
+/// %c = load i32, i32* %a, align 4 ; Caches M entries in
+/// ; CachedUpwardsClobberingAccess for the
+/// ; MemoryLocation %a
+/// %d = load i32, i32* %b, align 4 ; Caches M entries in
+/// ; CachedUpwardsClobberingAccess for the
+/// ; MemoryLocation %b
+///
+/// ; For completeness' sake, loading %a or %b again would not cache *another*
+/// ; M entries.
+/// %r = add i32 %c, %d
+/// ret i32 %r
+/// }
 class CachingMemorySSAWalker final : public MemorySSAWalker {
 public:
   CachingMemorySSAWalker(MemorySSA *, AliasAnalysis *, DominatorTree *);
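
To make the growth pattern concrete outside of IR, here is a minimal,
self-contained C++ sketch of the caching behavior the new comment describes.
The MemoryAccess and MemoryLocation types and the map below are simplified
stand-ins, not the real LLVM classes; only the name
CachedUpwardsClobberingAccess comes from the header. N loads that each walk
upward past M non-aliasing stores leave N * M entries behind:

    #include <cstdio>
    #include <map>
    #include <utility>

    struct MemoryAccess {};      // stand-in for llvm::MemoryAccess
    using MemoryLocation = int;  // stand-in for llvm::MemoryLocation
    using MemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;

    int main() {
      const int N = 4;    // loads from distinct, mutually non-aliasing locations
      const int M = 100;  // intervening stores that alias none of those locations

      MemoryAccess Stores[M];
      std::map<MemoryAccessPair, const MemoryAccess *>
          CachedUpwardsClobberingAccess;

      // Each load's upward walk skips all M stores, and every skipped store
      // gets its own cache entry keyed on (store, the load's location).
      for (MemoryLocation Loc = 0; Loc != N; ++Loc)
        for (const MemoryAccess &Store : Stores)
          CachedUpwardsClobberingAccess[{&Store, Loc}] = nullptr;

      std::printf("cache entries: %zu (N * M = %d)\n",
                  CachedUpwardsClobberingAccess.size(), N * M);
    }

With N = 4 and M = 100 this prints 400 cache entries: the space used grows
with the product of the two counts, not their sum, which is the quadratic
behavior the FIXME warns about.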