author    George Burgess IV <george.burgess.iv@gmail.com>    2016-02-06 00:42:52 +0000
committer George Burgess IV <george.burgess.iv@gmail.com>    2016-02-06 00:42:52 +0000
commit    304ccee5285534369a79960152a767e7a18398e8 (patch)
tree      e1bdf3a9464b93d10026b303f35945906755e83d
parent    1f5fcf8afd6ebc5d0cd2dc95d36d7dc3dcc90402 (diff)
download  bcm5719-llvm-304ccee5285534369a79960152a767e7a18398e8.tar.gz
          bcm5719-llvm-304ccee5285534369a79960152a767e7a18398e8.zip
Add note of suboptimal behavior in MemorySSA. NFC.
llvm-svn: 259963
-rw-r--r--    llvm/include/llvm/Transforms/Utils/MemorySSA.h    31
1 file changed, 31 insertions, 0 deletions
diff --git a/llvm/include/llvm/Transforms/Utils/MemorySSA.h b/llvm/include/llvm/Transforms/Utils/MemorySSA.h
index b87640cbe5b..abf42743df6 100644
--- a/llvm/include/llvm/Transforms/Utils/MemorySSA.h
+++ b/llvm/include/llvm/Transforms/Utils/MemorySSA.h
@@ -678,6 +678,37 @@ using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;
/// \brief A MemorySSAWalker that does AA walks and caching of lookups to
/// disambiguate accesses.
+///
+/// FIXME: The current implementation of this can take quadratic space in rare
+/// cases. This is fixable, but worth noting until it is fixed.
+///
+/// To trigger this behavior, store to N distinct locations (that AA can prove
+/// don't alias), perform M stores to other memory locations that AA can prove
+/// don't alias any of the initial N locations, and then load from all of the N
+/// locations. Each of the N loads then inserts M cache entries, for N*M
+/// entries in total.
+///
+/// For example:
+/// define i32 @foo() {
+/// %a = alloca i32, align 4
+/// %b = alloca i32, align 4
+/// store i32 0, i32* %a, align 4
+/// store i32 0, i32* %b, align 4
+///
+/// ; Insert M stores to other memory that doesn't alias %a or %b here
+///
+/// %c = load i32, i32* %a, align 4 ; Caches M entries in
+/// ; CachedUpwardsClobberingAccess for the
+/// ; MemoryLocation %a
+/// %d = load i32, i32* %b, align 4 ; Caches M entries in
+/// ; CachedUpwardsClobberingAccess for the
+/// ; MemoryLocation %b
+///
+/// ; For completeness' sake, loading %a or %b again would not cache *another*
+/// ; M entries.
+/// %r = add i32 %c, %d
+/// ret i32 %r
+/// }
class CachingMemorySSAWalker final : public MemorySSAWalker {
public:
CachingMemorySSAWalker(MemorySSA *, AliasAnalysis *, DominatorTree *);
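
To make the space behavior concrete, below is a minimal standalone C++ sketch of the caching pattern the FIXME describes. It is an illustration under simplifying assumptions, not LLVM code: Access, Location, and Cache are hypothetical stand-ins for MemoryAccess, MemoryLocation, and the walker's CachedUpwardsClobberingAccess map, which (going by the ConstMemoryAccessPair alias visible in the hunk context) appears to be keyed on (access, location) pairs.

#include <cstdio>
#include <map>
#include <utility>

// Hypothetical stand-ins for MemoryAccess and MemoryLocation; the real
// MemorySSA types are pointers/objects, not ints.
using Access = int;
using Location = int;

int main() {
  // Stand-in for CachedUpwardsClobberingAccess: one entry per
  // (access, location) pair the walker has resolved.
  std::map<std::pair<Access, Location>, Access> Cache;

  const int N = 4; // loads of distinct, non-aliasing locations (%a, %b, ...)
  const int M = 8; // intervening stores to unrelated memory

  // Walking upwards from each of the N loads steps over all M unrelated
  // stores, and a result is cached for every (store, location) pair visited.
  for (Location Loc = 0; Loc != N; ++Loc)
    for (Access Store = 0; Store != M; ++Store)
      Cache[{Store, Loc}] = -1; // -1: stand-in for the clobbering access

  // N*M entries in total, i.e. quadratic space when M grows with N.
  std::printf("cache entries: %zu (N*M = %d)\n", Cache.size(), N * M);
  return 0;
}

Repeating a load in this model hits the existing entries rather than adding new ones, matching the comment's note that loading %a or %b again would not cache *another* M entries.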