Diffstat (limited to 'llvm/lib/Transforms')
-rw-r--r--  llvm/lib/Transforms/Utils/MemorySSA.cpp  38
1 file changed, 30 insertions, 8 deletions
diff --git a/llvm/lib/Transforms/Utils/MemorySSA.cpp b/llvm/lib/Transforms/Utils/MemorySSA.cpp
index 97c728bbe20..896b24fc16e 100644
--- a/llvm/lib/Transforms/Utils/MemorySSA.cpp
+++ b/llvm/lib/Transforms/Utils/MemorySSA.cpp
@@ -824,6 +824,10 @@ void CachingMemorySSAWalker::doCacheInsert(const MemoryAccess *M,
MemoryAccess *Result,
const UpwardsMemoryQuery &Q,
const MemoryLocation &Loc) {
+ // This is fine for Phis, since there are times where we can't optimize them.
+ // Making a def its own clobber is never correct, though.
+ assert((Result != M || isa<MemoryPhi>(M)) &&
+ "Something can't clobber itself!");
++NumClobberCacheInserts;
if (Q.IsCall)
CachedUpwardsClobberingCall[M] = Result;
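
The assertion added above encodes the cache's basic invariant: a MemoryDef's clobber always lies strictly above it, so a self-entry can only come from a walker bug, whereas a MemoryPhi that could not be optimized may legitimately be recorded as its own result. Below is a minimal standalone sketch of that rule; Access, ClobberCache, and cacheClobber are made-up names for illustration, not the real MemorySSA types.

#include <cassert>
#include <map>

// Toy stand-ins for MemoryAccess/MemoryPhi; illustration only.
struct Access {
  bool IsPhi = false;
};

// Maps a queried access to its cached clobbering access.
std::map<const Access *, Access *> ClobberCache;

void cacheClobber(const Access *M, Access *Result) {
  // A phi we failed to optimize may map to itself; a def recorded as its
  // own clobber is always a bug.
  assert((Result != M || M->IsPhi) && "Something can't clobber itself!");
  ClobberCache[M] = Result;
}

int main() {
  Access Def, Phi, Older;
  Phi.IsPhi = true;
  cacheClobber(&Def, &Older); // fine: a def clobbered by something above it
  cacheClobber(&Phi, &Phi);   // fine: an unoptimized phi maps to itself
  // cacheClobber(&Def, &Def) would trip the assert.
  return 0;
}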
@@ -873,9 +877,11 @@ MemoryAccessPair CachingMemorySSAWalker::UpwardsDFSWalk(
MemoryAccess *CurrAccess = *DFI;
if (MSSA->isLiveOnEntryDef(CurrAccess))
return {CurrAccess, Loc};
- if (auto CacheResult = doCacheLookup(CurrAccess, Q, Loc))
- return {CacheResult, Loc};
- // If this is a MemoryDef, check whether it clobbers our current query.
+ // If this is a MemoryDef, check whether it clobbers our current query. This
+ // needs to be done before consulting the cache, because the cache reports
+ // the clobber for CurrAccess. If CurrAccess is a clobber for this query,
+ // and we ask the cache for information first, then we might skip this
+ // clobber, which is bad.
if (auto *MD = dyn_cast<MemoryDef>(CurrAccess)) {
// If we hit the top, stop following this path.
// While we can do lookups, we can't sanely do inserts here unless we were
@@ -886,6 +892,8 @@ MemoryAccessPair CachingMemorySSAWalker::UpwardsDFSWalk(
break;
}
}
+ if (auto CacheResult = doCacheLookup(CurrAccess, Q, Loc))
+ return {CacheResult, Loc};
// We need to know whether it is a phi so we can track backedges.
// Otherwise, walk all upward defs.
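
The reordering in the hunk above matters because a cache hit on CurrAccess answers a different question: it names CurrAccess's own clobber, which may sit further up the def chain. Consulting the cache before testing whether CurrAccess itself clobbers the current query could therefore walk straight past the real answer. A simplified, self-contained sketch of the corrected ordering follows; Access, Query, isClobber, Cache, and walkUpwards are illustrative stand-ins, not the real walker API.

#include <map>

struct Query {};                      // query details elided for the sketch
struct Access {
  Access *DefiningAccess = nullptr;   // next access up the chain
  bool IsLiveOnEntry = false;
  bool IsDef = false;
};

// Stand-in for the real alias-analysis-based clobber test.
bool isClobber(const Access *, const Query &) { return false; }

std::map<const Access *, Access *> Cache;

Access *walkUpwards(Access *Start, const Query &Q) {
  for (Access *Curr = Start; Curr; Curr = Curr->DefiningAccess) {
    if (Curr->IsLiveOnEntry)
      return Curr;                    // reached the top of the function
    // Test Curr itself *before* trusting the cache: a cached value is
    // Curr's own clobber and may lie above Curr, so reading it first
    // could jump past Curr even when Curr clobbers this query.
    if (Curr->IsDef && isClobber(Curr, Q))
      return Curr;
    auto It = Cache.find(Curr);
    if (It != Cache.end())
      return It->second;
  }
  return nullptr;                     // no clobber found on this chain
}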
@@ -957,8 +965,15 @@ MemoryAccessPair CachingMemorySSAWalker::UpwardsDFSWalk(
return {MSSA->getLiveOnEntryDef(), Q.StartingLoc};
const BasicBlock *OriginalBlock = StartingAccess->getBlock();
+ assert(DFI.getPathLength() > 0 && "We dropped our path?");
unsigned N = DFI.getPathLength();
- for (; N != 0; --N) {
+ // If we found a clobbering def, the last element in the path will be our
+ // clobber, so we don't want to cache that to itself. OTOH, if we optimized a
+ // phi, we can add the last thing in the path to the cache, since that won't
+ // be the result.
+ if (DFI.getPath(N - 1) == ModifyingAccess)
+ --N;
+ for (; N > 1; --N) {
MemoryAccess *CacheAccess = DFI.getPath(N - 1);
BasicBlock *CurrBlock = CacheAccess->getBlock();
if (!FollowingBackedge)
@@ -970,8 +985,8 @@ MemoryAccessPair CachingMemorySSAWalker::UpwardsDFSWalk(
}
// Cache everything else on the way back. The caller should cache
- // Q.OriginalAccess for us.
- for (; N != 0; --N) {
+ // StartingAccess for us.
+ for (; N > 1; --N) {
MemoryAccess *CacheAccess = DFI.getPath(N - 1);
doCacheInsert(CacheAccess, ModifyingAccess, Q, Loc);
}
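
Both loops above walk the DFS path back from where the search stopped and record ModifyingAccess as the clobber for each entry. The new trimming step keeps the clobber itself out of that range (relevant only when the walk ended at a clobbering def rather than at an optimized phi), and stopping at N > 1 leaves the starting access for the caller to cache. A rough standalone sketch of that epilogue, collapsing the two loops into one and ignoring the backedge handling; Path and cacheClobber are illustrative names, with a plain vector standing in for the DFS iterator's path.

#include <cassert>
#include <map>
#include <vector>

struct Access {};
std::map<Access *, Access *> Cache;

// Stand-in for doCacheInsert: record To as the clobber for From.
void cacheClobber(Access *From, Access *To) { Cache[From] = To; }

// Path[0] is the access the walk started from; Path.back() is where the
// walk stopped (a clobbering def, or the last step of an optimized phi).
void cachePath(std::vector<Access *> &Path, Access *ModifyingAccess) {
  assert(!Path.empty() && "We dropped our path?");
  size_t N = Path.size();
  // If the walk ended at the clobber itself, drop it from the range we
  // cache: making it its own clobber would be wrong. After a phi was
  // optimized, the last path element is not the result, so it stays.
  if (Path[N - 1] == ModifyingAccess)
    --N;
  // Cache everything down to, but not including, Path[0]; the starting
  // access is left for the caller to cache.
  for (; N > 1; --N)
    cacheClobber(Path[N - 1], ModifyingAccess);
}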
@@ -1024,7 +1039,9 @@ CachingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *StartingAccess,
: StartingUseOrDef;
MemoryAccess *Clobber = getClobberingMemoryAccess(DefiningAccess, Q);
- doCacheInsert(Q.OriginalAccess, Clobber, Q, Q.StartingLoc);
+ // Only cache this if it wouldn't make Clobber point to itself.
+ if (Clobber != StartingAccess)
+ doCacheInsert(Q.OriginalAccess, Clobber, Q, Q.StartingLoc);
DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
DEBUG(dbgs() << *StartingUseOrDef << "\n");
DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
@@ -1063,12 +1080,17 @@ CachingMemorySSAWalker::getClobberingMemoryAccess(const Instruction *I) {
return DefiningAccess;
MemoryAccess *Result = getClobberingMemoryAccess(DefiningAccess, Q);
+ // DFS won't cache a result for DefiningAccess. So, if DefiningAccess isn't
+ // our clobber, be sure that it gets a cache entry, too.
+ if (Result != DefiningAccess)
+ doCacheInsert(DefiningAccess, Result, Q, Q.StartingLoc);
doCacheInsert(Q.OriginalAccess, Result, Q, Q.StartingLoc);
// TODO: When this implementation is more mature, we may want to figure out
// what this additional caching buys us. It's most likely A Good Thing.
if (Q.IsCall)
for (const MemoryAccess *MA : Q.VisitedCalls)
- doCacheInsert(MA, Result, Q, Q.StartingLoc);
+ if (MA != Result)
+ doCacheInsert(MA, Result, Q, Q.StartingLoc);
DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
DEBUG(dbgs() << *DefiningAccess << "\n");
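
The last two hunks apply the same no-self-clobber rule at the walker's entry points: the starting def gets an explicit cache entry (since UpwardsDFSWalk deliberately leaves it to the caller), but only when it is not the clobber itself, and the same guard protects every visited call on call queries. A condensed sketch of that caller-side pattern, shaped after the second overload; Access, Query, cacheClobber, walk, and getClobber are illustrative stand-ins, not the real API.

#include <map>
#include <vector>

struct Access {};
struct Query {
  bool IsCall = false;
  std::vector<Access *> VisitedCalls;
};

std::map<Access *, Access *> Cache;
void cacheClobber(Access *From, Access *To) { Cache[From] = To; }

// Stand-in for the DFS-walk-based overload; assume it returns the clobber
// for DefiningAccess but never caches an entry for DefiningAccess itself.
Access *walk(Access *DefiningAccess, const Query &Q);

Access *getClobber(Access *Original, Access *DefiningAccess, const Query &Q) {
  Access *Result = walk(DefiningAccess, Q);
  // The walk leaves its own starting point uncached, so add an entry for
  // DefiningAccess here, unless it *is* the clobber, in which case the
  // entry would point it at itself.
  if (Result != DefiningAccess)
    cacheClobber(DefiningAccess, Result);
  cacheClobber(Original, Result);
  // For call queries, every call visited along the way shares this clobber;
  // again, skip the access that is itself the result.
  if (Q.IsCall)
    for (Access *MA : Q.VisitedCalls)
      if (MA != Result)
        cacheClobber(MA, Result);
  return Result;
}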