author    Geoff Berry <gberry@codeaurora.org>    2016-10-25 16:18:47 +0000
committer Geoff Berry <gberry@codeaurora.org>    2016-10-25 16:18:47 +0000
commit    91e9a5cc236941473e9791de0071f02378e0497d (patch)
tree      761b0e5c3bc14dec8c0bf54766a65a2b2a36e861 /llvm/lib/Transforms
parent    58139d1758c116e4f86d859fe99b11dafca1ec9f (diff)
[EarlyCSE] Make MemorySSA memory dependency check more aggressive.
Now that MemorySSA keeps track of whether MemoryUses are optimized, use
getClobberingMemoryAccess() to check MemoryUse memory dependencies since it
should no longer be so expensive.

This is a follow-up change to https://reviews.llvm.org/D25881

llvm-svn: 285080
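For context, below is a minimal, self-contained C++ sketch of the dependency check that the second hunk of the diff collapses into a single walker query. It is not part of the commit: the function name and the assumption that a valid, up-to-date MemorySSA analysis (MSSA) is in hand are illustrative only, and the header lived under llvm/Transforms/Utils at the time of this commit before moving to llvm/Analysis in later releases.

// Minimal sketch, not part of the patch; assumes a valid MemorySSA analysis.
#include "llvm/IR/Instruction.h"
#include "llvm/Transforms/Utils/MemorySSA.h" // llvm/Analysis/MemorySSA.h in later LLVM releases

using namespace llvm;

// Returns true if LaterInst reads the same memory state that EarlierInst saw,
// i.e. no write that could clobber LaterInst lies between the two instructions.
static bool readsSameMemoryGeneration(MemorySSA *MSSA, Instruction *EarlierInst,
                                      Instruction *LaterInst) {
  // After D25881 a MemoryUse remembers whether its clobber has already been
  // computed, so asking the walker for the true clobber should no longer
  // repeat an alias query for uses optimized during MemorySSA construction.
  MemoryAccess *LaterDef =
      MSSA->getWalker()->getClobberingMemoryAccess(LaterInst);
  // If the clobbering write dominates EarlierInst, nothing between EarlierInst
  // and LaterInst can have modified the memory that LaterInst reads.
  return MSSA->dominates(LaterDef, MSSA->getMemoryAccess(EarlierInst));
}

With the optimized-use tracking in place, the same dominance test can be applied uniformly to loads and stores, which is why the special-cased getDefiningAccess() path for MemoryUses is removed in the diff below.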
Diffstat (limited to 'llvm/lib/Transforms')
-rw-r--r--  llvm/lib/Transforms/Scalar/EarlyCSE.cpp | 22
1 file changed, 6 insertions(+), 16 deletions(-)
diff --git a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
index 5f88460b464..9bf638dcbae 100644
--- a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -496,12 +496,11 @@ private:
void removeMSSA(Instruction *Inst) {
if (!MSSA)
return;
- // FIXME: Removing a store here can leave MemorySSA in an unoptimized state
- // by creating MemoryPhis that have identical arguments and by creating
+ // Removing a store here can leave MemorySSA in an unoptimized state by
+ // creating MemoryPhis that have identical arguments and by creating
// MemoryUses whose defining access is not an actual clobber. We handle the
- // phi case here, but the non-optimized MemoryUse case is not handled. Once
- // MemorySSA tracks whether uses are optimized this will be taken care of on
- // the MemorySSA side.
+ // phi case eagerly here. The non-optimized MemoryUse case is lazily
+ // updated by MemorySSA getClobberingMemoryAccess.
if (MemoryAccess *MA = MSSA->getMemoryAccess(Inst)) {
// Optimize MemoryPhi nodes that may become redundant by having all the
// same input values once MA is removed.
@@ -564,17 +563,8 @@ bool EarlyCSE::isSameMemGeneration(unsigned EarlierGeneration,
// LaterInst, if LaterDef dominates EarlierInst then it can't occur between
// EarlierInst and LaterInst and neither can any other write that potentially
// clobbers LaterInst.
- // FIXME: Use getClobberingMemoryAccess only for stores since it is currently
- // fairly expensive to call on MemoryUses since it does an AA check even for
- // MemoryUses that were already optimized by MemorySSA construction. Once
- // MemorySSA optimized use tracking change has been committed we can use
- // getClobberingMemoryAccess for MemoryUses as well.
- MemoryAccess *LaterMA = MSSA->getMemoryAccess(LaterInst);
- MemoryAccess *LaterDef;
- if (auto *LaterUse = dyn_cast<MemoryUse>(LaterMA))
- LaterDef = LaterUse->getDefiningAccess();
- else
- LaterDef = MSSA->getWalker()->getClobberingMemoryAccess(LaterInst);
+ MemoryAccess *LaterDef =
+ MSSA->getWalker()->getClobberingMemoryAccess(LaterInst);
return MSSA->dominates(LaterDef, MSSA->getMemoryAccess(EarlierInst));
}