Diffstat (limited to 'llvm/lib/Transforms/Scalar/EarlyCSE.cpp')
-rw-r--r-- llvm/lib/Transforms/Scalar/EarlyCSE.cpp | 54
1 file changed, 44 insertions(+), 10 deletions(-)
diff --git a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
index cb02b2cbc7e..5f88460b464 100644
--- a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -498,15 +498,43 @@ private:
return;
// FIXME: Removing a store here can leave MemorySSA in an unoptimized state
// by creating MemoryPhis that have identical arguments and by creating
- // MemoryUses whose defining access is not an actual clobber.
- if (MemoryAccess *MA = MSSA->getMemoryAccess(Inst))
- MSSA->removeMemoryAccess(MA);
+ // MemoryUses whose defining access is not an actual clobber. We handle the
+ // phi case here, but the non-optimized MemoryUse case is not handled. Once
+ // MemorySSA tracks whether uses are optimized this will be taken care of on
+ // the MemorySSA side.
+ if (MemoryAccess *MA = MSSA->getMemoryAccess(Inst)) {
+ // Optimize MemoryPhi nodes that may become redundant by having all the
+ // same input values once MA is removed.
+ SmallVector<MemoryPhi *, 4> PhisToCheck;
+ SmallVector<MemoryAccess *, 8> WorkQueue;
+ WorkQueue.push_back(MA);
+ // Process MemoryPhi nodes in FIFO order using an ever-growing vector since
+ // we shouldn't be processing that many phis and this will avoid an
+ // allocation in almost all cases.
+ for (unsigned I = 0; I < WorkQueue.size(); ++I) {
+ MemoryAccess *WI = WorkQueue[I];
+
+ for (auto *U : WI->users())
+ if (MemoryPhi *MP = dyn_cast<MemoryPhi>(U))
+ PhisToCheck.push_back(MP);
+
+ MSSA->removeMemoryAccess(WI);
+
+ for (MemoryPhi *MP : PhisToCheck) {
+ MemoryAccess *FirstIn = MP->getIncomingValue(0);
+ if (all_of(MP->incoming_values(),
+ [=](Use &In) { return In == FirstIn; }))
+ WorkQueue.push_back(MP);
+ }
+ PhisToCheck.clear();
+ }
+ }
}
};
}
-/// Determine if the memory referenced by LaterInst is from the same heap version
-/// as EarlierInst.
+/// Determine if the memory referenced by LaterInst is from the same heap
+/// version as EarlierInst.
/// This is currently called in two scenarios:
///
/// load p
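
The added block in the hunk above replaces a bare removeMemoryAccess call with a small worklist: removing an access points its users at the removed access's defining access, which can leave a MemoryPhi with identical inputs, and any such phi is queued and removed in turn. Below is a minimal standalone sketch of that pattern using toy types; Access, removeAccessAndRedundantPhis, and the printf stand-in for removeMemoryAccess are illustrative, not LLVM's MemorySSA API.

#include <algorithm>
#include <cstdio>
#include <vector>

// Toy stand-in for a MemorySSA access: a phi tracks incoming values, and
// every access knows its users and the access that defines it.
struct Access {
  int id;                          // label for the printout only
  bool isPhi = false;
  Access *definingAccess = nullptr;
  std::vector<Access *> incoming;  // phi operands (empty otherwise)
  std::vector<Access *> users;
};

static void removeAccessAndRedundantPhis(Access *MA) {
  std::vector<Access *> WorkQueue{MA};
  std::vector<Access *> PhisToCheck;
  // FIFO processing by index, as in the patch: push_back may grow the
  // vector while we iterate, and indices stay valid where iterators don't.
  for (unsigned I = 0; I < WorkQueue.size(); ++I) {
    Access *WI = WorkQueue[I];
    // Remember phi users before the removal rewrites them.
    for (Access *U : WI->users)
      if (U->isPhi)
        PhisToCheck.push_back(U);
    // Stand-in for MSSA->removeMemoryAccess(WI): point every use of WI at
    // WI's defining access instead. This is what can collapse a phi's
    // inputs to a single value.
    std::printf("removing access %d\n", WI->id);
    for (Access *U : WI->users)
      std::replace(U->incoming.begin(), U->incoming.end(), WI,
                   WI->definingAccess);
    // A phi whose inputs are now all identical is redundant; queue it so
    // the collapse propagates transitively.
    for (Access *MP : PhisToCheck) {
      Access *FirstIn = MP->incoming.front();
      if (std::all_of(MP->incoming.begin(), MP->incoming.end(),
                      [=](Access *In) { return In == FirstIn; }))
        WorkQueue.push_back(MP);
    }
    PhisToCheck.clear();
  }
}

int main() {
  // liveOnEntry -> storeDef, and a phi merging storeDef with liveOnEntry.
  Access LiveOnEntry{0}, StoreDef{1}, Phi{2};
  StoreDef.definingAccess = &LiveOnEntry;
  Phi.isPhi = true;
  Phi.incoming = {&StoreDef, &LiveOnEntry};
  StoreDef.users = {&Phi};
  // Removing the store leaves the phi with two identical inputs, so the
  // worklist removes the phi as well: prints accesses 1 then 2.
  removeAccessAndRedundantPhis(&StoreDef);
}

Indexing into the growing vector rather than iterating over it mirrors the patch: push_back may reallocate and invalidate iterators, but indices stay valid, and a SmallVector's inline storage avoids a heap allocation in the common small case.
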
@@ -536,11 +564,17 @@ bool EarlyCSE::isSameMemGeneration(unsigned EarlierGeneration,
// LaterInst, if LaterDef dominates EarlierInst then it can't occur between
// EarlierInst and LaterInst and neither can any other write that potentially
// clobbers LaterInst.
- // FIXME: This is currently fairly expensive since it does an AA check even
- // for MemoryUses that were already optimized by MemorySSA construction.
- // Re-visit once MemorySSA optimized use tracking change has been committed.
- MemoryAccess *LaterDef =
- MSSA->getWalker()->getClobberingMemoryAccess(LaterInst);
+ // FIXME: Use getClobberingMemoryAccess only for stores since it is currently
+ // fairly expensive to call on MemoryUses since it does an AA check even for
+ // MemoryUses that were already optimized by MemorySSA construction. Once
+ // the MemorySSA optimized-use tracking change has been committed we can use
+ // getClobberingMemoryAccess for MemoryUses as well.
+ MemoryAccess *LaterMA = MSSA->getMemoryAccess(LaterInst);
+ MemoryAccess *LaterDef;
+ if (auto *LaterUse = dyn_cast<MemoryUse>(LaterMA))
+ LaterDef = LaterUse->getDefiningAccess();
+ else
+ LaterDef = MSSA->getWalker()->getClobberingMemoryAccess(LaterInst);
return MSSA->dominates(LaterDef, MSSA->getMemoryAccess(EarlierInst));
}
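
The second hunk sidesteps the walker for loads: a MemoryUse's defining access, already optimized during MemorySSA construction, is taken directly, and only MemoryDefs (stores, calls) pay for the AA-based clobber query. A minimal standalone sketch of that dispatch, again with toy types rather than LLVM's API:

#include <cstdio>

// Toy access: isUse marks a load's MemoryUse; definingAccess models the
// result MemorySSA already computed at construction time.
struct MemoryAccess {
  bool isUse;
  MemoryAccess *definingAccess;
};

// Hypothetical stand-in for MSSA->getWalker()->getClobberingMemoryAccess():
// the real walker climbs def chains doing alias queries, which is the cost
// the patch avoids for uses.
static MemoryAccess *expensiveClobberWalk(MemoryAccess *MA) {
  std::puts("running AA-based clobber walk");
  return MA->definingAccess;  // toy: pretend the walk stops immediately
}

static MemoryAccess *getLaterDef(MemoryAccess *LaterMA) {
  if (LaterMA->isUse)                    // load: reuse the construction-time
    return LaterMA->definingAccess;      // optimization, no AA queries
  return expensiveClobberWalk(LaterMA);  // store/call: full clobber query
}

int main() {
  MemoryAccess Def{false, nullptr};
  MemoryAccess Load{true, &Def};
  MemoryAccess Store{false, &Def};
  getLaterDef(&Load);   // silent: skips the walk
  getLaterDef(&Store);  // prints the walk message
}

The getLaterDef split mirrors the dyn_cast<MemoryUse> branch in the patch: the cheap path is a plain pointer read, while the expensive path only runs for accesses that may write memory.
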