author     Daniel Neilson <dneilson@azul.com>  2018-04-23 19:06:49 +0000
committer  Daniel Neilson <dneilson@azul.com>  2018-04-23 19:06:49 +0000
commit     cc45e923c5c1be906ad9b56b2665b954f78c4a49 (patch)
tree       675972b04202586f04e8dd698e4a2bddc421984b /llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
parent     d03fb0e3e042bdbfd073a7b27cbcd14570f74aae (diff)
[DSE] Teach the pass that atomic memory intrinsics are stores.
Summary:
This change teaches DSE that the atomic memory intrinsics are stores
that can be eliminated, and that can allow other stores to be eliminated.
This change specifically does not teach DSE that these intrinsics
can be partially eliminated (i.e. length reduced, and dest/src changed);
that will be handled in another change.

Reviewers: mkazantsev, skatkov, apilipenko, efriedma, rsmith

Reviewed By: efriedma

Subscribers: dmgreen, llvm-commits

Differential Revision: https://reviews.llvm.org/D45535

llvm-svn: 330629
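To illustrate the first half of that summary, here is a minimal IR sketch (hypothetical, not taken from this patch's tests; the function name is invented): an element-wise unordered-atomic memcpy whose only effect is to fill a local buffer that is dead on return, which DSE can now delete outright.

; Hypothetical example: the atomic memcpy writes only to %buf, which is
; never read again, so DSE can now remove the call entirely.
declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8*, i8*, i32, i32)

define void @dead_atomic_copy(i8* %src) {
  %buf = alloca [16 x i8]
  %dst = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i32 0, i32 0
  ; copy 16 bytes as unordered-atomic i32 elements (element size 4)
  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dst, i8* align 4 %src, i32 16, i32 4)
  ret void
}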
Diffstat (limited to 'llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp')
-rw-r--r--  llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp  |  17 ++++++++++++-----
1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 85f96f59e6d..b3dbe4df7ef 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -160,6 +160,9 @@ static bool hasAnalyzableMemoryWrite(Instruction *I,
     case Intrinsic::memset:
     case Intrinsic::memmove:
     case Intrinsic::memcpy:
+    case Intrinsic::memcpy_element_unordered_atomic:
+    case Intrinsic::memmove_element_unordered_atomic:
+    case Intrinsic::memset_element_unordered_atomic:
     case Intrinsic::init_trampoline:
     case Intrinsic::lifetime_end:
       return true;
@@ -189,7 +192,7 @@ static MemoryLocation getLocForWrite(Instruction *Inst) {
   if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
     return MemoryLocation::get(SI);
 
-  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
+  if (auto *MI = dyn_cast<AnyMemIntrinsic>(Inst)) {
     // memcpy/memmove/memset.
     MemoryLocation Loc = MemoryLocation::getForDest(MI);
     return Loc;
@@ -222,7 +225,7 @@ static MemoryLocation getLocForRead(Instruction *Inst,
 
   // The only instructions that both read and write are the mem transfer
   // instructions (memcpy/memmove).
-  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(Inst))
+  if (auto *MTI = dyn_cast<AnyMemTransferInst>(Inst))
     return MemoryLocation::getForSource(MTI);
   return MemoryLocation();
 }
@@ -249,6 +252,10 @@ static bool isRemovable(Instruction *I) {
     case Intrinsic::memcpy:
       // Don't remove volatile memory intrinsics.
       return !cast<MemIntrinsic>(II)->isVolatile();
+    case Intrinsic::memcpy_element_unordered_atomic:
+    case Intrinsic::memmove_element_unordered_atomic:
+    case Intrinsic::memset_element_unordered_atomic:
+      return true;
     }
   }
 
@@ -273,6 +280,7 @@ static bool isShortenableAtTheEnd(Instruction *I) {
       case Intrinsic::memcpy:
         // Do shorten memory intrinsics.
         // FIXME: Add memmove if it's also safe to transform.
+        // TODO: Add atomic memcpy/memset
         return true;
     }
   }
@@ -287,6 +295,7 @@ static bool isShortenableAtTheEnd(Instruction *I) {
 static bool isShortenableAtTheBeginning(Instruction *I) {
   // FIXME: Handle only memset for now. Supporting memcpy/memmove should be
   // easily done by offsetting the source address.
+  // TODO: Handle atomic memory intrinsics
   IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
   return II && II->getIntrinsicID() == Intrinsic::memset;
 }
@@ -534,7 +543,7 @@ static bool isPossibleSelfRead(Instruction *Inst,
   if (AA.isNoAlias(InstReadLoc, InstStoreLoc))
     return false;
 
-  if (isa<MemCpyInst>(Inst)) {
+  if (isa<AnyMemCpyInst>(Inst)) {
     // LLVM's memcpy overlap semantics are not fully fleshed out (see PR11763)
     // but in practice memcpy(A <- B) either means that A and B are disjoint or
     // are equal (i.e. there are not partial overlaps). Given that, if we have:
@@ -856,8 +865,6 @@ static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
       LoadedLoc = MemoryLocation::get(L);
     } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
       LoadedLoc = MemoryLocation::get(V);
-    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) {
-      LoadedLoc = MemoryLocation::getForSource(MTI);
     } else if (!BBI->mayReadFromMemory()) {
       // Instruction doesn't read memory. Note that stores that weren't removed
       // above will hit this case.
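For the second half of the summary (an atomic intrinsic acting as the overwriting store), a similar hypothetical sketch, again not from this patch's tests: the plain store below is fully covered by the later element-wise atomic memset, so with this patch DSE can treat the memset as a killing store and drop the store.

; Hypothetical example: "store i32 1" is completely overwritten by the
; 4-byte unordered-atomic memset, so DSE can now delete that store.
declare void @llvm.memset.element.unordered.atomic.p0i8.i32(i8*, i8, i32, i32)

define void @overwritten_store(i32* %p) {
  store i32 1, i32* %p
  %q = bitcast i32* %p to i8*
  call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %q, i8 0, i32 4, i32 4)
  ret void
}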