Diffstat (limited to 'llvm/lib/Transforms')
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp | 90
1 file changed, 60 insertions(+), 30 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp
index 86bbfb15986..0c7e7ab66a9 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp
@@ -14,35 +14,65 @@
using namespace llvm;
-Instruction *InstCombiner::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
- switch (RMWI.getOperation()) {
- default:
- break;
- case AtomicRMWInst::Add:
- case AtomicRMWInst::Sub:
- case AtomicRMWInst::Or:
- // Replace atomicrmw <op> addr, 0 => load atomic addr.
-
- // Volatile RMWs perform a load and a store, we cannot replace
- // this by just a load.
- if (RMWI.isVolatile())
- break;
-
- auto *CI = dyn_cast<ConstantInt>(RMWI.getValOperand());
- if (!CI || !CI->isZero())
- break;
- // Check if the required ordering is compatible with an
- // atomic load.
- AtomicOrdering Ordering = RMWI.getOrdering();
- assert(Ordering != AtomicOrdering::NotAtomic &&
- Ordering != AtomicOrdering::Unordered &&
- "AtomicRMWs don't make sense with Unordered or NotAtomic");
- if (Ordering != AtomicOrdering::Acquire &&
- Ordering != AtomicOrdering::Monotonic)
- break;
- LoadInst *Load = new LoadInst(RMWI.getType(), RMWI.getPointerOperand());
- Load->setAtomic(Ordering, RMWI.getSyncScopeID());
- return Load;
+namespace {
+/// Return true if and only if the given instruction does not modify the memory
+/// location referenced. Note that an idempotent atomicrmw may still have
+/// ordering effects on nearby instructions, or be volatile.
+/// TODO: Common w/ the version in AtomicExpandPass, and change the term used.
+/// Idempotent is confusing in this context.
+bool isIdempotentRMW(AtomicRMWInst &RMWI) {
+  auto *C = dyn_cast<ConstantInt>(RMWI.getValOperand());
+  if (!C)
+ // TODO: Handle fadd, fsub?
+ return false;
+
+ AtomicRMWInst::BinOp Op = RMWI.getOperation();
+  switch (Op) {
+ case AtomicRMWInst::Add:
+ case AtomicRMWInst::Sub:
+ case AtomicRMWInst::Or:
+ case AtomicRMWInst::Xor:
+ return C->isZero();
+ case AtomicRMWInst::And:
+ return C->isMinusOne();
+ case AtomicRMWInst::Min:
+ return C->isMaxValue(true);
+ case AtomicRMWInst::Max:
+ return C->isMinValue(true);
+ case AtomicRMWInst::UMin:
+ return C->isMaxValue(false);
+ case AtomicRMWInst::UMax:
+ return C->isMinValue(false);
+ default:
+ return false;
}
- return nullptr;
+}
+}
+
+Instruction *InstCombiner::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
+ if (!isIdempotentRMW(RMWI))
+ return nullptr;
+
+ // TODO: Canonicalize the operation for an idempotent operation we can't
+ // convert into a simple load.
+
+  // Volatile RMWs perform a load and a store; we cannot replace
+  // them with just a load.
+ if (RMWI.isVolatile())
+ return nullptr;
+
+ // Check if the required ordering is compatible with an atomic load.
+ AtomicOrdering Ordering = RMWI.getOrdering();
+ assert(Ordering != AtomicOrdering::NotAtomic &&
+ Ordering != AtomicOrdering::Unordered &&
+ "AtomicRMWs don't make sense with Unordered or NotAtomic");
+ if (Ordering != AtomicOrdering::Acquire &&
+ Ordering != AtomicOrdering::Monotonic)
+ return nullptr;
+
+ LoadInst *Load = new LoadInst(RMWI.getType(), RMWI.getPointerOperand());
+ Load->setAtomic(Ordering, RMWI.getSyncScopeID());
+ Load->setAlignment(DL.getABITypeAlignment(RMWI.getType()));
+ return Load;
}
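
For reference, the identities that isIdempotentRMW relies on can be sanity-checked in plain C++. The sketch below is standalone and not part of the patch; it simply mirrors the ConstantInt checks in the switch above:

  #include <algorithm>
  #include <cassert>
  #include <climits>

  int main() {
    int x = 12345;            // arbitrary signed value
    unsigned u = 0xdeadbeefu; // arbitrary unsigned value

    // add, sub, or, xor with 0 leave the value unchanged.
    assert(x + 0 == x && x - 0 == x && (x | 0) == x && (x ^ 0) == x);

    // and with all-ones (-1) leaves the value unchanged.
    assert((x & -1) == x);

    // signed min/max against the opposite extreme are no-ops.
    assert(std::min(x, INT_MAX) == x && std::max(x, INT_MIN) == x);

    // unsigned min/max against UINT_MAX and 0 are no-ops.
    assert(std::min(u, UINT_MAX) == u && std::max(u, 0u) == u);
    return 0;
  }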
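
The ordering check has a useful std::atomic analogy (a sketch under the same assumptions as the patch: a non-volatile object and monotonic/relaxed or acquire ordering; the function names are illustrative only). An idempotent RMW at these orderings reads a value and stores it back unchanged, which is the shape the pass rewrites into an atomic load; with release, acq_rel, or seq_cst ordering the store half of the RMW still carries ordering semantics, so the rewrite is skipped:

  #include <atomic>

  // An idempotent read-modify-write: it stores back the value it read,
  // so memory is unchanged and only the load side is constrained by
  // the acquire ordering.
  int read_via_rmw(std::atomic<int> &a) {
    return a.fetch_add(0, std::memory_order_acquire);
  }

  // The form InstCombine rewrites the RMW into at the IR level: an
  // atomic load with the same ordering and sync scope.
  int read_via_load(std::atomic<int> &a) {
    return a.load(std::memory_order_acquire);
  }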