author    Philip Reames <listmail@philipreames.com>  2019-02-14 18:39:14 +0000
committer Philip Reames <listmail@philipreames.com>  2019-02-14 18:39:14 +0000
commit    97067d3c7321ff8a3dcb0b0f41ae06b1602787c3 (patch)
tree      0ca1d8472bb98311ca55b347a42716dc145bb8f6 /llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp
parent    cb914cf683108414ea2b62e68d9e9b10879c2a03 (diff)
Teach instcombine about remaining "idempotent" atomicrmw types
Expand on Quentin's r353471 patch which converts some atomicrmws into loads. Handle the remaining operation types, and fix a slight bug: atomic loads are required to have alignment. Since this was within the InstCombine fixed point, somewhere else in InstCombine was adding the alignment before the verifier saw it, but we should still fix it here.

Terminology-wise, I'm using the "idempotent" naming that is used for the same operations in AtomicExpand and X86ISelLoweringInfo. Once this lands, I'll add similar tests for AtomicExpand, and move the pattern-match function to a common location. In the review, there was seemingly consensus that "idempotent" is slightly incorrect for this context. Once we settle on a better name, I'll update all uses at once.

Differential Revision: https://reviews.llvm.org/D58242

llvm-svn: 354046
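As a concrete sketch (illustrative IR, not part of the commit itself), the rewrite generalized here turns an atomicrmw whose value operand is the identity for its operation into an atomic load, since the value written back always equals the value read:

  ; before: read-modify-write that never changes memory (x & -1 == x)
  %old = atomicrmw and i32* %p, i32 -1 acquire
  ; after: same result observed, but no store is performed
  %old = load atomic i32, i32* %p acquire, align 4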
Diffstat (limited to 'llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp')
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp | 90
1 file changed, 60 insertions(+), 30 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp
index 86bbfb15986..0c7e7ab66a9 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp
@@ -14,35 +14,65 @@
using namespace llvm;
-Instruction *InstCombiner::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
- switch (RMWI.getOperation()) {
- default:
- break;
- case AtomicRMWInst::Add:
- case AtomicRMWInst::Sub:
- case AtomicRMWInst::Or:
- // Replace atomicrmw <op> addr, 0 => load atomic addr.
-
- // Volatile RMWs perform a load and a store, we cannot replace
- // this by just a load.
- if (RMWI.isVolatile())
- break;
-
- auto *CI = dyn_cast<ConstantInt>(RMWI.getValOperand());
- if (!CI || !CI->isZero())
- break;
- // Check if the required ordering is compatible with an
- // atomic load.
- AtomicOrdering Ordering = RMWI.getOrdering();
- assert(Ordering != AtomicOrdering::NotAtomic &&
- Ordering != AtomicOrdering::Unordered &&
- "AtomicRMWs don't make sense with Unordered or NotAtomic");
- if (Ordering != AtomicOrdering::Acquire &&
- Ordering != AtomicOrdering::Monotonic)
- break;
- LoadInst *Load = new LoadInst(RMWI.getType(), RMWI.getPointerOperand());
- Load->setAtomic(Ordering, RMWI.getSyncScopeID());
- return Load;
+namespace {
+/// Return true if and only if the given instruction does not modify the memory
+/// location referenced. Note that an idempotent atomicrmw may still have
+/// ordering effects on nearby instructions, or be volatile.
+/// TODO: Common w/ the version in AtomicExpandPass, and change the term used.
+/// Idempotent is confusing in this context.
+bool isIdempotentRMW(AtomicRMWInst& RMWI) {
+ auto C = dyn_cast<ConstantInt>(RMWI.getValOperand());
+ if (!C)
+ // TODO: Handle fadd, fsub?
+ return false;
+
+ AtomicRMWInst::BinOp Op = RMWI.getOperation();
+ switch (Op) {
+ case AtomicRMWInst::Add:
+ case AtomicRMWInst::Sub:
+ case AtomicRMWInst::Or:
+ case AtomicRMWInst::Xor:
+ return C->isZero();
+ case AtomicRMWInst::And:
+ return C->isMinusOne();
+ case AtomicRMWInst::Min:
+ return C->isMaxValue(true);
+ case AtomicRMWInst::Max:
+ return C->isMinValue(true);
+ case AtomicRMWInst::UMin:
+ return C->isMaxValue(false);
+ case AtomicRMWInst::UMax:
+ return C->isMinValue(false);
+ default:
+ return false;
}
- return nullptr;
+}
+}
+
+
+Instruction *InstCombiner::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
+ if (!isIdempotentRMW(RMWI))
+ return nullptr;
+
+ // TODO: Canonicalize the operation for an idempotent operation we can't
+ // convert into a simple load.
+
+ // Volatile RMWs perform a load and a store, so we cannot replace
+ // this with just a load.
+ if (RMWI.isVolatile())
+ return nullptr;
+
+ // Check if the required ordering is compatible with an atomic load.
+ AtomicOrdering Ordering = RMWI.getOrdering();
+ assert(Ordering != AtomicOrdering::NotAtomic &&
+ Ordering != AtomicOrdering::Unordered &&
+ "AtomicRMWs don't make sense with Unordered or NotAtomic");
+ if (Ordering != AtomicOrdering::Acquire &&
+ Ordering != AtomicOrdering::Monotonic)
+ return nullptr;
+
+ LoadInst *Load = new LoadInst(RMWI.getType(), RMWI.getPointerOperand());
+ Load->setAtomic(Ordering, RMWI.getSyncScopeID());
+ Load->setAlignment(DL.getABITypeAlignment(RMWI.getType()));
+ return Load;
}
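For reference, a sketch of the identity inputs that the new isIdempotentRMW helper accepts, written as LLVM IR (the constants follow directly from the switch above; i32 is just an example width):

  ; atomicrmw add  i32* %p, i32 0            ; x + 0  == x
  ; atomicrmw sub  i32* %p, i32 0            ; x - 0  == x
  ; atomicrmw or   i32* %p, i32 0            ; x | 0  == x
  ; atomicrmw xor  i32* %p, i32 0            ; x ^ 0  == x
  ; atomicrmw and  i32* %p, i32 -1           ; x & -1 == x
  ; atomicrmw min  i32* %p, i32 2147483647   ; min(x, INT32_MAX)   == x
  ; atomicrmw max  i32* %p, i32 -2147483648  ; max(x, INT32_MIN)   == x
  ; atomicrmw umin i32* %p, i32 -1           ; umin(x, UINT32_MAX) == x
  ; atomicrmw umax i32* %p, i32 0            ; umax(x, 0)          == x

Even when one of these matches, the conversion only fires for monotonic and acquire orderings on non-volatile RMWs; release, acq_rel, and seq_cst operations keep their store's release semantics and are left untouched.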