path: root/llvm/lib/Transforms
author      Philip Reames <listmail@philipreames.com>  2019-02-14 20:41:17 +0000
committer   Philip Reames <listmail@philipreames.com>  2019-02-14 20:41:17 +0000
commit      485474208e11ecbf2a589d9e4156e78a88257bfa (patch)
tree        86eb7587be681e6841e5eb5d4ae104592102b1d3 /llvm/lib/Transforms
parent      04a1ee466062fe00dd414fdba2f827b400bc7bee (diff)
download    bcm5719-llvm-485474208e11ecbf2a589d9e4156e78a88257bfa.tar.gz
            bcm5719-llvm-485474208e11ecbf2a589d9e4156e78a88257bfa.zip
Canonicalize all integer "idempotent" atomicrmw ops
For "idempotent" atomicrmw instructions which we can't simply turn into load, canonicalize the operation and constant. This reduces the matching needed elsewhere in the optimizer, but doesn't directly impact codegen. For any architecture where OR/Zero is not a good default choice, you can extend the AtomicExpand lowerIdempotentRMWIntoFencedLoad mechanism. I reviewed X86 to make sure this works well, haven't audited other backends. Differential Revision: https://reviews.llvm.org/D58244 llvm-svn: 354058
Diffstat (limited to 'llvm/lib/Transforms')
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp | 18
1 file changed, 13 insertions(+), 5 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp
index 0c7e7ab66a9..b607c6dd608 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp
@@ -54,14 +54,22 @@ Instruction *InstCombiner::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
   if (!isIdempotentRMW(RMWI))
     return nullptr;
 
-  // TODO: Canonicalize the operation for an idempotent operation we can't
-  // convert into a simple load.
-
-  // Volatile RMWs perform a load and a store, we cannot replace
-  // this by just a load.
+  // Volatile RMWs perform a load and a store, so we cannot replace them by
+  // just a load. We chose not to canonicalize out of general paranoia about
+  // user expectations around volatile.
   if (RMWI.isVolatile())
     return nullptr;
 
+  // We chose to canonicalize all idempotent operations to a single operation
+  // code and constant. This makes it easier for the rest of the optimizer to
+  // match. The choice of or w/zero is arbitrary.
+  if (RMWI.getType()->isIntegerTy() &&
+      RMWI.getOperation() != AtomicRMWInst::Or) {
+    RMWI.setOperation(AtomicRMWInst::Or);
+    RMWI.setOperand(1, ConstantInt::get(RMWI.getType(), 0));
+    return &RMWI;
+  }
+
   // Check if the required ordering is compatible with an atomic load.
   AtomicOrdering Ordering = RMWI.getOrdering();
   assert(Ordering != AtomicOrdering::NotAtomic &&
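Past this hunk, the pre-existing path (truncated above) replaces an idempotent RMW with a plain atomic load when the required ordering permits it. A hedged IR sketch of that second step (example mine; assumes monotonic is among the load-compatible orderings and %p is a hypothetical i32 pointer):

    ; before: already in the canonical or/zero form
    %old = atomicrmw or i32* %p, i32 0 monotonic
    ; after: the store half of the RMW is dropped entirely
    %old = load atomic i32, i32* %p monotonic, align 4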