Diffstat (limited to 'llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp')
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp | 74
1 file changed, 59 insertions(+), 15 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp
index 37c43977d90..58da7eb6759 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp
@@ -47,25 +47,73 @@ bool isIdempotentRMW(AtomicRMWInst& RMWI) {
     return false;
   }
 }
-}
+/// Return true if the given instruction always produces a value in memory
+/// equivalent to its value operand.
+bool isSaturating(AtomicRMWInst& RMWI) {
+  auto C = dyn_cast<ConstantInt>(RMWI.getValOperand());
+  if(!C)
+    return false;
+
+  AtomicRMWInst::BinOp Op = RMWI.getOperation();
+  switch(Op) {
+  default:
+    // TODO: fadd, fsub w/NaN
+    // Note: We avoid listing xchg to prevent transform cycles.
+    return false;
+  case AtomicRMWInst::Or:
+    return C->isAllOnesValue();
+  case AtomicRMWInst::And:
+    return C->isZero();
+  case AtomicRMWInst::Min:
+    return C->isMinValue(true);
+  case AtomicRMWInst::Max:
+    return C->isMaxValue(true);
+  case AtomicRMWInst::UMin:
+    return C->isMinValue(false);
+  case AtomicRMWInst::UMax:
+    return C->isMaxValue(false);
+  };
+}
+}
 
 Instruction *InstCombiner::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
 
-  // TODO: Any atomicrmw op which produces a known result in memory can be
-  // replaced w/an atomicrmw xchg. (see getBinOpAbsorber)
-
-  // TODO: Any atomicrmw xchg with no uses can be converted to a atomic store
-  // if the ordering is compatible.
-
-  if (!isIdempotentRMW(RMWI))
-    return nullptr;
   // Volatile RMWs perform a load and a store, we cannot replace this by just a
-  // load. We chose not to canonicalize out of general paranoia about user
-  // expectations around volatile.
+  // load or just a store. We chose not to canonicalize out of general paranoia
+  // about user expectations around volatile.
   if (RMWI.isVolatile())
     return nullptr;
 
+  // Any atomicrmw op which produces a known result in memory can be
+  // replaced w/an atomicrmw xchg.
+  if (isSaturating(RMWI)) {
+    RMWI.setOperation(AtomicRMWInst::Xchg);
+    return &RMWI;
+  }
+
+  AtomicOrdering Ordering = RMWI.getOrdering();
+  assert(Ordering != AtomicOrdering::NotAtomic &&
+         Ordering != AtomicOrdering::Unordered &&
+         "AtomicRMWs don't make sense with Unordered or NotAtomic");
+
+  // Any atomicrmw xchg with no uses can be converted to an atomic store if the
+  // ordering is compatible.
+  if (RMWI.getOperation() == AtomicRMWInst::Xchg &&
+      RMWI.use_empty()) {
+    if (Ordering != AtomicOrdering::Release &&
+        Ordering != AtomicOrdering::Monotonic)
+      return nullptr;
+    auto *SI = new StoreInst(RMWI.getValOperand(),
+                             RMWI.getPointerOperand(), &RMWI);
+    SI->setAtomic(Ordering, RMWI.getSyncScopeID());
+    SI->setAlignment(DL.getABITypeAlignment(RMWI.getType()));
+    return eraseInstFromFunction(RMWI);
+  }
+
+  if (!isIdempotentRMW(RMWI))
+    return nullptr;
+
   // We chose to canonicalize all idempotent operations to a single
   // operation code and constant. This makes it easier for the rest of the
   // optimizer to match easily. The choice of or w/zero is arbitrary.
@@ -77,10 +125,6 @@ Instruction *InstCombiner::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
   }
 
   // Check if the required ordering is compatible with an atomic load.
-  AtomicOrdering Ordering = RMWI.getOrdering();
-  assert(Ordering != AtomicOrdering::NotAtomic &&
-         Ordering != AtomicOrdering::Unordered &&
-         "AtomicRMWs don't make sense with Unordered or NotAtomic");
   if (Ordering != AtomicOrdering::Acquire &&
       Ordering != AtomicOrdering::Monotonic)
     return nullptr;
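
The first change canonicalizes "saturating" operations, where the value left in memory equals the value operand regardless of the old contents (x | -1 == -1, x & 0 == 0, and the min/max cases with the extreme constant), into atomicrmw xchg. A hand-written IR sketch of the intended rewrite, using the typed-pointer syntax of this era; the function name is illustrative and this is not taken from the patch's test suite:

    ; Before: or with all-ones always leaves -1 in memory.
    define i32 @sat_or(i32* %p) {
      %r = atomicrmw or i32* %p, i32 -1 seq_cst
      ret i32 %r
    }

    ; After opt -instcombine: same constant and ordering, but now an xchg.
    define i32 @sat_or(i32* %p) {
      %r = atomicrmw xchg i32* %p, i32 -1 seq_cst
      ret i32 %r
    }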
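The second change turns an atomicrmw xchg whose result is unused into a plain atomic store, but only for monotonic and release orderings: an acquire, acq_rel, or seq_cst xchg imposes ordering on its load half that a store cannot provide, so those are left alone. A sketch under the same caveats as above:

    ; Before: the old value is never read.
    define void @dead_xchg(i32* %p) {
      %r = atomicrmw xchg i32* %p, i32 42 release
      ret void
    }

    ; After opt -instcombine: an atomic store with the same ordering, sync
    ; scope, and the value type's ABI alignment (4 for i32 on typical targets).
    define void @dead_xchg(i32* %p) {
      store atomic i32 42, i32* %p release, align 4
      ret void
    }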
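Note how the pieces interact: a saturating op is rewritten to xchg and returned, so a later InstCombine iteration can apply the xchg-to-store conversion to the result; this is also why isSaturating deliberately excludes Xchg itself, per its comment about preventing transform cycles. The Ordering/assert block is hoisted from the second hunk because the new xchg-to-store case needs it earlier in the function. For contrast, the pre-existing idempotent-RMW path (only partially visible in this diff) reduces the operation to its load half when the ordering is monotonic or acquire; roughly:

    ; or with 0 never changes memory, so only the load remains.
    define i32 @idem_or(i32* %p) {
      %r = atomicrmw or i32* %p, i32 0 acquire
      ret i32 %r
    }

    ; After opt -instcombine:
    define i32 @idem_or(i32* %p) {
      %r = load atomic i32, i32* %p acquire, align 4
      ret i32 %r
    }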