diff options
author | Alex Bradbury <asb@lowrisc.org> | 2018-08-17 14:03:37 +0000 |
---|---|---|
committer | Alex Bradbury <asb@lowrisc.org> | 2018-08-17 14:03:37 +0000 |
commit | 3291f9aa8168408ce5ef4757012c06c196a72c41 (patch) | |
tree | 250f4e4c3d2990d0ecfb1c1bb086e0e9febd3ddc /llvm/lib/CodeGen/AtomicExpandPass.cpp | |
parent | 1962621a7e3350ed3677645dba6d0f20a765db4f (diff) | |
download | bcm5719-llvm-3291f9aa8168408ce5ef4757012c06c196a72c41.tar.gz bcm5719-llvm-3291f9aa8168408ce5ef4757012c06c196a72c41.zip |
[AtomicExpandPass] Widen partword atomicrmw or/xor/and before tryExpandAtomicRMW
This patch performs a widening transformation of bitwise atomicrmw
{or,xor,and} and applies it prior to tryExpandAtomicRMW. This operates
similarly to convertCmpXchgToIntegerType. For these operations, the i8/i16
atomicrmw can be implemented in terms of the 32-bit atomicrmw by appropriately
manipulating the operands. There is no functional change for the handling of
partword or/xor, but the transformation for partword 'and' is new.
The advantage of performing this transformation early is that the same
code-path can be used regardless of the approach used to expand the atomicrmw
(AtomicExpansionKind). i.e. the same logic is used for
AtomicExpansionKind::CmpXchg and can also be used by the intrinsic-based
expansion in D47882.
Differential Revision: https://reviews.llvm.org/D48129
llvm-svn: 340027
Diffstat (limited to 'llvm/lib/CodeGen/AtomicExpandPass.cpp')
-rw-r--r-- | llvm/lib/CodeGen/AtomicExpandPass.cpp | 52 |
1 file changed, 48 insertions, 4 deletions
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp index e28fc6fb9d4..b55afed2d68 100644 --- a/llvm/lib/CodeGen/AtomicExpandPass.cpp +++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp @@ -88,6 +88,7 @@ namespace { void expandPartwordAtomicRMW( AtomicRMWInst *I, TargetLoweringBase::AtomicExpansionKind ExpansionKind); + AtomicRMWInst *widenPartwordAtomicRMW(AtomicRMWInst *AI); void expandPartwordCmpXchg(AtomicCmpXchgInst *I); AtomicCmpXchgInst *convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI); @@ -306,6 +307,16 @@ bool AtomicExpand::runOnFunction(Function &F) { if (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) { MadeChange = true; } else { + unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8; + unsigned ValueSize = getAtomicOpSize(RMWI); + AtomicRMWInst::BinOp Op = RMWI->getOperation(); + if (ValueSize < MinCASSize && + (Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor || + Op == AtomicRMWInst::And)) { + RMWI = widenPartwordAtomicRMW(RMWI); + MadeChange = true; + } + MadeChange |= tryExpandAtomicRMW(RMWI); } } else if (CASI) { @@ -659,12 +670,10 @@ static Value *performMaskedAtomicOp(AtomicRMWInst::BinOp Op, } case AtomicRMWInst::Or: case AtomicRMWInst::Xor: - // Or/Xor won't affect any other bits, so can just be done - // directly. - return performAtomicOp(Op, Builder, Loaded, Shifted_Inc); + case AtomicRMWInst::And: + llvm_unreachable("Or/Xor/And handled by widenPartwordAtomicRMW"); case AtomicRMWInst::Add: case AtomicRMWInst::Sub: - case AtomicRMWInst::And: case AtomicRMWInst::Nand: { // The other arithmetic ops need to be masked into place. Value *NewVal = performAtomicOp(Op, Builder, Loaded, Shifted_Inc); @@ -733,6 +742,41 @@ void AtomicExpand::expandPartwordAtomicRMW( AI->eraseFromParent(); } +// Widen the bitwise atomicrmw (or/xor/and) to the minimum supported width. 
// Widen a sub-word (i8/i16) bitwise atomicrmw {or, xor, and} into an
// atomicrmw of the minimum cmpxchg width supported by the target, by
// shifting/masking the operand into the right lanes of the containing
// aligned word. The original instruction is replaced and erased; the new,
// widened AtomicRMWInst is returned so the caller can continue expansion
// on it.
AtomicRMWInst *AtomicExpand::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
  IRBuilder<> Builder(AI);
  AtomicRMWInst::BinOp Op = AI->getOperation();

  // Only the bitwise ops can be widened this way: bits outside the
  // partword lane can be made no-ops by choosing the operand correctly.
  assert((Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
          Op == AtomicRMWInst::And) &&
         "Unable to widen operation");

  // Compute the aligned word address, shift amount, and masks for the
  // partword lane within the wider (MinCmpXchgSize) word.
  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       TLI->getMinCmpXchgSizeInBits() / 8);

  // Zero-extend the value operand to the word type and shift it into the
  // lane occupied by the original partword location.
  Value *ValOperand_Shifted =
      Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), PMV.WordType),
                        PMV.ShiftAmt, "ValOperand_Shifted");

  Value *NewOperand;

  // For 'and', bits outside the lane must be 1 so they are preserved
  // (x & 1 == x); for 'or'/'xor' the zero bits outside the lane are
  // already identity values, so the shifted operand can be used directly.
  if (Op == AtomicRMWInst::And)
    NewOperand =
        Builder.CreateOr(PMV.Inv_Mask, ValOperand_Shifted, "AndOperand");
  else
    NewOperand = ValOperand_Shifted;

  // NOTE(review): the widened op inherits AI's ordering, but no syncscope
  // is passed, so CreateAtomicRMW falls back to the default (system)
  // scope — confirm dropping AI's syncscope is intended.
  AtomicRMWInst *NewAI = Builder.CreateAtomicRMW(Op, PMV.AlignedAddr,
                                                 NewOperand, AI->getOrdering());

  // Extract the old partword value from the wide result: shift the lane
  // back down and truncate to the original value type.
  Value *FinalOldResult = Builder.CreateTrunc(
      Builder.CreateLShr(NewAI, PMV.ShiftAmt), PMV.ValueType);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
  return NewAI;
}

void AtomicExpand::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
  // The basic idea here is that we're expanding a cmpxchg of a
  // smaller memory size up to a word-sized cmpxchg. To do this, we