author     Daniel Sanders <daniel_l_sanders@apple.com>    2017-12-04 20:39:32 +0000
committer  Daniel Sanders <daniel_l_sanders@apple.com>    2017-12-04 20:39:32 +0000
commit     04e4f47e93e2bc26a80bf738c3692846ca622230 (patch)
tree       1e9395e834862e14ab18a434eee7da723e821f10 /llvm/lib/CodeGen/GlobalISel
parent     ef573fa073e3a4ed71679bc461c37720a34f9a51 (diff)
[globalisel][tablegen] Split atomic load/store into separate opcode and enable for AArch64.
This patch splits atomics out of the generic G_LOAD/G_STORE and into their own
G_ATOMIC_LOAD/G_ATOMIC_STORE. This is a pragmatic decision rather than a
necessary one. Atomic load/store shares little of its implementation with
non-atomic load/store and tends to be handled very differently throughout the
backend. The split also has the nice side effect of slightly improving
common-case performance at ISel, since there is no longer any need for an
atomicity check in the matcher table.
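In essence, the new dispatch in the IRTranslator looks like this (a condensed
sketch of the IRTranslator.cpp change in the diff below; MMO stands in for the
MachineMemOperand that the real code builds inline):

    // Condensed from IRTranslator::translateLoad below; non-atomic loads
    // keep taking the existing G_LOAD path.
    if (LI.getOrdering() != AtomicOrdering::NotAtomic) {
      MIRBuilder.buildAtomicLoad(Res, Addr, *MMO); // emits G_ATOMIC_LOAD
      return true;
    }
    MIRBuilder.buildLoad(Res, Addr, *MMO);         // emits G_LOAD, as before

translateStore dispatches the same way between buildStore and buildAtomicStore.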
All targets have been updated to remove the atomic load/store check from the
G_LOAD/G_STORE path. AArch64 has also been updated to mark
G_ATOMIC_LOAD/G_ATOMIC_STORE legal.
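For reference, marking the new opcodes legal amounts to a few setAction calls
(a sketch against the LegalizerInfo::setAction API in use at the time; the
exact type list is illustrative, not copied from the AArch64 change):

    // Sketch for AArch64LegalizerInfo's constructor; the type list below is
    // an assumption for illustration.
    for (unsigned MemOp : {G_ATOMIC_LOAD, G_ATOMIC_STORE})
      for (auto Ty : {s8, s16, s32, s64, p0})
        setAction({MemOp, Ty}, Legal);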
There is one issue with this patch, though, which also affects extending loads
and truncating stores: the selection rules only match when an appropriate
G_ANYEXT is present in the MIR. For example,
(G_ATOMIC_STORE (G_TRUNC:s16 (G_ANYEXT:s32 (G_ATOMIC_LOAD:s16 X))))
will match but:
(G_ATOMIC_STORE (G_ATOMIC_LOAD:s16 X))
will not. This shouldn't be a problem at the moment, but as we get better at
eliminating extends/truncates we'll likely start failing to match in some
cases. The current plan is to fix this in a follow-up patch that changes the
representation of extending loads and truncating stores so that the MMO can
describe a different type from the operation.
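For illustration, under such a representation the second form above could
match without any explicit extend/truncate, with the memory type carried on
the MMO instead (hypothetical notation, not part of this patch):

    (G_ATOMIC_STORE (G_ATOMIC_LOAD:s32 X)) ; both MMOs describe a 16-bit access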
llvm-svn: 319691
Diffstat (limited to 'llvm/lib/CodeGen/GlobalISel')
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp      21
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp  32
2 files changed, 53 insertions, 0 deletions
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index e911085d0ad..774ea9877a7 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -345,6 +345,16 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
   unsigned Res = getOrCreateVReg(LI);
   unsigned Addr = getOrCreateVReg(*LI.getPointerOperand());
 
+  if (LI.getOrdering() != AtomicOrdering::NotAtomic) {
+    MIRBuilder.buildAtomicLoad(
+        Res, Addr,
+        *MF->getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
+                                  Flags, DL->getTypeStoreSize(LI.getType()),
+                                  getMemOpAlignment(LI), AAMDNodes(), nullptr,
+                                  LI.getSyncScopeID(), LI.getOrdering()));
+    return true;
+  }
+
   MIRBuilder.buildLoad(
       Res, Addr,
       *MF->getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
@@ -366,6 +376,17 @@ bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
   unsigned Val = getOrCreateVReg(*SI.getValueOperand());
   unsigned Addr = getOrCreateVReg(*SI.getPointerOperand());
 
+  if (SI.getOrdering() != AtomicOrdering::NotAtomic) {
+    MIRBuilder.buildAtomicStore(
+        Val, Addr,
+        *MF->getMachineMemOperand(
+            MachinePointerInfo(SI.getPointerOperand()), Flags,
+            DL->getTypeStoreSize(SI.getValueOperand()->getType()),
+            getMemOpAlignment(SI), AAMDNodes(), nullptr, SI.getSyncScopeID(),
+            SI.getOrdering()));
+    return true;
+  }
+
   MIRBuilder.buildStore(
       Val, Addr,
       *MF->getMachineMemOperand(
diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index 62c396e6cdf..fbcb14d5252 100644
--- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -295,6 +295,8 @@ MachineInstrBuilder MachineIRBuilder::buildLoad(unsigned Res, unsigned Addr,
                                                 MachineMemOperand &MMO) {
   assert(MRI->getType(Res).isValid() && "invalid operand type");
   assert(MRI->getType(Addr).isPointer() && "invalid operand type");
+  assert(MMO.getOrdering() == AtomicOrdering::NotAtomic &&
+         "invalid atomic ordering");
 
   return buildInstr(TargetOpcode::G_LOAD)
       .addDef(Res)
@@ -306,6 +308,8 @@ MachineInstrBuilder MachineIRBuilder::buildStore(unsigned Val, unsigned Addr,
                                                  MachineMemOperand &MMO) {
   assert(MRI->getType(Val).isValid() && "invalid operand type");
   assert(MRI->getType(Addr).isPointer() && "invalid operand type");
+  assert(MMO.getOrdering() == AtomicOrdering::NotAtomic &&
+         "invalid atomic ordering");
 
   return buildInstr(TargetOpcode::G_STORE)
       .addUse(Val)
@@ -313,6 +317,34 @@ MachineInstrBuilder MachineIRBuilder::buildStore(unsigned Val, unsigned Addr,
       .addMemOperand(&MMO);
 }
 
+MachineInstrBuilder MachineIRBuilder::buildAtomicLoad(unsigned Res,
+                                                      unsigned Addr,
+                                                      MachineMemOperand &MMO) {
+  assert(MRI->getType(Res).isValid() && "invalid operand type");
+  assert(MRI->getType(Addr).isPointer() && "invalid operand type");
+  assert(MMO.getOrdering() != AtomicOrdering::NotAtomic &&
+         "invalid atomic ordering");
+
+  return buildInstr(TargetOpcode::G_ATOMIC_LOAD)
+      .addDef(Res)
+      .addUse(Addr)
+      .addMemOperand(&MMO);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildAtomicStore(unsigned Val,
+                                                       unsigned Addr,
+                                                       MachineMemOperand &MMO) {
+  assert(MRI->getType(Val).isValid() && "invalid operand type");
+  assert(MRI->getType(Addr).isPointer() && "invalid operand type");
+  assert(MMO.getOrdering() != AtomicOrdering::NotAtomic &&
+         "invalid atomic ordering");
+
+  return buildInstr(TargetOpcode::G_ATOMIC_STORE)
+      .addUse(Val)
+      .addUse(Addr)
+      .addMemOperand(&MMO);
+}
+
 MachineInstrBuilder MachineIRBuilder::buildUAdde(unsigned Res,
                                                  unsigned CarryOut,
                                                  unsigned Op0, unsigned Op1) {