| author | Daniel Sanders <daniel_l_sanders@apple.com> | 2017-12-05 05:52:07 +0000 |
|---|---|---|
| committer | Daniel Sanders <daniel_l_sanders@apple.com> | 2017-12-05 05:52:07 +0000 |
| commit | 3c1c4c0ee0628b22d09a6ec2b1fedd0a72c5ace3 | |
| tree | 0a0ba2bf8f470c395407aedeb4189a70829156ba | |
| parent | 04013b704c129939706177b036ea8e8c8abd187a | |
Revert r319691: [globalisel][tablegen] Split atomic load/store into separate opcode and enable for AArch64.
Some concerns were raised with the direction. Revert while we discuss it and look into an alternative.
llvm-svn: 319739
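
For context on what the revert changes mechanically: with the separate G_ATOMIC_LOAD/G_ATOMIC_STORE opcodes gone, atomic accesses are once again represented as ordinary G_LOAD/G_STORE instructions whose MachineMemOperand carries the atomic ordering, so each selector below re-gains an early bail-out on anything atomic. A minimal sketch of that check, assuming the circa-2017 GlobalISel API (the DEBUG macro predates today's LLVM_DEBUG); the helper name is invented here for illustration:

```cpp
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/Support/AtomicOrdering.h"

using namespace llvm;

// Hypothetical helper (not part of this commit): true when the
// instruction's first memory operand is a plain, non-atomic access
// that the hand-written G_LOAD/G_STORE selection can handle.
static bool hasNonAtomicMemOp(const MachineInstr &I) {
  const MachineMemOperand &MemOp = **I.memoperands_begin();
  return MemOp.getOrdering() == AtomicOrdering::NotAtomic;
}
```

Returning false from select() when this check fails sends the instruction to the target's fallback path instead of miscompiling the atomic as a plain load or store.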
Diffstat (limited to 'llvm/lib/Target')

| file | changes |
|---|---|
| llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp | 6 ++++++ |
| llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp | 8 -------- |
| llvm/lib/Target/ARM/ARMInstructionSelector.cpp | 6 ++++++ |
| llvm/lib/Target/X86/X86InstructionSelector.cpp | 5 +++++ |

4 files changed, 17 insertions(+), 8 deletions(-)
```diff
diff --git a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
index 6999721b626..c2d3ae31c62 100644
--- a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
@@ -889,6 +889,12 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
       return false;
     }
 
+    auto &MemOp = **I.memoperands_begin();
+    if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
+      DEBUG(dbgs() << "Atomic load/store not supported yet\n");
+      return false;
+    }
+
     const unsigned PtrReg = I.getOperand(1).getReg();
 #ifndef NDEBUG
     const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, MRI, TRI);
diff --git a/llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp
index 6531d5ebe4c..05df5120222 100644
--- a/llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp
@@ -231,14 +231,6 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST) {
     setAction({MemOp, 1, p0}, Legal);
   }
 
-  for (unsigned MemOp : {G_ATOMIC_LOAD, G_ATOMIC_STORE}) {
-    for (auto Ty : {s8, s16, s32, s64, p0})
-      setAction({MemOp, Ty}, Legal);
-
-    // And everything's fine in addrspace 0.
-    setAction({MemOp, 1, p0}, Legal);
-  }
-
   // Constants
   for (auto Ty : {s32, s64}) {
     setAction({TargetOpcode::G_CONSTANT, Ty}, Legal);
diff --git a/llvm/lib/Target/ARM/ARMInstructionSelector.cpp b/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
index b43faf4903a..6bbeae2e115 100644
--- a/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
+++ b/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
@@ -801,6 +801,12 @@ bool ARMInstructionSelector::select(MachineInstr &I,
     return selectGlobal(MIB, MRI);
   case G_STORE:
   case G_LOAD: {
+    const auto &MemOp = **I.memoperands_begin();
+    if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
+      DEBUG(dbgs() << "Atomic load/store not supported yet\n");
+      return false;
+    }
+
     unsigned Reg = I.getOperand(0).getReg();
     unsigned RegBank = RBI.getRegBank(Reg, MRI, TRI)->getID();
 
diff --git a/llvm/lib/Target/X86/X86InstructionSelector.cpp b/llvm/lib/Target/X86/X86InstructionSelector.cpp
index f6530c4eee2..44bbc3f1b3f 100644
--- a/llvm/lib/Target/X86/X86InstructionSelector.cpp
+++ b/llvm/lib/Target/X86/X86InstructionSelector.cpp
@@ -484,6 +484,11 @@ bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
   const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
 
   auto &MemOp = **I.memoperands_begin();
+  if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
+    DEBUG(dbgs() << "Atomic load/store not supported yet\n");
+    return false;
+  }
+
   unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc, MemOp.getAlignment());
   if (NewOpc == Opc)
     return false;
```
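
The deleted AArch64LegalizerInfo block used the pre-DSL setAction API, where each (opcode, type index, LLT) aspect maps to an action such as Legal. For readers unfamiliar with that era's interface, a minimal sketch of the same shape, assuming the late-2017 LegalizerInfo API; the standalone helper and its name are invented here for illustration:

```cpp
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"

using namespace llvm;

// Hypothetical helper (illustration only): declare one memory opcode
// legal for the usual AArch64 scalar and pointer types, mirroring the
// G_LOAD/G_STORE rules that remain in the tree after this revert.
static void markMemOpLegal(LegalizerInfo &LI, unsigned Opcode) {
  const LLT p0 = LLT::pointer(0, 64); // 64-bit pointer, address space 0

  // Type index 0 is the loaded/stored value.
  for (auto Ty : {LLT::scalar(8), LLT::scalar(16), LLT::scalar(32),
                  LLT::scalar(64), p0})
    LI.setAction({Opcode, Ty}, LegalizerInfo::Legal);

  // Type index 1 is the pointer operand; only address space 0 is covered.
  LI.setAction({Opcode, 1, p0}, LegalizerInfo::Legal);
}
```

In today's tree the same intent would be written with the rule-based getActionDefinitionsBuilder DSL, but that postdates this commit.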

