author    Daniel Sanders <daniel_l_sanders@apple.com>  2017-12-05 05:52:07 +0000
committer Daniel Sanders <daniel_l_sanders@apple.com>  2017-12-05 05:52:07 +0000
commit    3c1c4c0ee0628b22d09a6ec2b1fedd0a72c5ace3 (patch)
tree      0a0ba2bf8f470c395407aedeb4189a70829156ba /llvm/lib
parent    04013b704c129939706177b036ea8e8c8abd187a (diff)
Revert r319691: [globalisel][tablegen] Split atomic load/store into separate opcode and enable for AArch64.
Some concerns were raised with the direction. Revert while we discuss it and look into an alternative.

llvm-svn: 319739
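What the revert means in practice: there are no longer dedicated G_ATOMIC_LOAD / G_ATOMIC_STORE generic opcodes, so an atomic load or store is once again an ordinary G_LOAD / G_STORE whose MachineMemOperand carries the ordering. Each instruction selector touched below therefore reinstates the same bail-out. A minimal self-contained sketch of that check, distilled from the hunks rather than copied from any one file (the helper name isUnsupportedAtomic is invented for this sketch; the in-tree hunks inline the check directly):

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/Support/AtomicOrdering.h"

using namespace llvm;

// Atomicity is a property of the memory operand, not the opcode, so a
// selector without atomic support inspects the MMO and returns false,
// deferring to the GlobalISel fallback path. Assumes MI is a G_LOAD or
// G_STORE carrying exactly one memoperand, as the IRTranslator emits.
static bool isUnsupportedAtomic(const MachineInstr &MI) {
  const MachineMemOperand &MMO = **MI.memoperands_begin();
  return MMO.getOrdering() != AtomicOrdering::NotAtomic;
}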
Diffstat (limited to 'llvm/lib')
-rw-r--r-- llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp           | 21
-rw-r--r-- llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp       | 32
-rw-r--r-- llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp |  6
-rw-r--r-- llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp       |  8
-rw-r--r-- llvm/lib/Target/ARM/ARMInstructionSelector.cpp         |  6
-rw-r--r-- llvm/lib/Target/X86/X86InstructionSelector.cpp         |  5
6 files changed, 17 insertions, 61 deletions
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 774ea9877a7..e911085d0ad 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -345,16 +345,6 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
   unsigned Res = getOrCreateVReg(LI);
   unsigned Addr = getOrCreateVReg(*LI.getPointerOperand());
 
-  if (LI.getOrdering() != AtomicOrdering::NotAtomic) {
-    MIRBuilder.buildAtomicLoad(
-        Res, Addr,
-        *MF->getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
-                                  Flags, DL->getTypeStoreSize(LI.getType()),
-                                  getMemOpAlignment(LI), AAMDNodes(), nullptr,
-                                  LI.getSyncScopeID(), LI.getOrdering()));
-    return true;
-  }
-
   MIRBuilder.buildLoad(
       Res, Addr,
       *MF->getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
@@ -376,17 +366,6 @@ bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
   unsigned Val = getOrCreateVReg(*SI.getValueOperand());
   unsigned Addr = getOrCreateVReg(*SI.getPointerOperand());
 
-  if (SI.getOrdering() != AtomicOrdering::NotAtomic) {
-    MIRBuilder.buildAtomicStore(
-        Val, Addr,
-        *MF->getMachineMemOperand(
-            MachinePointerInfo(SI.getPointerOperand()), Flags,
-            DL->getTypeStoreSize(SI.getValueOperand()->getType()),
-            getMemOpAlignment(SI), AAMDNodes(), nullptr, SI.getSyncScopeID(),
-            SI.getOrdering()));
-    return true;
-  }
-
   MIRBuilder.buildStore(
       Val, Addr,
       *MF->getMachineMemOperand(
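With the early-return branch deleted, translateLoad and translateStore fall through to buildLoad/buildStore for atomic and non-atomic accesses alike, and the only remaining channel for the ordering is the MachineMemOperand itself. A sketch of the fall-through call with the atomic arguments threaded onto the MMO, reusing names from the hunk above; the surviving call's trailing arguments are truncated by the hunk context, so passing the sync scope and ordering here is an assumption, not a quote:

  // The getMachineMemOperand overload used in the deleted branch also accepts
  // a sync scope and an ordering, so an acquire load can be expressed as a
  // plain G_LOAD whose MMO records AtomicOrdering::Acquire.
  MIRBuilder.buildLoad(
      Res, Addr,
      *MF->getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(LI.getType()),
                                getMemOpAlignment(LI), AAMDNodes(), nullptr,
                                LI.getSyncScopeID(), LI.getOrdering()));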
diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index fbcb14d5252..62c396e6cdf 100644
--- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -295,8 +295,6 @@ MachineInstrBuilder MachineIRBuilder::buildLoad(unsigned Res, unsigned Addr,
                                                 MachineMemOperand &MMO) {
   assert(MRI->getType(Res).isValid() && "invalid operand type");
   assert(MRI->getType(Addr).isPointer() && "invalid operand type");
-  assert(MMO.getOrdering() == AtomicOrdering::NotAtomic &&
-         "invalid atomic ordering");
 
   return buildInstr(TargetOpcode::G_LOAD)
       .addDef(Res)
@@ -308,8 +306,6 @@ MachineInstrBuilder MachineIRBuilder::buildStore(unsigned Val, unsigned Addr,
                                                  MachineMemOperand &MMO) {
   assert(MRI->getType(Val).isValid() && "invalid operand type");
   assert(MRI->getType(Addr).isPointer() && "invalid operand type");
-  assert(MMO.getOrdering() == AtomicOrdering::NotAtomic &&
-         "invalid atomic ordering");
 
   return buildInstr(TargetOpcode::G_STORE)
       .addUse(Val)
@@ -317,34 +313,6 @@ MachineInstrBuilder MachineIRBuilder::buildStore(unsigned Val, unsigned Addr,
       .addMemOperand(&MMO);
 }
 
-MachineInstrBuilder MachineIRBuilder::buildAtomicLoad(unsigned Res,
-                                                      unsigned Addr,
-                                                      MachineMemOperand &MMO) {
-  assert(MRI->getType(Res).isValid() && "invalid operand type");
-  assert(MRI->getType(Addr).isPointer() && "invalid operand type");
-  assert(MMO.getOrdering() != AtomicOrdering::NotAtomic &&
-         "invalid atomic ordering");
-
-  return buildInstr(TargetOpcode::G_ATOMIC_LOAD)
-      .addDef(Res)
-      .addUse(Addr)
-      .addMemOperand(&MMO);
-}
-
-MachineInstrBuilder MachineIRBuilder::buildAtomicStore(unsigned Val,
-                                                       unsigned Addr,
-                                                       MachineMemOperand &MMO) {
-  assert(MRI->getType(Val).isValid() && "invalid operand type");
-  assert(MRI->getType(Addr).isPointer() && "invalid operand type");
-  assert(MMO.getOrdering() != AtomicOrdering::NotAtomic &&
-         "invalid atomic ordering");
-
-  return buildInstr(TargetOpcode::G_ATOMIC_STORE)
-      .addUse(Val)
-      .addUse(Addr)
-      .addMemOperand(&MMO);
-}
-
 MachineInstrBuilder MachineIRBuilder::buildUAdde(unsigned Res,
                                                  unsigned CarryOut,
                                                  unsigned Op0, unsigned Op1,
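With buildAtomicLoad/buildAtomicStore gone and the NotAtomic asserts dropped, buildLoad and buildStore now accept a memory operand of any ordering. A usage sketch; Res and Addr are virtual registers as in the builders above, while PlainMMO and AcquireMMO are hypothetical MachineMemOperands whose orderings are NotAtomic and Acquire respectively:

  // Both calls emit G_LOAD; only the attached memory operand differs.
  MIRBuilder.buildLoad(Res, Addr, PlainMMO);
  MIRBuilder.buildLoad(Res, Addr, AcquireMMO);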
diff --git a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
index 6999721b626..c2d3ae31c62 100644
--- a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
@@ -889,6 +889,12 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
       return false;
     }
 
+    auto &MemOp = **I.memoperands_begin();
+    if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
+      DEBUG(dbgs() << "Atomic load/store not supported yet\n");
+      return false;
+    }
+
     const unsigned PtrReg = I.getOperand(1).getReg();
 #ifndef NDEBUG
     const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, MRI, TRI);
diff --git a/llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp
index 6531d5ebe4c..05df5120222 100644
--- a/llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp
@@ -231,14 +231,6 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST) {
     setAction({MemOp, 1, p0}, Legal);
   }
 
-  for (unsigned MemOp : {G_ATOMIC_LOAD, G_ATOMIC_STORE}) {
-    for (auto Ty : {s8, s16, s32, s64, p0})
-      setAction({MemOp, Ty}, Legal);
-
-    // And everything's fine in addrspace 0.
-    setAction({MemOp, 1, p0}, Legal);
-  }
-
   // Constants
   for (auto Ty : {s32, s64}) {
     setAction({TargetOpcode::G_CONSTANT, Ty}, Legal);
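For reference, expanding the deleted loop by hand shows exactly which actions stop being marked legal (a reconstruction for illustration, showing G_ATOMIC_LOAD; the G_ATOMIC_STORE iteration was identical):

  setAction({G_ATOMIC_LOAD, s8}, Legal);    // type index 0: the value
  setAction({G_ATOMIC_LOAD, s16}, Legal);
  setAction({G_ATOMIC_LOAD, s32}, Legal);
  setAction({G_ATOMIC_LOAD, s64}, Legal);
  setAction({G_ATOMIC_LOAD, p0}, Legal);
  setAction({G_ATOMIC_LOAD, 1, p0}, Legal); // type index 1: the pointer (addrspace 0)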
diff --git a/llvm/lib/Target/ARM/ARMInstructionSelector.cpp b/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
index b43faf4903a..6bbeae2e115 100644
--- a/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
+++ b/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
@@ -801,6 +801,12 @@ bool ARMInstructionSelector::select(MachineInstr &I,
     return selectGlobal(MIB, MRI);
   case G_STORE:
   case G_LOAD: {
+    const auto &MemOp = **I.memoperands_begin();
+    if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
+      DEBUG(dbgs() << "Atomic load/store not supported yet\n");
+      return false;
+    }
+
     unsigned Reg = I.getOperand(0).getReg();
     unsigned RegBank = RBI.getRegBank(Reg, MRI, TRI)->getID();
 
diff --git a/llvm/lib/Target/X86/X86InstructionSelector.cpp b/llvm/lib/Target/X86/X86InstructionSelector.cpp
index f6530c4eee2..44bbc3f1b3f 100644
--- a/llvm/lib/Target/X86/X86InstructionSelector.cpp
+++ b/llvm/lib/Target/X86/X86InstructionSelector.cpp
@@ -484,6 +484,11 @@ bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
   const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
 
   auto &MemOp = **I.memoperands_begin();
+  if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
+    DEBUG(dbgs() << "Atomic load/store not supported yet\n");
+    return false;
+  }
+
   unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc, MemOp.getAlignment());
   if (NewOpc == Opc)
     return false;