-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 36
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZISelLowering.cpp       | 21
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZISelLowering.h         |  1
-rw-r--r--  llvm/lib/Target/XCore/XCoreISelLowering.cpp           | 21
-rw-r--r--  llvm/lib/Target/XCore/XCoreISelLowering.h             |  2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/syncscopes.ll                |  6
6 files changed, 66 insertions(+), 21 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 95343f0eae1..f57b86835d2 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4417,10 +4417,10 @@ void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
auto Alignment = DAG.getEVTAlignment(MemVT);
- // FIXME: Volatile isn't really correct; we should keep track of atomic
- // orderings in the memoperand.
- auto Flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad |
- MachineMemOperand::MOStore;
+ auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
+ if (I.isVolatile())
+ Flags |= MachineMemOperand::MOVolatile;
+ Flags |= DAG.getTargetLoweringInfo().getMMOFlags(I);
MachineFunction &MF = DAG.getMachineFunction();
MachineMemOperand *MMO =
@@ -4468,12 +4468,10 @@ void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
auto MemVT = getValue(I.getValOperand()).getSimpleValueType();
auto Alignment = DAG.getEVTAlignment(MemVT);
- // For now, atomics are considered to be volatile always, and they are
- // chained as such.
- // FIXME: Volatile isn't really correct; we should keep track of atomic
- // orderings in the memoperand.
- auto Flags = MachineMemOperand::MOVolatile |
- MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
+ auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
+ if (I.isVolatile())
+ Flags |= MachineMemOperand::MOVolatile;
+ Flags |= DAG.getTargetLoweringInfo().getMMOFlags(I);
MachineFunction &MF = DAG.getMachineFunction();
MachineMemOperand *MMO =
@@ -4518,12 +4516,15 @@ void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
I.getAlignment() < VT.getStoreSize())
report_fatal_error("Cannot generate unaligned atomic load");
+ auto Flags = MachineMemOperand::MOLoad;
+ if (I.isVolatile())
+ Flags |= MachineMemOperand::MOVolatile;
+ Flags |= TLI.getMMOFlags(I);
+
MachineMemOperand *MMO =
DAG.getMachineFunction().
getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
- MachineMemOperand::MOVolatile |
- MachineMemOperand::MOLoad,
- VT.getStoreSize(),
+ Flags, VT.getStoreSize(),
I.getAlignment() ? I.getAlignment() :
DAG.getEVTAlignment(VT),
AAMDNodes(), nullptr, SSID, Order);
@@ -4554,11 +4555,10 @@ void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
if (I.getAlignment() < VT.getStoreSize())
report_fatal_error("Cannot generate unaligned atomic store");
- // For now, atomics are considered to be volatile always, and they are
- // chained as such.
- // FIXME: Volatile isn't really correct; we should keep track of atomic
- // orderings in the memoperand.
- auto Flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOStore;
+ auto Flags = MachineMemOperand::MOStore;
+ if (I.isVolatile())
+ Flags |= MachineMemOperand::MOVolatile;
+ Flags |= TLI.getMMOFlags(I);
MachineFunction &MF = DAG.getMachineFunction();
MachineMemOperand *MMO =
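The pattern above is now the same in all four atomic visitors: start from the basic MOLoad/MOStore flags, add MOVolatile only when the IR instruction itself is volatile, and let the target contribute extra flags through getMMOFlags(). A minimal sketch of how that shared logic could be factored into one helper follows; getAtomicMMOFlags is a hypothetical name and the helper is not part of this patch.

#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical helper mirroring the new builder logic. It is a template so
// it works for LoadInst, StoreInst, AtomicRMWInst and AtomicCmpXchgInst,
// each of which exposes isVolatile().
template <typename AtomicInstT>
static MachineMemOperand::Flags
getAtomicMMOFlags(const AtomicInstT &I, MachineMemOperand::Flags Base,
                  const TargetLowering &TLI) {
  auto Flags = Base;                        // MOLoad, MOStore, or both
  if (I.isVolatile())                       // volatility comes from the IR
    Flags |= MachineMemOperand::MOVolatile; // instruction, not from atomicity
  Flags |= TLI.getMMOFlags(I);              // target-specific extra flags
  return Flags;
}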
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 7012d9a243f..abb70e0c152 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -3718,6 +3718,27 @@ SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
return SDValue();
}
+MachineMemOperand::Flags
+SystemZTargetLowering::getMMOFlags(const Instruction &I) const {
+ // Because of how we convert atomic_load and atomic_store to normal loads and
+ // stores in the DAG, we need to ensure that the MMOs are marked volatile
+ // since DAGCombine hasn't been updated to account for atomic, but non
+ // volatile loads. (See D57601)
+ if (auto *SI = dyn_cast<StoreInst>(&I))
+ if (SI->isAtomic())
+ return MachineMemOperand::MOVolatile;
+ if (auto *LI = dyn_cast<LoadInst>(&I))
+ if (LI->isAtomic())
+ return MachineMemOperand::MOVolatile;
+ if (auto *AI = dyn_cast<AtomicRMWInst>(&I))
+ if (AI->isAtomic())
+ return MachineMemOperand::MOVolatile;
+ if (auto *AI = dyn_cast<AtomicCmpXchgInst>(&I))
+ if (AI->isAtomic())
+ return MachineMemOperand::MOVolatile;
+ return MachineMemOperand::MONone;
+}
+
SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op,
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.h b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
index b8dc4ab65f4..ae73c5ec74f 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.h
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
@@ -642,6 +642,7 @@ private:
MachineBasicBlock *MBB,
unsigned Opcode) const;
+ MachineMemOperand::Flags getMMOFlags(const Instruction &I) const override;
const TargetRegisterClass *getRepRegClassFor(MVT VT) const override;
};
diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
index 8979ee727d0..51dbf016392 100644
--- a/llvm/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -1008,6 +1008,27 @@ LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
return SDValue();
}
+MachineMemOperand::Flags
+XCoreTargetLowering::getMMOFlags(const Instruction &I) const {
+ // Because of how we convert atomic_load and atomic_store to normal loads and
+ // stores in the DAG, we need to ensure that the MMOs are marked volatile
+ // since DAGCombine hasn't been updated to account for atomic, but non
+ // volatile loads. (See D57601)
+ if (auto *SI = dyn_cast<StoreInst>(&I))
+ if (SI->isAtomic())
+ return MachineMemOperand::MOVolatile;
+ if (auto *LI = dyn_cast<LoadInst>(&I))
+ if (LI->isAtomic())
+ return MachineMemOperand::MOVolatile;
+ if (auto *AI = dyn_cast<AtomicRMWInst>(&I))
+ if (AI->isAtomic())
+ return MachineMemOperand::MOVolatile;
+ if (auto *AI = dyn_cast<AtomicCmpXchgInst>(&I))
+ if (AI->isAtomic())
+ return MachineMemOperand::MOVolatile;
+ return MachineMemOperand::MONone;
+}
+
//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.h b/llvm/lib/Target/XCore/XCoreISelLowering.h
index eb5b17e0530..b4f25feda7f 100644
--- a/llvm/lib/Target/XCore/XCoreISelLowering.h
+++ b/llvm/lib/Target/XCore/XCoreISelLowering.h
@@ -188,6 +188,8 @@ namespace llvm {
SDValue LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const;
+ MachineMemOperand::Flags getMMOFlags(const Instruction &I) const override;
+
// Inline asm support
std::pair<unsigned, const TargetRegisterClass *>
getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
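The SystemZ and XCore overrides above are identical: any atomic load, store, atomicrmw or cmpxchg keeps the MOVolatile flag, preserving the old conservative behaviour on those targets until their DAG combines handle non-volatile atomics. A possible shorter form is sketched below, shown for XCore (SystemZ would be the same). It relies on Instruction::isAtomic() and on the assumption that fences, which also report isAtomic() but carry no memory operand, never reach this hook; treat it as an untested simplification, not part of the patch.

// Untested sketch of a possibly equivalent override. Instruction::isAtomic()
// is true for atomic loads/stores, atomicrmw, cmpxchg and fences; for
// non-atomic loads and stores both versions return MONone.
MachineMemOperand::Flags
XCoreTargetLowering::getMMOFlags(const Instruction &I) const {
  return I.isAtomic() ? MachineMemOperand::MOVolatile
                      : MachineMemOperand::MONone;
}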
diff --git a/llvm/test/CodeGen/AMDGPU/syncscopes.ll b/llvm/test/CodeGen/AMDGPU/syncscopes.ll
index 413b7654e05..e277a569ef5 100644
--- a/llvm/test/CodeGen/AMDGPU/syncscopes.ll
+++ b/llvm/test/CodeGen/AMDGPU/syncscopes.ll
@@ -1,9 +1,9 @@
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx803 -stop-after=si-insert-skips < %s | FileCheck --check-prefix=GCN %s
; GCN-LABEL: name: syncscopes
-; GCN: FLAT_STORE_DWORD killed renamable $vgpr1_vgpr2, killed renamable $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (volatile store syncscope("agent") seq_cst 4 into %ir.agent_out)
-; GCN: FLAT_STORE_DWORD killed renamable $vgpr4_vgpr5, killed renamable $vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr :: (volatile store syncscope("workgroup") seq_cst 4 into %ir.workgroup_out)
-; GCN: FLAT_STORE_DWORD killed renamable $vgpr7_vgpr8, killed renamable $vgpr6, 0, 0, 0, implicit $exec, implicit $flat_scr :: (volatile store syncscope("wavefront") seq_cst 4 into %ir.wavefront_out)
+; GCN: FLAT_STORE_DWORD killed renamable $vgpr1_vgpr2, killed renamable $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store syncscope("agent") seq_cst 4 into %ir.agent_out)
+; GCN: FLAT_STORE_DWORD killed renamable $vgpr4_vgpr5, killed renamable $vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store syncscope("workgroup") seq_cst 4 into %ir.workgroup_out)
+; GCN: FLAT_STORE_DWORD killed renamable $vgpr7_vgpr8, killed renamable $vgpr6, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store syncscope("wavefront") seq_cst 4 into %ir.wavefront_out)
define void @syncscopes(
i32 %agent,
i32* %agent_out,
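The updated CHECK lines drop only the volatile marker; the syncscope and seq_cst ordering are still printed because they remain recorded in the MachineMemOperand. A small hedged sketch of how a later pass could query both properties independently, using existing MachineMemOperand accessors:

#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/Support/AtomicOrdering.h"
using namespace llvm;

// Sketch only: with this patch an MMO such as
//   (store syncscope("agent") seq_cst 4 into %ir.agent_out)
// is atomic but no longer volatile, and both properties remain queryable.
static bool isSeqCstNonVolatileStore(const MachineMemOperand &MMO) {
  return MMO.isStore() && !MMO.isVolatile() &&
         MMO.getOrdering() == AtomicOrdering::SequentiallyConsistent;
}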