author     Tom Stellard <thomas.stellard@amd.com>   2016-10-28 23:53:48 +0000
committer  Tom Stellard <thomas.stellard@amd.com>   2016-10-28 23:53:48 +0000
commit     6695ba0440f38c4cf0ccddd514b079b96786e352 (patch)
tree       b68c6ba36c018659608394ff1e7ba45a74e18f28
parent     2678d023ca5b02dbecd65a88520366523e1ae6de (diff)
download   bcm5719-llvm-6695ba0440f38c4cf0ccddd514b079b96786e352.tar.gz
           bcm5719-llvm-6695ba0440f38c4cf0ccddd514b079b96786e352.zip
AMDGPU/SI: Don't use non-0 waitcnt values when waiting on Flat instructions
Summary:
Flat instructions can return out of order, so we always need to wait for all
outstanding flat operations.

Reviewers: tony-tye, arsenm

Subscribers: kzhuravl, wdng, nhaehnle, llvm-commits, yaxunl

Differential Revision: https://reviews.llvm.org/D25998

llvm-svn: 285479
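For readers of the test added below: the raw S_WAITCNT immediates it checks for
(113 and 112) pack the three wait counters into a single 16-bit operand. The
following is a minimal standalone C++ sketch that decodes such an immediate,
assuming the VI/gfx8 field layout the test targets (vmcnt in bits [3:0], expcnt
in bits [6:4], lgkmcnt in bits [11:8]); it is not code from this patch.

#include <cstdint>
#include <cstdio>

// Decode an S_WAITCNT simm16 operand, assuming the VI/gfx8 layout:
//   vmcnt   = bits [3:0]  (vector memory ops allowed to remain outstanding)
//   expcnt  = bits [6:4]  (export/GDS ops allowed to remain outstanding)
//   lgkmcnt = bits [11:8] (LDS/GDS/constant/message ops allowed to remain)
// A field at its maximum value (7 or 15) means "do not wait on this counter".
static void decodeWaitcnt(uint16_t Imm) {
  unsigned VmCnt   = Imm & 0xf;
  unsigned ExpCnt  = (Imm >> 4) & 0x7;
  unsigned LgkmCnt = (Imm >> 8) & 0xf;
  std::printf("S_WAITCNT %u -> vmcnt(%u) expcnt(%u) lgkmcnt(%u)\n",
              Imm, VmCnt, ExpCnt, LgkmCnt);
}

int main() {
  decodeWaitcnt(113); // vmcnt(1) expcnt(7) lgkmcnt(0): one VM op may remain.
  decodeWaitcnt(112); // vmcnt(0) expcnt(7) lgkmcnt(0): drain all VM ops.
  return 0;
}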
-rw-r--r--   llvm/lib/Target/AMDGPU/SIInsertWaits.cpp    13
-rw-r--r--   llvm/lib/Target/AMDGPU/SIInstrInfo.cpp      14
-rw-r--r--   llvm/lib/Target/AMDGPU/SIInstrInfo.h         2
-rw-r--r--   llvm/test/CodeGen/MIR/AMDGPU/waitcnt.mir    59
4 files changed, 86 insertions, 2 deletions
diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaits.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaits.cpp
index e2ae25af561..6c4a2a4d210 100644
--- a/llvm/lib/Target/AMDGPU/SIInsertWaits.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInsertWaits.cpp
@@ -93,6 +93,9 @@ private:
bool LastInstWritesM0;
+ /// Whether or not we have flat operations outstanding.
+ bool IsFlatOutstanding;
+
/// \brief Whether the machine function returns void
bool ReturnsVoid;
@@ -294,6 +297,9 @@ void SIInsertWaits::pushInstruction(MachineBasicBlock &MBB,
Counters Limit = ZeroCounts;
unsigned Sum = 0;
+ if (TII->mayAccessFlatAddressSpace(*I))
+ IsFlatOutstanding = true;
+
for (unsigned i = 0; i < 3; ++i) {
LastIssued.Array[i] += Increment.Array[i];
if (Increment.Array[i])
@@ -368,8 +374,9 @@ bool SIInsertWaits::insertWait(MachineBasicBlock &MBB,
// Figure out if the async instructions execute in order
bool Ordered[3];
- // VM_CNT is always ordered
- Ordered[0] = true;
+ // VM_CNT is always ordered except when there are flat instructions, which
+ // can return out of order.
+ Ordered[0] = !IsFlatOutstanding;
// EXP_CNT is unordered if we have both EXP & VM-writes
Ordered[1] = ExpInstrTypesSeen == 3;
@@ -419,6 +426,7 @@ bool SIInsertWaits::insertWait(MachineBasicBlock &MBB,
LastOpcodeType = OTHER;
LastInstWritesM0 = false;
+ IsFlatOutstanding = false;
return true;
}
@@ -532,6 +540,7 @@ bool SIInsertWaits::runOnMachineFunction(MachineFunction &MF) {
LastIssued = ZeroCounts;
LastOpcodeType = OTHER;
LastInstWritesM0 = false;
+ IsFlatOutstanding = false;
ReturnsVoid = MF.getInfo<SIMachineFunctionInfo>()->returnsVoid();
memset(&UsedRegs, 0, sizeof(UsedRegs));
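To illustrate why the Ordered[0] change above matters: when every outstanding
VM operation completes in order, the pass can wait on a non-zero vmcnt and let
the newest operations stay in flight, but once an out-of-order flat operation
is outstanding, a partial wait proves nothing about which operation finished,
so the counter must drain to zero. The following is a minimal standalone
sketch of that decision, not the pass's actual code; the names chooseVmCntWait,
Outstanding, and Needed are illustrative only.

#include <algorithm>

// Pick the vmcnt value to wait on, given how many VM ops are outstanding and
// how many of the oldest ones the current instruction depends on.
unsigned chooseVmCntWait(unsigned Outstanding, unsigned Needed,
                         bool FlatOutstanding) {
  // In-order completion (e.g. plain global loads): once only
  // `Outstanding - Needed` ops remain, the needed results have arrived.
  if (!FlatOutstanding)
    return Outstanding - std::min(Needed, Outstanding);
  // Flat ops can complete out of order, so only a full drain is safe.
  return 0;
}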
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 72de6497397..d8c98e6897b 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -3540,6 +3540,20 @@ unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
}
}
+bool SIInstrInfo::mayAccessFlatAddressSpace(const MachineInstr &MI) const {
+ if (!isFLAT(MI))
+ return false;
+
+ if (MI.memoperands_empty())
+ return true;
+
+ for (const MachineMemOperand *MMO : MI.memoperands()) {
+ if (MMO->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS)
+ return true;
+ }
+ return false;
+}
+
ArrayRef<std::pair<int, const char *>>
SIInstrInfo::getSerializableTargetIndices() const {
static const std::pair<int, const char *> TargetIndices[] = {
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index e5d237b6279..b83d2cd97f6 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -617,6 +617,8 @@ public:
unsigned getInstSizeInBytes(const MachineInstr &MI) const override;
+ bool mayAccessFlatAddressSpace(const MachineInstr &MI) const;
+
ArrayRef<std::pair<int, const char *>>
getSerializableTargetIndices() const override;
diff --git a/llvm/test/CodeGen/MIR/AMDGPU/waitcnt.mir b/llvm/test/CodeGen/MIR/AMDGPU/waitcnt.mir
new file mode 100644
index 00000000000..cb5de6a2419
--- /dev/null
+++ b/llvm/test/CodeGen/MIR/AMDGPU/waitcnt.mir
@@ -0,0 +1,59 @@
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass si-insert-waits %s -o - | FileCheck %s
+
+--- |
+ define void @flat_zero_waitcnt(i32 addrspace(1)* %global4,
+ <4 x i32> addrspace(1)* %global16,
+ i32 addrspace(4)* %flat4,
+ <4 x i32> addrspace(4)* %flat16) {
+ ret void
+ }
+...
+---
+
+# CHECK-LABEL: name: flat_zero_waitcnt
+
+# CHECK-LABEL: bb.0:
+# CHECK: FLAT_LOAD_DWORD
+# CHECK: FLAT_LOAD_DWORDX4
+# Global loads return in order, so waiting for the first load is enough:
+# s_waitcnt vmcnt(1) lgkmcnt(0)
+# CHECK-NEXT: S_WAITCNT 113
+
+# CHECK-LABEL: bb.1:
+# CHECK: FLAT_LOAD_DWORD
+# CHECK: FLAT_LOAD_DWORDX4
+# The first load has no mem operand, so we should assume it accesses the flat
+# address space.
+# s_waitcnt vmcnt(0) lgkmcnt(0)
+# CHECK-NEXT: S_WAITCNT 112
+
+# CHECK-LABEL: bb.2:
+# CHECK: FLAT_LOAD_DWORD
+# CHECK: FLAT_LOAD_DWORDX4
+# One of the outstanding loads accesses the flat address space.
+# s_waitcnt vmcnt(0) lgkmcnt(0)
+# CHECK-NEXT: S_WAITCNT 112
+
+name: flat_zero_waitcnt
+
+body: |
+ bb.0:
+ successors: %bb.1
+ %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.global4)
+ %vgpr3_vgpr4_vgpr5_vgpr6 = FLAT_LOAD_DWORDX4 %vgpr7_vgpr8, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 16 from %ir.global16)
+ %vgpr0 = V_MOV_B32_e32 %vgpr1, implicit %exec
+ S_BRANCH %bb.1
+
+ bb.1:
+ successors: %bb.2
+ %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, 0, implicit %exec, implicit %flat_scr
+ %vgpr3_vgpr4_vgpr5_vgpr6 = FLAT_LOAD_DWORDX4 %vgpr7_vgpr8, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 16 from %ir.global16)
+ %vgpr0 = V_MOV_B32_e32 %vgpr1, implicit %exec
+ S_BRANCH %bb.2
+
+ bb.2:
+ %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.flat4)
+ %vgpr3_vgpr4_vgpr5_vgpr6 = FLAT_LOAD_DWORDX4 %vgpr7_vgpr8, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 16 from %ir.flat16)
+ %vgpr0 = V_MOV_B32_e32 %vgpr1, implicit %exec
+ S_ENDPGM
+...