diff options
author | Tom Stellard <thomas.stellard@amd.com> | 2016-03-28 16:10:13 +0000 |
---|---|---|
committer | Tom Stellard <thomas.stellard@amd.com> | 2016-03-28 16:10:13 +0000 |
commit | a76bcc2ea1474fe7df0c757d3a4ca0bfaeed8913 (patch) | |
tree | 286d0524226e8af2d487bdad1958e2600d55f567 | |
parent | 6db1dcbf6b42d1eedc070585c65e6fe7dab25e54 (diff) | |
download | bcm5719-llvm-a76bcc2ea1474fe7df0c757d3a4ca0bfaeed8913.tar.gz bcm5719-llvm-a76bcc2ea1474fe7df0c757d3a4ca0bfaeed8913.zip |
AMDGPU/SI: Limit load clustering to 16 bytes instead of 4 instructions
Summary:
This helps prevent load clustering from drastically increasing register
pressure by trying to cluster 4 SMRDx8 loads together. The limit of 16
bytes was chosen, because it seems like that was the original intent
of setting the limit to 4 instructions, but more analysis could show
that a different limit is better.
This fix yields small decreases in register usage with shader-db, but
also helps avoid a large increase in register usage when lane mask
tracking is enabled in the machine scheduler, because lane mask tracking
enables more opportunities for load clustering.
shader-db stats:
2379 shaders in 477 tests
Totals:
SGPRS: 49744 -> 48600 (-2.30 %)
VGPRS: 34120 -> 34076 (-0.13 %)
Code Size: 1282888 -> 1283184 (0.02 %) bytes
LDS: 28 -> 28 (0.00 %) blocks
Scratch: 495616 -> 492544 (-0.62 %) bytes per wave
Max Waves: 6843 -> 6853 (0.15 %)
Wait states: 0 -> 0 (0.00 %)
Reviewers: nhaehnle, arsenm
Subscribers: arsenm, llvm-commits
Differential Revision: http://reviews.llvm.org/D18451
llvm-svn: 264589
-rw-r--r-- | llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 41 | ||||
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/ctpop.ll | 2 | ||||
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/madak.ll | 2 | ||||
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/schedule-kernel-arg-loads.ll | 14 |
4 files changed, 43 insertions, 16 deletions
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp index f0b420d10a8..4b1b15f868a 100644 --- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp @@ -295,18 +295,43 @@ bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg, bool SIInstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt, MachineInstr *SecondLdSt, unsigned NumLoads) const { - // TODO: This needs finer tuning - if (NumLoads > 4) + const MachineOperand *FirstDst = nullptr; + const MachineOperand *SecondDst = nullptr; + + if (isDS(*FirstLdSt) && isDS(*SecondLdSt)) { + FirstDst = getNamedOperand(*FirstLdSt, AMDGPU::OpName::vdst); + SecondDst = getNamedOperand(*SecondLdSt, AMDGPU::OpName::vdst); + } + + if (isSMRD(*FirstLdSt) && isSMRD(*SecondLdSt)) { + FirstDst = getNamedOperand(*FirstLdSt, AMDGPU::OpName::sdst); + SecondDst = getNamedOperand(*SecondLdSt, AMDGPU::OpName::sdst); + } + + if ((isMUBUF(*FirstLdSt) && isMUBUF(*SecondLdSt)) || + (isMTBUF(*FirstLdSt) && isMTBUF(*SecondLdSt))) { + FirstDst = getNamedOperand(*FirstLdSt, AMDGPU::OpName::vdata); + SecondDst = getNamedOperand(*SecondLdSt, AMDGPU::OpName::vdata); + } + + if (!FirstDst || !SecondDst) return false; - if (isDS(*FirstLdSt) && isDS(*SecondLdSt)) - return true; + // Try to limit clustering based on the total number of bytes loaded + // rather than the number of instructions. This is done to help reduce + // register pressure. The method used is somewhat inexact, though, + // because it assumes that all loads in the cluster will load the + // same number of bytes as FirstLdSt. - if (isSMRD(*FirstLdSt) && isSMRD(*SecondLdSt)) - return true; + // The unit of this value is bytes. + // FIXME: This needs finer tuning. 
+ unsigned LoadClusterThreshold = 16; + + const MachineRegisterInfo &MRI = + FirstLdSt->getParent()->getParent()->getRegInfo(); + const TargetRegisterClass *DstRC = MRI.getRegClass(FirstDst->getReg()); - return (isMUBUF(*FirstLdSt) || isMTBUF(*FirstLdSt)) && - (isMUBUF(*SecondLdSt) || isMTBUF(*SecondLdSt)); + return (NumLoads * DstRC->getSize()) <= LoadClusterThreshold; } void diff --git a/llvm/test/CodeGen/AMDGPU/ctpop.ll b/llvm/test/CodeGen/AMDGPU/ctpop.ll index 0a031c5e24d..839b893035a 100644 --- a/llvm/test/CodeGen/AMDGPU/ctpop.ll +++ b/llvm/test/CodeGen/AMDGPU/ctpop.ll @@ -60,7 +60,7 @@ define void @v_ctpop_add_chain_i32(i32 addrspace(1)* noalias %out, i32 addrspace ; FUNC-LABEL: {{^}}v_ctpop_add_sgpr_i32: ; GCN: buffer_load_dword [[VAL0:v[0-9]+]], -; GCN-NEXT: s_waitcnt +; GCN: s_waitcnt ; GCN-NEXT: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], [[VAL0]], s{{[0-9]+}} ; GCN-NEXT: buffer_store_dword [[RESULT]], ; GCN: s_endpgm diff --git a/llvm/test/CodeGen/AMDGPU/madak.ll b/llvm/test/CodeGen/AMDGPU/madak.ll index b34fdfac257..96dacefd453 100644 --- a/llvm/test/CodeGen/AMDGPU/madak.ll +++ b/llvm/test/CodeGen/AMDGPU/madak.ll @@ -101,7 +101,7 @@ define void @madak_inline_imm_f32(float addrspace(1)* noalias %out, float addrsp ; We can't use an SGPR when forming madak ; GCN-LABEL: {{^}}s_v_madak_f32: -; GCN: s_load_dword [[SB:s[0-9]+]] +; GCN-DAG: s_load_dword [[SB:s[0-9]+]] ; GCN-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x41200000 ; GCN-DAG: buffer_load_dword [[VA:v[0-9]+]] ; GCN-NOT: v_madak_f32 diff --git a/llvm/test/CodeGen/AMDGPU/schedule-kernel-arg-loads.ll b/llvm/test/CodeGen/AMDGPU/schedule-kernel-arg-loads.ll index 6b3e0814c38..f5cfd23e873 100644 --- a/llvm/test/CodeGen/AMDGPU/schedule-kernel-arg-loads.ll +++ b/llvm/test/CodeGen/AMDGPU/schedule-kernel-arg-loads.ll @@ -2,17 +2,19 @@ ; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=VI --check-prefix=GCN %s ; FUNC-LABEL: {{^}}cluster_arg_loads: +; FIXME: Due to 
changes in the load clustering heuristics, we no longer +; cluster all argument loads together. +; SI: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0xd +; SI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0xe ; SI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x9 ; SI-NEXT: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xb -; SI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0xd -; SI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0xe -; VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x24 +; VI: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0x34 ; VI-NEXT: s_nop 0 -; VI-NEXT: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x2c +; VI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0x38 ; VI-NEXT: s_nop 0 -; VI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0x34 +; VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x24 ; VI-NEXT: s_nop 0 -; VI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0x38 +; VI-NEXT: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x2c define void @cluster_arg_loads(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 %x, i32 %y) nounwind { store i32 %x, i32 addrspace(1)* %out0, align 4 store i32 %y, i32 addrspace(1)* %out1, align 4 |