author     Nicolai Haehnle <nhaehnle@gmail.com>  2016-02-10 20:13:58 +0000
committer  Nicolai Haehnle <nhaehnle@gmail.com>  2016-02-10 20:13:58 +0000
commit     d791bd07c707c432f0f97d88e8161274a5867f78 (patch)
tree       c5e0aed311c70c078c5aa466c096a8c5fcea9f0d
parent     d76d4aabdd4e070924966e8c7bc2073f85bfebb9 (diff)
AMDGPU: Release the scavenged offset register during VGPR spill
Summary:
This fixes a crash where subsequent spills would be unable to scavenge a
register. In particular, it fixes a crash in piglit's
spec@glsl-1.50@execution@geometry@max-input-components (the test still has a
shader that fails to compile because of too many SGPR spills, but at least it
doesn't crash any more).

This is a candidate for the release branch.

Reviewers: arsenm, tstellarAMD

Subscribers: qcolombet, arsenm

Differential Revision: http://reviews.llvm.org/D16558

llvm-svn: 260427
-rw-r--r--   llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp           9
-rw-r--r--   llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll   33
2 files changed, 41 insertions(+), 1 deletion(-)
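
Context for the diff below: buildScratchLoadStore scavenges a spare SGPR when a
spill offset no longer fits in the instruction's immediate field, and before this
patch that SGPR was never marked as killed, so the register scavenger treated it
as live forever and the next spill that needed an offset register found nothing
free. The following is a minimal, self-contained toy model of that failure mode,
not the LLVM RegScavenger API; all names in it (ToyScavenger, scavenge, release)
are invented for illustration.

#include <cassert>
#include <cstdio>
#include <optional>
#include <set>

// Toy stand-in for a register scavenger: hands out free registers and only
// gets them back when the user explicitly releases ("kills") them.
struct ToyScavenger {
  std::set<int> FreeRegs{1, 2, 3, 4};          // pretend SGPR pool

  std::optional<int> scavenge() {              // pick any free register
    if (FreeRegs.empty())
      return std::nullopt;                     // nothing left -> the old crash
    int R = *FreeRegs.begin();
    FreeRegs.erase(FreeRegs.begin());
    return R;
  }

  void release(int R) { FreeRegs.insert(R); }  // what a kill flag enables
};

int main() {
  // Before the patch: every large-offset spill grabs an SGPR and never
  // releases it, so after a handful of spills scavenging fails.
  ToyScavenger RS;
  for (int Spill = 0; Spill < 6; ++Spill) {
    auto R = RS.scavenge();
    if (!R) {
      std::printf("spill %d: no register left to scavenge\n", Spill);
      break;
    }
    std::printf("spill %d: scavenged reg %d, never released\n", Spill, *R);
  }

  // After the patch: the last use of the scavenged register releases it,
  // so later spills always find a free register.
  ToyScavenger RS2;
  for (int Spill = 0; Spill < 6; ++Spill) {
    auto R = RS2.scavenge();
    assert(R && "released after last use, so this never fails");
    std::printf("spill %d: scavenged reg %d, released after last use\n",
                Spill, *R);
    RS2.release(*R);
  }
  return 0;
}

In the actual patch the "release" is the RegState::Kill flag added to the final
.addReg(SOffset, SOffsetRegState) use: a kill flag ends the live range of
SOffset at that instruction, which is what lets the scavenger hand the register
out again for the next spill.
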
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index 609f5e7df54..025ed2b5b76 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -234,6 +234,7 @@ void SIRegisterInfo::buildScratchLoadStore(MachineBasicBlock::iterator MI,
   bool IsLoad = TII->get(LoadStoreOp).mayLoad();
 
   bool RanOutOfSGPRs = false;
+  bool Scavenged = false;
   unsigned SOffset = ScratchOffset;
 
   unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
@@ -244,6 +245,8 @@ void SIRegisterInfo::buildScratchLoadStore(MachineBasicBlock::iterator MI,
     if (SOffset == AMDGPU::NoRegister) {
       RanOutOfSGPRs = true;
       SOffset = AMDGPU::SGPR0;
+    } else {
+      Scavenged = true;
     }
     BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
             .addReg(ScratchOffset)
@@ -259,10 +262,14 @@ void SIRegisterInfo::buildScratchLoadStore(MachineBasicBlock::iterator MI,
         getPhysRegSubReg(Value, &AMDGPU::VGPR_32RegClass, i) :
         Value;
 
+    unsigned SOffsetRegState = 0;
+    if (i + 1 == e && Scavenged)
+      SOffsetRegState |= RegState::Kill;
+
     BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
       .addReg(SubReg, getDefRegState(IsLoad))
       .addReg(ScratchRsrcReg)
-      .addReg(SOffset)
+      .addReg(SOffset, SOffsetRegState)
       .addImm(Offset)
       .addImm(0) // glc
       .addImm(0) // slc
diff --git a/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll b/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
new file mode 100644
index 00000000000..4a12ed545b8
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
@@ -0,0 +1,33 @@
+; RUN: llc -march=amdgcn -mcpu=verde < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck %s
+
+; When the offset of VGPR spills into scratch space gets too large, an additional SGPR
+; is used to calculate the scratch load/store address. Make sure that this
+; mechanism works even when many spills happen.
+
+; Just test that it compiles successfully.
+; CHECK-LABEL: test
+define void @test(<1280 x i32> addrspace(1)* %out, <1280 x i32> addrspace(1)* %in,
+ <96 x i32> addrspace(1)* %sdata_out, <96 x i32> %sdata_in) {
+entry:
+ %tid = call i32 @llvm.SI.tid() nounwind readnone
+
+ %aptr = getelementptr <1280 x i32>, <1280 x i32> addrspace(1)* %in, i32 %tid
+ %a = load <1280 x i32>, <1280 x i32> addrspace(1)* %aptr
+
+; mark most VGPR registers as used to increase register pressure
+ call void asm sideeffect "", "~{VGPR4},~{VGPR8},~{VGPR12},~{VGPR16},~{VGPR20},~{VGPR24},~{VGPR28},~{VGPR32}" ()
+ call void asm sideeffect "", "~{VGPR36},~{VGPR40},~{VGPR44},~{VGPR48},~{VGPR52},~{VGPR56},~{VGPR60},~{VGPR64}" ()
+ call void asm sideeffect "", "~{VGPR68},~{VGPR72},~{VGPR76},~{VGPR80},~{VGPR84},~{VGPR88},~{VGPR92},~{VGPR96}" ()
+ call void asm sideeffect "", "~{VGPR100},~{VGPR104},~{VGPR108},~{VGPR112},~{VGPR116},~{VGPR120},~{VGPR124},~{VGPR128}" ()
+ call void asm sideeffect "", "~{VGPR132},~{VGPR136},~{VGPR140},~{VGPR144},~{VGPR148},~{VGPR152},~{VGPR156},~{VGPR160}" ()
+ call void asm sideeffect "", "~{VGPR164},~{VGPR168},~{VGPR172},~{VGPR176},~{VGPR180},~{VGPR184},~{VGPR188},~{VGPR192}" ()
+ call void asm sideeffect "", "~{VGPR196},~{VGPR200},~{VGPR204},~{VGPR208},~{VGPR212},~{VGPR216},~{VGPR220},~{VGPR224}" ()
+
+ %outptr = getelementptr <1280 x i32>, <1280 x i32> addrspace(1)* %out, i32 %tid
+ store <1280 x i32> %a, <1280 x i32> addrspace(1)* %outptr
+
+ ret void
+}
+
+declare i32 @llvm.SI.tid() nounwind readnone
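
The comment at the top of the new test says the spill offset "gets too large".
A rough check of that claim (the 12-bit immediate-offset limit below is an
assumption about the MUBUF scratch encoding, not something stated in the
patch): a <1280 x i32> value occupies 1280 * 4 = 5120 bytes of scratch, which
exceeds a 4095-byte immediate offset, so part of the address must be carried in
the scavenged SGPR. A trivial sketch of the arithmetic:

#include <cstdio>

int main() {
  // Assumption: the scratch load/store immediate offset field is 12 bits wide.
  const unsigned MaxImmOffset = 4095;
  // The test spills a <1280 x i32>, i.e. 1280 * 4 bytes of scratch.
  const unsigned SpilledBytes = 1280 * 4;

  if (SpilledBytes > MaxImmOffset)
    std::printf("spill slots reach byte %u > %u: an extra SGPR has to hold "
                "the overflowing part of the offset\n",
                SpilledBytes, MaxImmOffset);
  return 0;
}
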