author     Matt Arsenault <Matthew.Arsenault@amd.com>   2016-11-13 18:20:54 +0000
committer  Matt Arsenault <Matthew.Arsenault@amd.com>   2016-11-13 18:20:54 +0000
commit     dc45274d546b531ca64308cfe4304042e341c463 (patch)
tree       b70f3b53f74a692128c5fd8b8f21618a9a4e75d0 /llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
parent     e2399f9e0e0272ad12f649c8eff0da49dcbf2068 (diff)
AMDGPU: Implement SGPR spilling with scalar stores
This avoids the nasty problems caused by using memory instructions that
read the exec mask while spilling / restoring registers used for control
flow masking, but only for VI when these were added.

This always uses the scalar stores when enabled currently, but it may be
better to still try to spill to a VGPR and use this on the fallback
memory path.

The cache also needs to be flushed before wave termination if a scalar
store is used.

llvm-svn: 286766
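For context on the hazard: vector memory instructions on AMDGPU are implicitly predicated by the exec mask, while scalar (SMEM) stores are not, which is why spilling the very registers that hold control-flow masks through vector memory is fragile. A toy model of that difference, as a plain C++ sketch (the names and the 64-lane assumption are illustrative, not from the patch):

#include <cstdint>
#include <cstdio>

// Toy model only -- not backend code. A vector memory spill writes a lane's
// slot only when that lane's exec bit is set, so a value spilled while exec
// is partially cleared for control-flow masking is not fully preserved.
// A scalar (SMEM) store ignores exec entirely.
static void vectorSpill(uint32_t mem[64], uint64_t exec, uint32_t value) {
  for (unsigned lane = 0; lane < 64; ++lane)
    if (exec & (1ull << lane)) // buffer_store is predicated by exec
      mem[lane] = value;
}

static void scalarSpill(uint32_t &smem, uint32_t value) {
  smem = value; // s_buffer_store_dword runs regardless of exec
}

int main() {
  uint32_t vmem[64] = {};
  uint32_t smem = 0;
  vectorSpill(vmem, /*exec=*/0x1, 42); // only lane 0 is written
  scalarSpill(smem, 42);               // always written
  std::printf("vmem[0]=%u vmem[1]=%u smem=%u\n", vmem[0], vmem[1], smem);
  return 0;
}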
Diffstat (limited to 'llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp')
-rw-r--r--   llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp | 106
1 file changed, 99 insertions(+), 7 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index 2a093fff20f..63cd49dc044 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -24,6 +24,12 @@
using namespace llvm;
+static cl::opt<bool> EnableSpillSGPRToSMEM(
+ "amdgpu-spill-sgpr-to-smem",
+ cl::desc("Use scalar stores to spill SGPRs if supported by subtarget"),
+ cl::init(true));
+
+
static bool hasPressureSet(const int *PSets, unsigned PSetID) {
for (unsigned i = 0; PSets[i] != -1; ++i) {
if (PSets[i] == (int)PSetID)
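A minimal standalone sketch of the cl::opt pattern used in the hunk above, assuming only LLVM's CommandLine support library (the flag and program here are invented for illustration; the real flag is amdgpu-spill-sgpr-to-smem, default-on via cl::init(true), so it can be disabled on tools such as llc with -amdgpu-spill-sgpr-to-smem=0):

#include "llvm/Support/CommandLine.h"
using namespace llvm;

// Illustrative stand-in for the option added above. A static cl::opt<bool>
// registers a global flag with the option parser; cl::init(true) makes it
// default to enabled, and -example-spill-to-smem=0 turns it off.
static cl::opt<bool> ExampleSpillToSMEM(
  "example-spill-to-smem",
  cl::desc("Example toggle mirroring amdgpu-spill-sgpr-to-smem"),
  cl::init(true));

int main(int argc, char **argv) {
  cl::ParseCommandLineOptions(argc, argv);
  return ExampleSpillToSMEM ? 0 : 1;
}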
@@ -475,18 +481,21 @@ void SIRegisterInfo::buildSpillLoadStore(MachineBasicBlock::iterator MI,
void SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI,
int Index,
RegScavenger *RS) const {
- MachineFunction *MF = MI->getParent()->getParent();
- MachineRegisterInfo &MRI = MF->getRegInfo();
MachineBasicBlock *MBB = MI->getParent();
- SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
- MachineFrameInfo &FrameInfo = MF->getFrameInfo();
+ MachineFunction *MF = MBB->getParent();
+ MachineRegisterInfo &MRI = MF->getRegInfo();
const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
const SIInstrInfo *TII = ST.getInstrInfo();
- const DebugLoc &DL = MI->getDebugLoc();
unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
unsigned SuperReg = MI->getOperand(0).getReg();
bool IsKill = MI->getOperand(0).isKill();
+ const DebugLoc &DL = MI->getDebugLoc();
+
+ SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
+ MachineFrameInfo &FrameInfo = MF->getFrameInfo();
+
+ bool SpillToSMEM = ST.hasScalarStores() && EnableSpillSGPRToSMEM;
// SubReg carries the "Kill" flag when SubReg == SuperReg.
unsigned SubKillState = getKillRegState((NumSubRegs == 1) && IsKill);
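The SubKillState line above leans on getKillRegState, which folds a boolean into register-operand flag bits; a small sketch, assuming only the standard MachineInstrBuilder helpers:

#include "llvm/CodeGen/MachineInstrBuilder.h"
using namespace llvm;

// getKillRegState(true) is RegState::Kill and getKillRegState(false) is 0,
// so the kill flag is attached to the store's source operand only when the
// spill covers a single 32-bit subregister (SubReg == SuperReg) and that
// operand was the last use of the register.
static unsigned subKillState(unsigned NumSubRegs, bool IsKill) {
  return getKillRegState(NumSubRegs == 1 && IsKill);
}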
@@ -494,6 +503,55 @@ void SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI,
unsigned SubReg = NumSubRegs == 1 ?
SuperReg : getSubReg(SuperReg, getSubRegFromChannel(i));
+ if (SpillToSMEM) {
+ if (SuperReg == AMDGPU::M0) {
+ assert(NumSubRegs == 1);
+ unsigned CopyM0
+ = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
+
+ BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), CopyM0)
+ .addReg(AMDGPU::M0, getKillRegState(IsKill));
+
+ // The real spill now kills the temp copy.
+ SubReg = SuperReg = CopyM0;
+ IsKill = true;
+ }
+
+ int64_t FrOffset = FrameInfo.getObjectOffset(Index);
+ unsigned Size = FrameInfo.getObjectSize(Index);
+ unsigned Align = FrameInfo.getObjectAlignment(Index);
+ MachinePointerInfo PtrInfo
+ = MachinePointerInfo::getFixedStack(*MF, Index);
+ MachineMemOperand *MMO
+ = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
+ Size, Align);
+
+ unsigned OffsetReg = AMDGPU::M0;
+ // Add i * 4 wave offset.
+ //
+ // SMEM instructions only support a single offset, so increment the wave
+ // offset.
+
+ int64_t Offset = ST.getWavefrontSize() * (FrOffset + 4 * i);
+ if (Offset != 0) {
+ BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), OffsetReg)
+ .addReg(MFI->getScratchWaveOffsetReg())
+ .addImm(Offset);
+ } else {
+ BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
+ .addReg(MFI->getScratchWaveOffsetReg());
+ }
+
+ BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_BUFFER_STORE_DWORD_SGPR))
+ .addReg(SubReg, getKillRegState(IsKill)) // sdata
+ .addReg(MFI->getScratchRSrcReg()) // sbase
+ .addReg(OffsetReg) // soff
+ .addImm(0) // glc
+ .addMemOperand(MMO);
+
+ continue;
+ }
+
struct SIMachineFunctionInfo::SpilledReg Spill =
MFI->getSpilledReg(MF, Index, i);
if (Spill.hasReg()) {
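The offset arithmetic in the SMEM path above scales the frame offset by the wavefront size because scratch memory is laid out with one slot per lane; each 32-bit subregister then adds a 4-byte stride. A worked sketch of that arithmetic (plain C++; the 64-wide wavefront is an assumption matching these subtargets):

#include <cstdint>
#include <cstdio>

// Mirrors Offset = ST.getWavefrontSize() * (FrOffset + 4 * i) from the
// spill path above: FrOffset is the frame object's base offset and i is the
// 32-bit subregister index within the spilled super-register.
static int64_t smemSpillOffset(int64_t FrOffset, unsigned i,
                               int64_t WavefrontSize = 64) {
  return WavefrontSize * (FrOffset + 4 * (int64_t)i);
}

int main() {
  // A 4-dword spill (e.g. an SReg_128) at frame offset 0 lands at byte
  // offsets 0, 256, 512 and 768 with a 64-wide wavefront.
  for (unsigned i = 0; i != 4; ++i)
    std::printf("subreg %u -> byte offset %lld\n", i,
                (long long)smemSpillOffset(0, i));
  return 0;
}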
@@ -520,10 +578,9 @@ void SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI,
// it are fixed.
} else {
// Spill SGPR to a frame index.
- // FIXME we should use S_STORE_DWORD here for VI.
-
+ // TODO: Should VI try to spill to VGPR and then spill to SMEM?
unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
MachineInstrBuilder Mov
= BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
@@ -575,6 +632,7 @@ void SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI,
unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
unsigned SuperReg = MI->getOperand(0).getReg();
+ bool SpillToSMEM = ST.hasScalarStores() && EnableSpillSGPRToSMEM;
// m0 is not allowed as with readlane/writelane, so a temporary SGPR and
// extra copy is needed.
@@ -584,10 +642,44 @@ void SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI,
SuperReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
}
+ int64_t FrOffset = FrameInfo.getObjectOffset(Index);
+
for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
unsigned SubReg = NumSubRegs == 1 ?
SuperReg : getSubReg(SuperReg, getSubRegFromChannel(i));
+ if (SpillToSMEM) {
+ unsigned Size = FrameInfo.getObjectSize(Index);
+ unsigned Align = FrameInfo.getObjectAlignment(Index);
+ MachinePointerInfo PtrInfo
+ = MachinePointerInfo::getFixedStack(*MF, Index);
+ MachineMemOperand *MMO
+ = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
+ Size, Align);
+
+ unsigned OffsetReg = AMDGPU::M0;
+
+ // Add i * 4 offset
+ int64_t Offset = ST.getWavefrontSize() * (FrOffset + 4 * i);
+ if (Offset != 0) {
+ BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), OffsetReg)
+ .addReg(MFI->getScratchWaveOffsetReg())
+ .addImm(Offset);
+ } else {
+ BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
+ .addReg(MFI->getScratchWaveOffsetReg());
+ }
+
+ BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_BUFFER_LOAD_DWORD_SGPR), SubReg)
+ .addReg(MFI->getScratchRSrcReg()) // sbase
+ .addReg(OffsetReg) // soff
+ .addImm(0) // glc
+ .addMemOperand(MMO)
+ .addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);
+
+ continue;
+ }
+
SIMachineFunctionInfo::SpilledReg Spill
= MFI->getSpilledReg(MF, Index, i);
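One more detail worth calling out on the restore side: m0 doubles as the SMEM soffset register here, and it is also not a legal readlane/writelane operand, so when m0 itself is being reloaded the loads must first target a temporary SGPR. A hedged restatement of that logic as a freestanding helper (illustrative only, not the in-tree code; the copy back into m0 after the reload loop is implied but not shown in this hunk):

#include "llvm/CodeGen/MachineRegisterInfo.h"

// Illustrative helper: pick the register the reloads should actually define.
// If the destination is m0 (passed in by the caller to avoid target-header
// dependencies), substitute a fresh virtual SGPR and report that a final
// COPY back into m0 is required afterwards.
static unsigned pickRestoreDest(llvm::MachineRegisterInfo &MRI,
                                unsigned SuperReg, unsigned M0PhysReg,
                                const llvm::TargetRegisterClass &TmpRC,
                                bool &NeedM0Copy) {
  NeedM0Copy = (SuperReg == M0PhysReg);
  return NeedM0Copy ? MRI.createVirtualRegister(&TmpRC) : SuperReg;
}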