path: root/llvm/lib/Target/R600/SIRegisterInfo.cpp
author    Tom Stellard <thomas.stellard@amd.com>    2014-08-21 20:40:54 +0000
committer Tom Stellard <thomas.stellard@amd.com>    2014-08-21 20:40:54 +0000
commit    c5cf2f04d93849c9f8447227b1e9980afef0c8b5 (patch)
tree      db3475d0d8f539d15ebd95c6377e4ed0cdcbc508 /llvm/lib/Target/R600/SIRegisterInfo.cpp
parent    11aa80cc4a41e95a2796df82537d701fc8273e4e (diff)
download  bcm5719-llvm-c5cf2f04d93849c9f8447227b1e9980afef0c8b5.tar.gz
          bcm5719-llvm-c5cf2f04d93849c9f8447227b1e9980afef0c8b5.zip
R600/SI: Use eliminateFrameIndex() to expand SGPR spill pseudos
This will simplify the SGPR spilling and also allow us to use MachineFrameInfo for calculating offsets, which should be more reliable than our custom code.

This fixes a crash in some cases where a register would be spilled in a branch such that the VGPR defined for spilling did not dominate all the uses when restoring.

This fixes a crash in an OpenCL conformance test. The test requires register spilling and is too big to include.

llvm-svn: 216217
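The expansion implemented below can be read as a lane-mapping scheme: each 32-bit sub-register of a spilled SGPR tuple is written into one lane of a spill VGPR (V_WRITELANE_B32) and later read back from the same lane (V_READLANE_B32). The following standalone C++ sketch models only that idea; it does not use the LLVM API, and names such as spillWidthToSubRegs and SpillSlot are illustrative assumptions, not identifiers from the patch.

// Standalone sketch (not LLVM API): models how an SGPR spill pseudo is expanded.
// Each 32-bit sub-register of the spilled tuple goes into one lane of a spill
// VGPR on save, and is read back from that lane on restore. All names here
// (spillWidthToSubRegs, SpillSlot, ...) are hypothetical, for illustration only.
#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

// Analogue of getNumSubRegsForSpillOp(): spill width in bits -> number of
// 32-bit sub-registers that have to be saved or restored.
static unsigned spillWidthToSubRegs(unsigned WidthInBits) {
  switch (WidthInBits) {
  case 512: return 16;
  case 256: return 8;
  case 128: return 4;
  case 64:  return 2;
  case 32:  return 1;
  default:  assert(false && "invalid spill width"); return 0;
  }
}

// One spill slot: which spill VGPR holds the value and in which lane.
struct SpillSlot {
  unsigned VGPR;
  unsigned Lane;
};

int main() {
  // Pretend a 128-bit SGPR tuple (4 sub-registers) is being spilled.
  const unsigned NumSubRegs = spillWidthToSubRegs(128);

  // Model one 64-lane VGPR as a vector of 32-bit lanes.
  std::vector<uint32_t> VGPR0(64, 0);

  // Save path: write each sub-register into its assigned lane
  // (the analogue of V_WRITELANE_B32).
  const uint32_t SGPRValues[4] = {0xAAAA0000, 0xBBBB1111, 0xCCCC2222, 0xDDDD3333};
  std::vector<SpillSlot> Slots;
  for (unsigned i = 0; i < NumSubRegs; ++i) {
    SpillSlot S{/*VGPR=*/0, /*Lane=*/i};
    VGPR0[S.Lane] = SGPRValues[i];
    Slots.push_back(S);
  }

  // Restore path: read the same lanes back (the analogue of V_READLANE_B32).
  for (unsigned i = 0; i < NumSubRegs; ++i) {
    uint32_t Restored = VGPR0[Slots[i].Lane];
    assert(Restored == SGPRValues[i]);
    std::cout << "sub-register " << i << " restored from lane "
              << Slots[i].Lane << "\n";
  }
  return 0;
}

The real patch obtains the (VGPR, lane) pair from SIMachineFunctionInfo::getSpilledReg() and emits an error when no VGPR is available; the sketch simply assigns lanes in order to keep the example self-contained.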
Diffstat (limited to 'llvm/lib/Target/R600/SIRegisterInfo.cpp')
-rw-r--r--  llvm/lib/Target/R600/SIRegisterInfo.cpp | 107
1 file changed, 98 insertions(+), 9 deletions(-)
diff --git a/llvm/lib/Target/R600/SIRegisterInfo.cpp b/llvm/lib/Target/R600/SIRegisterInfo.cpp
index 531c5047f50..8663df88922 100644
--- a/llvm/lib/Target/R600/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/R600/SIRegisterInfo.cpp
@@ -20,6 +20,8 @@
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/LLVMContext.h"
using namespace llvm;
@@ -43,23 +45,110 @@ bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const
  return Fn.getFrameInfo()->hasStackObjects();
}
+static unsigned getNumSubRegsForSpillOp(unsigned Op) {
+
+  switch (Op) {
+  case AMDGPU::SI_SPILL_S512_SAVE:
+  case AMDGPU::SI_SPILL_S512_RESTORE:
+    return 16;
+  case AMDGPU::SI_SPILL_S256_SAVE:
+  case AMDGPU::SI_SPILL_S256_RESTORE:
+    return 8;
+  case AMDGPU::SI_SPILL_S128_SAVE:
+  case AMDGPU::SI_SPILL_S128_RESTORE:
+    return 4;
+  case AMDGPU::SI_SPILL_S64_SAVE:
+  case AMDGPU::SI_SPILL_S64_RESTORE:
+    return 2;
+  case AMDGPU::SI_SPILL_S32_SAVE:
+  case AMDGPU::SI_SPILL_S32_RESTORE:
+    return 1;
+  default: llvm_unreachable("Invalid spill opcode");
+  }
+}
+
void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineFunction *MF = MI->getParent()->getParent();
+  MachineBasicBlock *MBB = MI->getParent();
+  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo *FrameInfo = MF->getFrameInfo();
  const SIInstrInfo *TII = static_cast<const SIInstrInfo*>(ST.getInstrInfo());
+  DebugLoc DL = MI->getDebugLoc();
+
  MachineOperand &FIOp = MI->getOperand(FIOperandNum);
  int Index = MI->getOperand(FIOperandNum).getIndex();
-  int64_t Offset = FrameInfo->getObjectOffset(Index);
-
-  FIOp.ChangeToImmediate(Offset);
-  if (!TII->isImmOperandLegal(MI, FIOperandNum, FIOp)) {
-    unsigned TmpReg = RS->scavengeRegister(&AMDGPU::VReg_32RegClass, MI, SPAdj);
-    BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
-            TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
-            .addImm(Offset);
-    FIOp.ChangeToRegister(TmpReg, false);
+
+  switch (MI->getOpcode()) {
+  // SGPR register spill
+  case AMDGPU::SI_SPILL_S512_SAVE:
+  case AMDGPU::SI_SPILL_S256_SAVE:
+  case AMDGPU::SI_SPILL_S128_SAVE:
+  case AMDGPU::SI_SPILL_S64_SAVE:
+  case AMDGPU::SI_SPILL_S32_SAVE: {
+    unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
+
+    for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
+      unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
+                                         &AMDGPU::SGPR_32RegClass, i);
+      struct SIMachineFunctionInfo::SpilledReg Spill =
+          MFI->getSpilledReg(MF, Index, i);
+
+      if (Spill.VGPR == AMDGPU::NoRegister) {
+        LLVMContext &Ctx = MF->getFunction()->getContext();
+        Ctx.emitError("Ran out of VGPRs for spilling SGPR");
+      }
+
+      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_WRITELANE_B32), Spill.VGPR)
+              .addReg(SubReg)
+              .addImm(Spill.Lane);
+
+    }
+    MI->eraseFromParent();
+    break;
+  }
+
+  // SGPR register restore
+  case AMDGPU::SI_SPILL_S512_RESTORE:
+  case AMDGPU::SI_SPILL_S256_RESTORE:
+  case AMDGPU::SI_SPILL_S128_RESTORE:
+  case AMDGPU::SI_SPILL_S64_RESTORE:
+  case AMDGPU::SI_SPILL_S32_RESTORE: {
+    unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
+
+    for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
+      unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
+                                         &AMDGPU::SGPR_32RegClass, i);
+      struct SIMachineFunctionInfo::SpilledReg Spill =
+          MFI->getSpilledReg(MF, Index, i);
+
+      if (Spill.VGPR == AMDGPU::NoRegister) {
+        LLVMContext &Ctx = MF->getFunction()->getContext();
+        Ctx.emitError("Ran out of VGPRs for spilling SGPR");
+      }
+
+      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_READLANE_B32), SubReg)
+              .addReg(Spill.VGPR)
+              .addImm(Spill.Lane);
+
+    }
+    TII->insertNOPs(MI, 3);
+    MI->eraseFromParent();
+    break;
+  }
+
+  default: {
+    int64_t Offset = FrameInfo->getObjectOffset(Index);
+    FIOp.ChangeToImmediate(Offset);
+    if (!TII->isImmOperandLegal(MI, FIOperandNum, FIOp)) {
+      unsigned TmpReg = RS->scavengeRegister(&AMDGPU::VReg_32RegClass, MI, SPAdj);
+      BuildMI(*MBB, MI, MI->getDebugLoc(),
+              TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
+              .addImm(Offset);
+      FIOp.ChangeToRegister(TmpReg, false);
+    }
+  }
  }
}
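For non-spill frame indices the default case keeps the earlier behavior: fold the object offset into the instruction as an immediate if that is legal, otherwise scavenge a temporary VGPR and materialize the offset with V_MOV_B32. A minimal standalone sketch of that decision follows; isLegalImmediate() and its 16-bit limit are assumptions for illustration, not the target's real encoding rules.

// Standalone sketch (not LLVM API) of the default-case decision above:
// use the frame offset directly when it is a legal immediate, otherwise
// pretend to scavenge a temporary register and materialize the offset.
#include <cstdint>
#include <iostream>
#include <string>

// Illustrative constraint only: assume the instruction encodes a signed
// 16-bit offset. The real check is TII->isImmOperandLegal().
static bool isLegalImmediate(int64_t Offset) {
  return Offset >= -32768 && Offset <= 32767;
}

// Describe how the frame-index operand would be rewritten.
static std::string rewriteFrameIndex(int64_t Offset) {
  if (isLegalImmediate(Offset))
    return "fold offset " + std::to_string(Offset) + " as an immediate";
  // Analogue of the scavenge + V_MOV_B32 path: put the offset in a register
  // and use that register as the operand instead.
  return "scavenge a temp register, materialize " + std::to_string(Offset) +
         ", use the register operand";
}

int main() {
  std::cout << rewriteFrameIndex(1024) << "\n";    // fits: immediate form
  std::cout << rewriteFrameIndex(1 << 20) << "\n"; // too large: register form
  return 0;
}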