diff options
author | Matt Arsenault <Matthew.Arsenault@amd.com> | 2016-11-24 00:26:44 +0000 |
---|---|---|
committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2016-11-24 00:26:44 +0000 |
commit | 5ee33253589755df83bd111136e4ffbdd0478b3a (patch) | |
tree | 487079308ebd148c200c889f03ef0be43ddebcfb | |
parent | 9e5c7b10316aca49605dd7cf5f4e6e4a3ab76cd6 (diff) | |
download | bcm5719-llvm-5ee33253589755df83bd111136e4ffbdd0478b3a.tar.gz bcm5719-llvm-5ee33253589755df83bd111136e4ffbdd0478b3a.zip |
AMDGPU: Remove m0 spilling code
Since m0 isn't allocatable, it should never be spilled anymore.
llvm-svn: 287842
-rw-r--r-- | llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp | 40 |
1 file changed, 3 insertions(+), 37 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp index 37bda5337f4..e833b2e9fb3 100644 --- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp +++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp @@ -517,6 +517,8 @@ void SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI, bool SpillToSMEM = ST.hasScalarStores() && EnableSpillSGPRToSMEM; + assert(SuperReg != AMDGPU::M0 && "m0 should never spill"); + const unsigned EltSize = 4; // SubReg carries the "Kill" flag when SubReg == SuperReg. @@ -526,19 +528,6 @@ void SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI, SuperReg : getSubReg(SuperReg, getSubRegFromChannel(i)); if (SpillToSMEM) { - if (SuperReg == AMDGPU::M0) { - assert(NumSubRegs == 1); - unsigned CopyM0 - = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); - - BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), CopyM0) - .addReg(AMDGPU::M0, getKillRegState(IsKill)); - - // The real spill now kills the temp copy. - SubReg = SuperReg = CopyM0; - IsKill = true; - } - int64_t FrOffset = FrameInfo.getObjectOffset(Index); unsigned Align = FrameInfo.getObjectAlignment(Index); MachinePointerInfo PtrInfo @@ -576,18 +565,6 @@ void SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI, struct SIMachineFunctionInfo::SpilledReg Spill = MFI->getSpilledReg(MF, Index, i); if (Spill.hasReg()) { - if (SuperReg == AMDGPU::M0) { - assert(NumSubRegs == 1); - unsigned CopyM0 - = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); - BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), CopyM0) - .addReg(SuperReg, getKillRegState(IsKill)); - - // The real spill now kills the temp copy. 
- SubReg = SuperReg = CopyM0; - IsKill = true; - } - BuildMI(*MBB, MI, DL, TII->getMCOpcodeFromPseudo(AMDGPU::V_WRITELANE_B32), Spill.VGPR) @@ -654,13 +631,7 @@ void SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI, unsigned SuperReg = MI->getOperand(0).getReg(); bool SpillToSMEM = ST.hasScalarStores() && EnableSpillSGPRToSMEM; - // m0 is not allowed as with readlane/writelane, so a temporary SGPR and - // extra copy is needed. - bool IsM0 = (SuperReg == AMDGPU::M0); - if (IsM0) { - assert(NumSubRegs == 1); - SuperReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); - } + assert(SuperReg != AMDGPU::M0 && "m0 should never spill"); int64_t FrOffset = FrameInfo.getObjectOffset(Index); @@ -745,11 +716,6 @@ void SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI, } } - if (IsM0 && SuperReg != AMDGPU::M0) { - BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) - .addReg(SuperReg); - } - MI->eraseFromParent(); } |