summaryrefslogtreecommitdiffstats
path: root/llvm/lib/Target
diff options
context:
space:
mode:
author: Mehdi Amini <mehdi.amini@apple.com> 2016-10-01 02:35:24 +0000
committer: Mehdi Amini <mehdi.amini@apple.com> 2016-10-01 02:35:24 +0000
commit: 86eeda8e20d5aa5a5622a487c56364b4b26c6f3a (patch)
tree: 299d69b953e80895aa84ee7108b99bfaa82460e9 /llvm/lib/Target
parent: e8d141c67551f446a1b6166750abed2ebc32cd11 (diff)
download: bcm5719-llvm-86eeda8e20d5aa5a5622a487c56364b4b26c6f3a.tar.gz
download: bcm5719-llvm-86eeda8e20d5aa5a5622a487c56364b4b26c6f3a.zip
Revert "AMDGPU: Don't use offen if it is 0"
This reverts commit r282999. Tests are not passing: http://lab.llvm.org:8011/builders/clang-x86_64-linux-selfhost-modules/builds/20038

llvm-svn: 283003
Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp104
-rw-r--r--llvm/lib/Target/AMDGPU/SIRegisterInfo.h10
2 files changed, 14 insertions, 100 deletions
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index 6f8892d5c64..4e842ee9870 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -320,82 +320,14 @@ static unsigned getNumSubRegsForSpillOp(unsigned Op) {
}
}
-static int getOffsetMUBUFStore(unsigned Opc) {
- switch (Opc) {
- case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
- return AMDGPU::BUFFER_STORE_DWORD_OFFSET;
- case AMDGPU::BUFFER_STORE_BYTE_OFFEN:
- return AMDGPU::BUFFER_STORE_BYTE_OFFSET;
- case AMDGPU::BUFFER_STORE_SHORT_OFFEN:
- return AMDGPU::BUFFER_STORE_SHORT_OFFSET;
- case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN:
- return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET;
- case AMDGPU::BUFFER_STORE_DWORDX4_OFFEN:
- return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET;
- default:
- return -1;
- }
-}
-
-static int getOffsetMUBUFLoad(unsigned Opc) {
- switch (Opc) {
- case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
- return AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
- case AMDGPU::BUFFER_LOAD_UBYTE_OFFEN:
- return AMDGPU::BUFFER_LOAD_UBYTE_OFFSET;
- case AMDGPU::BUFFER_LOAD_SBYTE_OFFEN:
- return AMDGPU::BUFFER_LOAD_SBYTE_OFFSET;
- case AMDGPU::BUFFER_LOAD_USHORT_OFFEN:
- return AMDGPU::BUFFER_LOAD_USHORT_OFFSET;
- case AMDGPU::BUFFER_LOAD_SSHORT_OFFEN:
- return AMDGPU::BUFFER_LOAD_SSHORT_OFFSET;
- case AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN:
- return AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
- case AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN:
- return AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET;
- default:
- return -1;
- }
-}
-
-// This differs from buildSpillLoadStore by only scavenging a VGPR. It does not
-// need to handle the case where an SGPR may need to be spilled while spilling.
-static bool buildMUBUFOffsetLoadStore(const SIInstrInfo *TII,
- MachineFrameInfo &MFI,
- MachineBasicBlock::iterator MI,
- int Index,
- int64_t Offset) {
- MachineBasicBlock *MBB = MI->getParent();
- const DebugLoc &DL = MI->getDebugLoc();
- bool IsStore = MI->mayStore();
-
- unsigned Opc = MI->getOpcode();
- int LoadStoreOp = IsStore ?
- getOffsetMUBUFStore(Opc) : getOffsetMUBUFLoad(Opc);
- if (LoadStoreOp == -1)
- return false;
-
- unsigned Reg = TII->getNamedOperand(*MI, AMDGPU::OpName::vdata)->getReg();
-
- BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
- .addReg(Reg, getDefRegState(!IsStore))
- .addOperand(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc))
- .addOperand(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset))
- .addImm(Offset)
- .addImm(0) // glc
- .addImm(0) // slc
- .addImm(0) // tfe
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
- return true;
-}
+void SIRegisterInfo::buildScratchLoadStore(MachineBasicBlock::iterator MI,
+ unsigned LoadStoreOp,
+ const MachineOperand *SrcDst,
+ unsigned ScratchRsrcReg,
+ unsigned ScratchOffset,
+ int64_t Offset,
+ RegScavenger *RS) const {
-void SIRegisterInfo::buildSpillLoadStore(MachineBasicBlock::iterator MI,
- unsigned LoadStoreOp,
- const MachineOperand *SrcDst,
- unsigned ScratchRsrcReg,
- unsigned ScratchOffset,
- int64_t Offset,
- RegScavenger *RS) const {
unsigned Value = SrcDst->getReg();
bool IsKill = SrcDst->isKill();
MachineBasicBlock *MBB = MI->getParent();
@@ -642,7 +574,7 @@ void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
case AMDGPU::SI_SPILL_V96_SAVE:
case AMDGPU::SI_SPILL_V64_SAVE:
case AMDGPU::SI_SPILL_V32_SAVE:
- buildSpillLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET,
+ buildScratchLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET,
TII->getNamedOperand(*MI, AMDGPU::OpName::vdata),
TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)->getReg(),
TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg(),
@@ -657,7 +589,7 @@ void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
case AMDGPU::SI_SPILL_V128_RESTORE:
case AMDGPU::SI_SPILL_V256_RESTORE:
case AMDGPU::SI_SPILL_V512_RESTORE: {
- buildSpillLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
+ buildScratchLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
TII->getNamedOperand(*MI, AMDGPU::OpName::vdata),
TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)->getReg(),
TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg(),
@@ -668,24 +600,6 @@ void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
}
default: {
- if (TII->isMUBUF(*MI)) {
- // Disable offen so we don't need a 0 vgpr base.
- assert(static_cast<int>(FIOperandNum) ==
- AMDGPU::getNamedOperandIdx(MI->getOpcode(),
- AMDGPU::OpName::vaddr));
-
- int64_t Offset = FrameInfo.getObjectOffset(Index);
- int64_t OldImm
- = TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm();
- int64_t NewOffset = OldImm + Offset;
-
- if (isUInt<12>(NewOffset) &&
- buildMUBUFOffsetLoadStore(TII, FrameInfo, MI, Index, NewOffset)) {
- MI->eraseFromParent();
- break;
- }
- }
-
int64_t Offset = FrameInfo.getObjectOffset(Index);
FIOp.ChangeToImmediate(Offset);
if (!TII->isImmOperandLegal(*MI, FIOperandNum, FIOp)) {
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.h b/llvm/lib/Target/AMDGPU/SIRegisterInfo.h
index b62b9932c7a..59952b4fd85 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.h
@@ -240,11 +240,11 @@ public:
unsigned getMaxNumVGPRs(const MachineFunction &MF) const;
private:
- void buildSpillLoadStore(MachineBasicBlock::iterator MI,
- unsigned LoadStoreOp, const MachineOperand *SrcDst,
- unsigned ScratchRsrcReg, unsigned ScratchOffset,
- int64_t Offset,
- RegScavenger *RS) const;
+ void buildScratchLoadStore(MachineBasicBlock::iterator MI,
+ unsigned LoadStoreOp, const MachineOperand *SrcDst,
+ unsigned ScratchRsrcReg, unsigned ScratchOffset,
+ int64_t Offset,
+ RegScavenger *RS) const;
};
} // End namespace llvm
OpenPOWER on IntegriCloud