path: root/llvm/lib/Target/R600/SIFixSGPRCopies.cpp
author     Tom Stellard <thomas.stellard@amd.com>  2013-11-13 23:36:37 +0000
committer  Tom Stellard <thomas.stellard@amd.com>  2013-11-13 23:36:37 +0000
commit     8216602a0b5f6c86ddb8ac579e2abdc6e9285212 (patch)
tree       e5d82640c9bae2da7415c75289ee4f3e849b5643 /llvm/lib/Target/R600/SIFixSGPRCopies.cpp
parent     0acd8243e351d9f4c36347bd3967052871190b59 (diff)
R600/SI: Prefer SALU instructions for bit shift operations
All shift operations will be selected as SALU instructions and then, if necessary, lowered to VALU instructions in the SIFixSGPRCopies pass. This allows us to do more operations on the SALU, which will improve performance and is also required for implementing private memory using indirect addressing, since the private memory pointers must stay in the scalar registers.

This patch includes some fixes from Matt Arsenault.

llvm-svn: 194625
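As a rough illustration of the rewrite this enables (a simplified machine-IR sketch, not taken from the patch or its tests; the register names and exact opcode spellings are placeholders):

    ; Before the pass: the shift was selected for the SALU, but its operand is
    ; produced on the VALU, so selection created a VGPR -> SGPR COPY, which the
    ; hardware cannot execute.
    %sreg = COPY %vreg                ; illegal VGPR -> SGPR copy
    %sres = S_LSHL_B32 %sreg, 2       ; scalar shift

    ; After SIFixSGPRCopies: moveToVALU() rewrites the chain onto the VALU,
    ; so the value never has to leave the vector registers.
    %vres = V_LSHL_B32 %vreg, 2       ; vector shift replaces copy + scalar shift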
Diffstat (limited to 'llvm/lib/Target/R600/SIFixSGPRCopies.cpp')
-rw-r--r--  llvm/lib/Target/R600/SIFixSGPRCopies.cpp  147
1 file changed, 130 insertions(+), 17 deletions(-)
diff --git a/llvm/lib/Target/R600/SIFixSGPRCopies.cpp b/llvm/lib/Target/R600/SIFixSGPRCopies.cpp
index 7f07b01f087..655db5b01da 100644
--- a/llvm/lib/Target/R600/SIFixSGPRCopies.cpp
+++ b/llvm/lib/Target/R600/SIFixSGPRCopies.cpp
@@ -65,10 +65,13 @@
/// ultimately led to the creation of an illegal COPY.
//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "sgpr-copies"
#include "AMDGPU.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;
@@ -79,9 +82,16 @@ class SIFixSGPRCopies : public MachineFunctionPass {
private:
static char ID;
- const TargetRegisterClass *inferRegClass(const TargetRegisterInfo *TRI,
+ const TargetRegisterClass *inferRegClassFromUses(const SIRegisterInfo *TRI,
const MachineRegisterInfo &MRI,
- unsigned Reg) const;
+ unsigned Reg,
+ unsigned SubReg) const;
+ const TargetRegisterClass *inferRegClassFromDef(const SIRegisterInfo *TRI,
+ const MachineRegisterInfo &MRI,
+ unsigned Reg,
+ unsigned SubReg) const;
+ bool isVGPRToSGPRCopy(const MachineInstr &Copy, const SIRegisterInfo *TRI,
+ const MachineRegisterInfo &MRI) const;
public:
SIFixSGPRCopies(TargetMachine &tm) : MachineFunctionPass(ID) { }
@@ -102,25 +112,41 @@ FunctionPass *llvm::createSIFixSGPRCopiesPass(TargetMachine &tm) {
return new SIFixSGPRCopies(tm);
}
-/// This functions walks the use/def chains starting with the definition of
-/// \p Reg until it finds an Instruction that isn't a COPY returns
-/// the register class of that instruction.
-const TargetRegisterClass *SIFixSGPRCopies::inferRegClass(
- const TargetRegisterInfo *TRI,
+static bool hasVGPROperands(const MachineInstr &MI, const SIRegisterInfo *TRI) {
+ const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ if (!MI.getOperand(i).isReg() ||
+ !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg()))
+ continue;
+
+ if (TRI->hasVGPRs(MRI.getRegClass(MI.getOperand(i).getReg())))
+ return true;
+ }
+ return false;
+}
+
+/// This function walks the use list of \p Reg until it finds an Instruction
+/// that isn't a COPY and returns the register class of that instruction.
+/// \param[out] The register defined by the first non-COPY instruction.
+const TargetRegisterClass *SIFixSGPRCopies::inferRegClassFromUses(
+ const SIRegisterInfo *TRI,
const MachineRegisterInfo &MRI,
- unsigned Reg) const {
+ unsigned Reg,
+ unsigned SubReg) const {
// The Reg parameter to the function must always be defined by either a PHI
// or a COPY, therefore it cannot be a physical register.
assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
"Reg cannot be a physical register");
const TargetRegisterClass *RC = MRI.getRegClass(Reg);
+ RC = TRI->getSubRegClass(RC, SubReg);
for (MachineRegisterInfo::use_iterator I = MRI.use_begin(Reg),
E = MRI.use_end(); I != E; ++I) {
switch (I->getOpcode()) {
case AMDGPU::COPY:
- RC = TRI->getCommonSubClass(RC, inferRegClass(TRI, MRI,
- I->getOperand(0).getReg()));
+ RC = TRI->getCommonSubClass(RC, inferRegClassFromUses(TRI, MRI,
+ I->getOperand(0).getReg(),
+ I->getOperand(0).getSubReg()));
break;
}
}
@@ -128,9 +154,50 @@ const TargetRegisterClass *SIFixSGPRCopies::inferRegClass(
return RC;
}
+const TargetRegisterClass *SIFixSGPRCopies::inferRegClassFromDef(
+ const SIRegisterInfo *TRI,
+ const MachineRegisterInfo &MRI,
+ unsigned Reg,
+ unsigned SubReg) const {
+ if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
+ const TargetRegisterClass *RC = TRI->getPhysRegClass(Reg);
+ return TRI->getSubRegClass(RC, SubReg);
+ }
+ MachineInstr *Def = MRI.getVRegDef(Reg);
+ if (Def->getOpcode() != AMDGPU::COPY) {
+ return TRI->getSubRegClass(MRI.getRegClass(Reg), SubReg);
+ }
+
+ return inferRegClassFromDef(TRI, MRI, Def->getOperand(1).getReg(),
+ Def->getOperand(1).getSubReg());
+}
+
+bool SIFixSGPRCopies::isVGPRToSGPRCopy(const MachineInstr &Copy,
+ const SIRegisterInfo *TRI,
+ const MachineRegisterInfo &MRI) const {
+
+ unsigned DstReg = Copy.getOperand(0).getReg();
+ unsigned SrcReg = Copy.getOperand(1).getReg();
+ unsigned SrcSubReg = Copy.getOperand(1).getSubReg();
+ const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg);
+
+ if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
+ DstRC == &AMDGPU::M0RegRegClass)
+ return false;
+
+ const TargetRegisterClass *SrcRC = TRI->getSubRegClass(
+ MRI.getRegClass(SrcReg), SrcSubReg);
+
+ return TRI->isSGPRClass(DstRC) &&
+ !TRI->getCommonSubClass(DstRC, SrcRC);
+}
+
bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
MachineRegisterInfo &MRI = MF.getRegInfo();
- const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
+ const SIRegisterInfo *TRI = static_cast<const SIRegisterInfo *>(
+ MF.getTarget().getRegisterInfo());
+ const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(
+ MF.getTarget().getInstrInfo());
for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
BI != BE; ++BI) {
@@ -138,13 +205,59 @@ bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
I != E; ++I) {
MachineInstr &MI = *I;
- if (MI.getOpcode() != AMDGPU::PHI) {
- continue;
+ if (MI.getOpcode() == AMDGPU::COPY && isVGPRToSGPRCopy(MI, TRI, MRI)) {
+ DEBUG(dbgs() << "Fixing VGPR -> SGPR copy:\n");
+ DEBUG(MI.print(dbgs()));
+ TII->moveToVALU(MI);
+
+ }
+
+ switch (MI.getOpcode()) {
+ default: continue;
+ case AMDGPU::PHI: {
+ DEBUG(dbgs() << " Fixing PHI:\n");
+ DEBUG(MI.print(dbgs()));
+
+ for (unsigned i = 1; i < MI.getNumOperands(); i+=2) {
+ unsigned Reg = MI.getOperand(i).getReg();
+ const TargetRegisterClass *RC = inferRegClassFromDef(TRI, MRI, Reg,
+ MI.getOperand(0).getSubReg());
+ MRI.constrainRegClass(Reg, RC);
+ }
+ unsigned Reg = MI.getOperand(0).getReg();
+ const TargetRegisterClass *RC = inferRegClassFromUses(TRI, MRI, Reg,
+ MI.getOperand(0).getSubReg());
+ if (TRI->getCommonSubClass(RC, &AMDGPU::VReg_32RegClass)) {
+ MRI.constrainRegClass(Reg, &AMDGPU::VReg_32RegClass);
+ }
+
+ if (!TRI->isSGPRClass(MRI.getRegClass(Reg)))
+ break;
+
+ // If a PHI node defines an SGPR and any of its operands are VGPRs,
+ // then we need to move it to the VALU.
+ for (unsigned i = 1; i < MI.getNumOperands(); i+=2) {
+ unsigned Reg = MI.getOperand(i).getReg();
+ if (TRI->hasVGPRs(MRI.getRegClass(Reg))) {
+ TII->moveToVALU(MI);
+ break;
+ }
+ }
+
+ break;
+ }
+ case AMDGPU::REG_SEQUENCE: {
+ if (TRI->hasVGPRs(TII->getOpRegClass(MI, 0)) ||
+ !hasVGPROperands(MI, TRI))
+ continue;
+
+ DEBUG(dbgs() << "Fixing REG_SEQUENCE: \n");
+ DEBUG(MI.print(dbgs()));
+
+ TII->moveToVALU(MI);
+ TII->legalizeOperands(&MI);
+ break;
}
- unsigned Reg = MI.getOperand(0).getReg();
- const TargetRegisterClass *RC = inferRegClass(TRI, MRI, Reg);
- if (TRI->getCommonSubClass(RC, &AMDGPU::VReg_32RegClass)) {
- MRI.constrainRegClass(Reg, &AMDGPU::VReg_32RegClass);
}
}
}